From 1fddb9a4ba978a70ff29f9789eb377fec5d39955 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Jun 2022 01:52:28 +0000 Subject: [PATCH 01/91] build(deps): bump reqwest from 0.11.10 to 0.11.11 (#4610) Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.11.10 to 0.11.11. - [Release notes](https://github.com/seanmonstar/reqwest/releases) - [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md) - [Commits](https://github.com/seanmonstar/reqwest/compare/v0.11.10...v0.11.11) --- updated-dependencies: - dependency-name: reqwest dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 9 +++++---- zebrad/Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b5b72c7e23d..961c5d17f9e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3792,9 +3792,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.10" +version = "0.11.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46a1f7aa4f35e5e8b4160449f51afc758f0ce6454315a9fa7d0d113e958c41eb" +checksum = "b75aa69a3f06bbcc66ede33af2af253c6f7a86b1ca0033f60c580a27074fbf92" dependencies = [ "base64", "bytes", @@ -3823,6 +3823,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls", + "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -3983,9 +3984,9 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "0.3.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ee86d63972a7c661d1536fefe8c3c8407321c3df668891286de28abcd087360" +checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" dependencies = [ "base64", ] diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 4b9ad4cc47b..fff90f6036e 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -116,7 +116,7 @@ abscissa_core = { version = "0.5", features = ["testing"] } hex = "0.4.3" once_cell = "1.12.0" regex = "1.5.6" -reqwest = "0.11.10" +reqwest = "0.11.11" semver = "1.0.10" # zebra-rpc needs the preserve_order feature, it also makes test results more stable serde_json = { version = "1.0.81", features = ["preserve_order"] } From cc75c3f5f9ae2f4c90bf0bf7acee22675ba24f73 Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 15 Jun 2022 05:57:19 +0200 Subject: [PATCH 02/91] fix(doc): Fix various doc warnings, part 3 (#4611) * Fix the syntax of links in comments * Fix a mistake in the docs Co-authored-by: Alfredo Garcia * Remove unnecessary angle brackets from a link * Revert the changes for links that serve as references * Revert "Revert the changes for links that serve as references" This reverts commit 8b091aa9fab453e7d3559a5d474e0879183b9bfb. * Remove `<` `>` from links that serve as references This reverts commit 046ef25620ae1a2140760ae7ea379deecb4b583c. 
* Don't use `<` `>` in normal comments * Don't use `<` `>` for normal comments * Revert changes for comments starting with `//` * Fix some warnings produced by `cargo doc` * Fix some rustdoc warnings * Fix some warnings * Refactor some changes * Fix some rustdoc warnings * Fix some rustdoc warnings * Resolve various TODOs Co-authored-by: teor * Fix some unresolved links * Allow links to private items * Fix some unresolved links Co-authored-by: Alfredo Garcia Co-authored-by: teor Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-chain/src/lib.rs | 1 + zebra-chain/src/transparent/utxo.rs | 6 ++-- zebra-consensus/src/lib.rs | 1 + zebra-network/src/isolated/tor.rs | 6 ++-- zebra-network/src/lib.rs | 1 + zebra-network/src/meta_addr.rs | 7 ++-- zebra-network/src/peer/priority.rs | 8 +++-- zebra-network/src/peer_set/candidate_set.rs | 19 ++++++++--- zebra-network/src/peer_set/initialize.rs | 9 ++++-- zebra-network/src/peer_set/limit.rs | 4 ++- zebra-network/src/peer_set/set.rs | 32 +++++++++++-------- zebra-network/src/protocol/external/addr.rs | 4 +-- .../src/protocol/external/addr/v1.rs | 4 +++ .../src/protocol/external/addr/v2.rs | 10 ++++++ .../src/protocol/external/arbitrary.rs | 13 +++++--- .../src/protocol/external/message.rs | 4 +-- zebra-state/src/lib.rs | 1 + zebra-test/src/lib.rs | 2 +- zebrad/src/components/mempool.rs | 2 +- zebrad/src/components/mempool/config.rs | 5 +-- zebrad/src/components/mempool/downloads.rs | 12 ++++--- zebrad/src/components/mempool/gossip.rs | 7 ++-- zebrad/src/components/mempool/storage.rs | 3 +- .../mempool/storage/eviction_list.rs | 4 ++- zebrad/src/components/sync/gossip.rs | 4 +++ .../components/sync/recent_sync_lengths.rs | 10 ++++-- zebrad/src/lib.rs | 1 + 27 files changed, 125 insertions(+), 55 deletions(-) diff --git a/zebra-chain/src/lib.rs b/zebra-chain/src/lib.rs index 755cd48c9cf..b529a5b44fb 100644 --- a/zebra-chain/src/lib.rs +++ b/zebra-chain/src/lib.rs @@ -6,6 +6,7 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] #![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_chain")] +#![allow(rustdoc::private_intra_doc_links)] // Required by bitvec! macro #![recursion_limit = "256"] diff --git a/zebra-chain/src/transparent/utxo.rs b/zebra-chain/src/transparent/utxo.rs index ec17a7048b2..f331619ae92 100644 --- a/zebra-chain/src/transparent/utxo.rs +++ b/zebra-chain/src/transparent/utxo.rs @@ -111,10 +111,8 @@ impl OrderedUtxo { /// A restriction that must be checked before spending a transparent output of a /// coinbase transaction. /// -/// TODO: fix the comment below because -/// [`CoinbaseSpendRestriction::check_spend`] doesn't exist. -/// -/// See [`CoinbaseSpendRestriction::check_spend`] for the consensus rules. +/// See the function `transparent_coinbase_spend` in `zebra-state` for the +/// consensus rules. 
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[cfg_attr( any(test, feature = "proptest-impl"), diff --git a/zebra-consensus/src/lib.rs b/zebra-consensus/src/lib.rs index 392971e1b5d..43f1cbc74d1 100644 --- a/zebra-consensus/src/lib.rs +++ b/zebra-consensus/src/lib.rs @@ -33,6 +33,7 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] #![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_consensus")] +#![allow(rustdoc::private_intra_doc_links)] mod block; mod checkpoint; diff --git a/zebra-network/src/isolated/tor.rs b/zebra-network/src/isolated/tor.rs index e9922511ded..b51113a0868 100644 --- a/zebra-network/src/isolated/tor.rs +++ b/zebra-network/src/isolated/tor.rs @@ -96,9 +96,9 @@ async fn new_tor_stream(hostname: String) -> Result { Ok(tor_stream) } -/// Returns a new tor client instance, and updates [`SHARED_TOR_CLIENT`]. +/// Returns a new tor client instance, and updates [`struct@SHARED_TOR_CLIENT`]. /// -/// If there is a bootstrap error, [`SHARED_TOR_CLIENT`] is not modified. +/// If there is a bootstrap error, [`struct@SHARED_TOR_CLIENT`] is not modified. async fn new_tor_client() -> Result, BoxError> { let runtime = tokio::runtime::Handle::current(); let runtime = TokioRuntimeHandle::new(runtime); @@ -117,7 +117,7 @@ async fn new_tor_client() -> Result, BoxError> { Ok(tor_client) } -/// Returns an isolated tor client instance by cloning [`SHARED_TOR_CLIENT`]. +/// Returns an isolated tor client instance by cloning [`struct@SHARED_TOR_CLIENT`]. /// /// If [`new_tor_client`] has not run successfully yet, returns `None`. fn cloned_tor_client() -> Option> { diff --git a/zebra-network/src/lib.rs b/zebra-network/src/lib.rs index a8309fa4042..172fe7f8caf 100644 --- a/zebra-network/src/lib.rs +++ b/zebra-network/src/lib.rs @@ -124,6 +124,7 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] #![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_network")] +#![allow(rustdoc::private_intra_doc_links)] #[macro_use] extern crate pin_project; diff --git a/zebra-network/src/meta_addr.rs b/zebra-network/src/meta_addr.rs index 599585b58cc..be3b1ef4e60 100644 --- a/zebra-network/src/meta_addr.rs +++ b/zebra-network/src/meta_addr.rs @@ -91,6 +91,8 @@ impl Ord for PeerAddrState { /// order, ignoring liveness. /// /// See [`CandidateSet`] and [`MetaAddr::cmp`] for more details. + /// + /// [`CandidateSet`]: super::peer_set::CandidateSet fn cmp(&self, other: &Self) -> Ordering { use Ordering::*; match (self, other) { @@ -861,11 +863,12 @@ impl Ord for MetaAddr { /// with `Responded` peers sorted first as a group. /// /// But this order should not be used for reconnection attempts: use - /// [`reconnection_peers`][rp] instead. + /// [`reconnection_peers`] instead. /// /// See [`CandidateSet`] for more details. 
/// - /// [rp]: crate::AddressBook::reconnection_peers + /// [`CandidateSet`]: super::peer_set::CandidateSet + /// [`reconnection_peers`]: crate::AddressBook::reconnection_peers fn cmp(&self, other: &Self) -> Ordering { use std::net::IpAddr::{V4, V6}; use Ordering::*; diff --git a/zebra-network/src/peer/priority.rs b/zebra-network/src/peer/priority.rs index 5e01dc5acdd..d50130e70d1 100644 --- a/zebra-network/src/peer/priority.rs +++ b/zebra-network/src/peer/priority.rs @@ -10,7 +10,8 @@ use AttributePreference::*; /// /// Invalid peer attributes are represented as errors. /// -/// Outbound peer connections are initiated in the sorted [order](std::ops::Ord) of this type. +/// Outbound peer connections are initiated in the sorted [order](std::cmp::Ord) +/// of this type. /// /// The derived order depends on the order of the variants in the enum. /// The variants are sorted in the order they are listed. @@ -30,7 +31,8 @@ pub enum AttributePreference { /// A level of preference for a peer. /// -/// Outbound peer connections are initiated in the sorted [order](std::ops::Ord) of this type. +/// Outbound peer connections are initiated in the sorted [order](std::cmp::Ord) +/// of this type. /// /// The derived order depends on the order of the fields in the struct. /// The first field determines the overall order, then later fields sort equal first field values. @@ -84,6 +86,8 @@ impl PeerPreference { /// /// Since the addresses in the address book are unique, this check can be /// used to permanently reject entire [`MetaAddr`]s. +/// +/// [`MetaAddr`]: crate::meta_addr::MetaAddr fn address_is_valid_for_outbound_connections( peer_addr: &SocketAddr, network: impl Into>, diff --git a/zebra-network/src/peer_set/candidate_set.rs b/zebra-network/src/peer_set/candidate_set.rs index 212aa35b3a6..950f05672a7 100644 --- a/zebra-network/src/peer_set/candidate_set.rs +++ b/zebra-network/src/peer_set/candidate_set.rs @@ -15,11 +15,11 @@ use crate::{ #[cfg(test)] mod tests; -/// The [`CandidateSet`] manages outbound peer connection attempts. -/// Successful connections become peers in the [`PeerSet`]. +/// The [`CandidateSet`] manages outbound peer connection attempts. Successful +/// connections become peers in the [`PeerSet`](super::set::PeerSet). /// /// The candidate set divides the set of all possible outbound peers into -/// disjoint subsets, using the [`PeerAddrState`]: +/// disjoint subsets, using the [`PeerAddrState`](crate::PeerAddrState): /// /// 1. [`Responded`] peers, which we have had an outbound connection to. /// 2. [`NeverAttemptedGossiped`] peers, which we learned about from other peers @@ -107,6 +107,13 @@ mod tests; /// │ * update last_response to now() │ /// └───────────────────────────────────────┘ /// ``` +/// +/// [`Responded`]: crate::PeerAddrState::Responded +/// [`Version`]: crate::protocol::external::types::Version +/// [`NeverAttemptedGossiped`]: crate::PeerAddrState::NeverAttemptedGossiped +/// [`NeverAttemptedAlternate`]: crate::PeerAddrState::NeverAttemptedAlternate +/// [`Failed`]: crate::PeerAddrState::Failed +/// [`AttemptPending`]: crate::PeerAddrState::AttemptPending // TODO: // * show all possible transitions between Attempt/Responded/Failed, // except Failed -> Responded is invalid, must go through Attempt @@ -340,10 +347,10 @@ where /// /// ## Correctness /// - /// `AttemptPending` peers will become `Responded` if they respond, or + /// `AttemptPending` peers will become [`Responded`] if they respond, or /// become `Failed` if they time out or provide a bad response. 
/// - /// Live `Responded` peers will stay live if they keep responding, or + /// Live [`Responded`] peers will stay live if they keep responding, or /// become a reconnection candidate if they stop responding. /// /// ## Security @@ -351,6 +358,8 @@ where /// Zebra resists distributed denial of service attacks by making sure that /// new peer connections are initiated at least /// [`MIN_PEER_CONNECTION_INTERVAL`][constants::MIN_PEER_CONNECTION_INTERVAL] apart. + /// + /// [`Responded`]: crate::PeerAddrState::Responded pub async fn next(&mut self) -> Option { // Correctness: To avoid hangs, computation in the critical section should be kept to a minimum. let address_book = self.address_book.clone(); diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 6e22221323d..66fca1fb660 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -42,9 +42,10 @@ use crate::{ #[cfg(test)] mod tests; -/// The result of an outbound peer connection attempt or inbound connection handshake. +/// The result of an outbound peer connection attempt or inbound connection +/// handshake. /// -/// This result comes from the [`Handshaker`]. +/// This result comes from the `Handshaker`. type DiscoveredPeer = Result<(SocketAddr, peer::Client), BoxError>; /// Initialize a peer set, using a network `config`, `inbound_service`, @@ -65,7 +66,7 @@ type DiscoveredPeer = Result<(SocketAddr, peer::Client), BoxError>; /// cause the peer set to shrink when the inbound service is unable to keep up /// with the volume of inbound requests. /// -/// Use [`NoChainTip`] to explicitly provide no chain tip receiver. +/// Use [`NoChainTip`][1] to explicitly provide no chain tip receiver. /// /// In addition to returning a service for outbound requests, this method /// returns a shared [`AddressBook`] updated with last-seen timestamps for @@ -77,6 +78,8 @@ type DiscoveredPeer = Result<(SocketAddr, peer::Client), BoxError>; /// /// If `config.config.peerset_initial_target_size` is zero. /// (zebra-network expects to be able to connect to at least one peer.) +/// +/// [1]: zebra_chain::chain_tip::NoChainTip pub async fn init( config: Config, inbound_service: S, diff --git a/zebra-network/src/peer_set/limit.rs b/zebra-network/src/peer_set/limit.rs index 9979ba3d157..5b3a6244518 100644 --- a/zebra-network/src/peer_set/limit.rs +++ b/zebra-network/src/peer_set/limit.rs @@ -7,9 +7,11 @@ use std::fmt; use tokio::sync::mpsc; -/// A signal sent by a [`Connection`] when it closes. +/// A signal sent by a [`Connection`][1] when it closes. /// /// Used to count the number of open connections. +/// +/// [1]: crate::peer::Connection #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub struct ConnectionClosed; diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index 5d563180949..d12b60b9108 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -55,8 +55,7 @@ //! therefore automatically increased when the block height reaches a network upgrade's activation //! height. The helper type is then used to: //! -//! - cancel handshakes to outdated peers, in -//! [`handshake::negotiate_version`][crate::peer::handshake::negotiate_version] +//! - cancel handshakes to outdated peers, in `handshake::negotiate_version` //! - cancel requests to and disconnect from peers that have become outdated, in //! [`PeerSet::push_unready`] //! 
- disconnect from peers that have just responded and became outdated, in @@ -67,9 +66,9 @@ //! ## Network Coalescence //! //! [ZIP-201] also specifies how Zcashd behaves [leading up to a activation -//! height][network-coalescence]. Since Zcashd limits the number of connections to at most eight -//! peers, it will gradually migrate its connections to up-to-date peers as it approaches the -//! activation height. +//! height][1]. Since Zcashd limits the number of connections to at most eight +//! peers, it will gradually migrate its connections to up-to-date peers as it +//! approaches the activation height. //! //! The motivation for this behavior is to avoid an abrupt partitioning the network, which can lead //! to isolated peers and increases the chance of an eclipse attack on some peers of the network. @@ -80,14 +79,18 @@ //! more costly to execute, and the probability of an abrupt network partition that isolates peers //! is lower. //! -//! Even if a Zebra node is manually configured to connect to a smaller number of peers, the -//! [`AddressBook`] is configured to hold a large number of peer addresses -//! ([`MAX_ADDRS_IN_ADDRESS_BOOK`]). Since the address book prioritizes addresses it trusts (like -//! those that it has successfully connected to before), the node should be able to recover and -//! rejoin the network by itself, as long as the address book is populated with enough entries. +//! Even if a Zebra node is manually configured to connect to a smaller number +//! of peers, the [`AddressBook`][2] is configured to hold a large number of +//! peer addresses ([`MAX_ADDRS_IN_ADDRESS_BOOK`][3]). Since the address book +//! prioritizes addresses it trusts (like those that it has successfully +//! connected to before), the node should be able to recover and rejoin the +//! network by itself, as long as the address book is populated with enough +//! entries. //! +//! [1]: https://zips.z.cash/zip-0201#network-coalescence +//! [2]: crate::AddressBook +//! [3]: crate::constants::MAX_ADDRS_IN_ADDRESS_BOOK //! [ZIP-201]: https://zips.z.cash/zip-0201 -//! [network-coalescence]: https://zips.z.cash/zip-0201#network-coalescence use std::{ collections::{HashMap, HashSet}, @@ -142,9 +145,12 @@ mod tests; #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub struct MorePeers; -/// A signal sent by the [`PeerSet`] to cancel a [`Client`]'s current request or response. +/// A signal sent by the [`PeerSet`] to cancel a [`Client`][1]'s current request +/// or response. /// -/// When it receives this signal, the [`Client`] stops processing and exits. +/// When it receives this signal, the [`Client`][1] stops processing and exits. +/// +/// [1]: crate::peer::Client #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub struct CancelClientWork; diff --git a/zebra-network/src/protocol/external/addr.rs b/zebra-network/src/protocol/external/addr.rs index a855e5662e1..111100ecc68 100644 --- a/zebra-network/src/protocol/external/addr.rs +++ b/zebra-network/src/protocol/external/addr.rs @@ -1,9 +1,9 @@ //! Zcash node address types and serialization for Zcash network messages. //! //! Zcash has 3 different node address formats: -//! - [`AddrV1`]: the format used in `addr` (v1) messages, +//! - `AddrV1`: the format used in `addr` (v1) messages, //! - [`AddrInVersion`]: the format used in `version` messages, and -//! - [`AddrV2`]: the format used in `addrv2` messages. +//! - `AddrV2`: the format used in `addrv2` messages. 
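// A rough sketch of the IPv6-mapped IPv4 convention behind the v1 `addr`
// format, using only the standard library. `to_addr_v1_ip` is a hypothetical
// name for illustration; the equivalent logic in this module is
// `ipv6_mapped_ip_addr` in the `v1` submodule below.

use std::net::{IpAddr, Ipv6Addr};

fn to_addr_v1_ip(ip: IpAddr) -> Ipv6Addr {
    match ip {
        // `a.b.c.d` is serialized as the IPv6-mapped address `::ffff:a.b.c.d`,
        // so v1 `addr` entries are always 16 bytes wide.
        IpAddr::V4(v4) => v4.to_ipv6_mapped(),
        IpAddr::V6(v6) => v6,
    }
}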
pub mod canonical; pub mod in_version; diff --git a/zebra-network/src/protocol/external/addr/v1.rs b/zebra-network/src/protocol/external/addr/v1.rs index 8b09f90c740..efdd82456f0 100644 --- a/zebra-network/src/protocol/external/addr/v1.rs +++ b/zebra-network/src/protocol/external/addr/v1.rs @@ -140,6 +140,8 @@ impl TrustedPreallocate for AddrV1 { /// Transform a `SocketAddr` into an IPv6-mapped IPv4 addresses. /// /// See [`canonical_ip_addr`] for detailed info on IPv6-mapped IPv4 addresses. +/// +/// [`canonical_ip_addr`]: super::canonical::canonical_ip_addr pub(in super::super) fn ipv6_mapped_ip_addr(ip_addr: &IpAddr) -> Ipv6Addr { use IpAddr::*; @@ -155,6 +157,8 @@ pub(in super::super) fn ipv6_mapped_ip_addr(ip_addr: &IpAddr) -> Ipv6Addr { /// Also remove IPv6 scope IDs and flow information. /// /// See [`canonical_ip_addr`] for detailed info on IPv6-mapped IPv4 addresses. +/// +/// [`canonical_ip_addr`]: super::canonical::canonical_ip_addr pub(in super::super) fn ipv6_mapped_socket_addr( socket_addr: impl Into, ) -> SocketAddrV6 { diff --git a/zebra-network/src/protocol/external/addr/v2.rs b/zebra-network/src/protocol/external/addr/v2.rs index 7d768f0ddba..d11c6560ab7 100644 --- a/zebra-network/src/protocol/external/addr/v2.rs +++ b/zebra-network/src/protocol/external/addr/v2.rs @@ -47,11 +47,15 @@ pub const MAX_ADDR_V2_ADDR_SIZE: usize = 512; /// > 0x01 IPV4 4 IPv4 address (globally routed internet) /// /// +/// +/// [`Ipv4Addr`]: std::net::Ipv4Addr pub const ADDR_V2_IPV4_NETWORK_ID: u8 = 0x01; /// The size of [`Ipv4Addr`]s in `addrv2` messages. /// /// +/// +/// [`Ipv4Addr`]: std::net::Ipv4Addr pub const ADDR_V2_IPV4_ADDR_SIZE: usize = 4; /// The network ID of [`Ipv6Addr`]s in `addrv2` messages. @@ -59,11 +63,15 @@ pub const ADDR_V2_IPV4_ADDR_SIZE: usize = 4; /// > 0x02 IPV6 16 IPv6 address (globally routed internet) /// /// +/// +/// [`Ipv6Addr`]: std::net::Ipv6Addr pub const ADDR_V2_IPV6_NETWORK_ID: u8 = 0x02; /// The size of [`Ipv6Addr`]s in `addrv2` messages. /// /// +/// +/// [`Ipv6Addr`]: std::net::Ipv6Addr pub const ADDR_V2_IPV6_ADDR_SIZE: usize = 16; /// The second format used for Bitcoin node addresses. @@ -96,6 +104,8 @@ pub(in super::super) enum AddrV2 { /// The peer's IP address. /// /// Unlike [`AddrV1`], this can be an IPv4 or IPv6 address. + /// + /// [`AddrV1`]: super::v1::AddrV1 ip: IpAddr, /// The peer's TCP port. diff --git a/zebra-network/src/protocol/external/arbitrary.rs b/zebra-network/src/protocol/external/arbitrary.rs index 830b11386f4..de5369283f1 100644 --- a/zebra-network/src/protocol/external/arbitrary.rs +++ b/zebra-network/src/protocol/external/arbitrary.rs @@ -29,7 +29,7 @@ impl InventoryHash { .boxed() } - /// Generate a proptest strategy for [`InventotryHash::Block`] hashes. + /// Generate a proptest strategy for [`InventoryHash::Block`] hashes. pub fn block_strategy() -> BoxedStrategy { (any::<[u8; 32]>()) .prop_map(block::Hash) @@ -126,14 +126,19 @@ impl Arbitrary for Version { /// Returns a random canonical Zebra `SocketAddr`. /// -/// See [`canonical_ip_addr`](super::addr::canonical_ip_addr) for details. +/// See [`canonical_ip_addr`] for details. +/// +/// [`canonical_ip_addr`]: super::addr::canonical::canonical_ip_addr pub fn canonical_socket_addr_strategy() -> impl Strategy { any::().prop_map(canonical_socket_addr) } -/// Returns a random `SocketAddrV6` for use in `addr` (v1) Zcash network messages. +/// Returns a random `SocketAddrV6` for use in `addr` (v1) Zcash network +/// messages. +/// +/// See [`canonical_ip_addr`] for details. 
/// -/// See [`canonical_ip_addr`](super::addr::canonical_ip_addr) for details. +/// [`canonical_ip_addr`]: super::addr::canonical::canonical_ip_addr pub fn addr_v1_ipv6_mapped_socket_addr_strategy() -> impl Strategy { any::().prop_map(ipv6_mapped_socket_addr) } diff --git a/zebra-network/src/protocol/external/message.rs b/zebra-network/src/protocol/external/message.rs index a55c2063052..11ee9e55c38 100644 --- a/zebra-network/src/protocol/external/message.rs +++ b/zebra-network/src/protocol/external/message.rs @@ -103,7 +103,7 @@ pub enum Message { /// /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#ping) Ping( - /// A nonce unique to this [`Ping`] message. + /// A nonce unique to this [`Self::Ping`] message. Nonce, ), @@ -111,7 +111,7 @@ pub enum Message { /// /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#pong) Pong( - /// The nonce from the [`Ping`] message this was in response to. + /// The nonce from the [`Self::Ping`] message this was in response to. Nonce, ), diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs index 3e8601bd934..089c44c451c 100644 --- a/zebra-state/src/lib.rs +++ b/zebra-state/src/lib.rs @@ -11,6 +11,7 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] #![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_state")] +#![allow(rustdoc::private_intra_doc_links)] #[macro_use] extern crate tracing; diff --git a/zebra-test/src/lib.rs b/zebra-test/src/lib.rs index 254eff71daf..a9476529cb7 100644 --- a/zebra-test/src/lib.rs +++ b/zebra-test/src/lib.rs @@ -2,9 +2,9 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] #![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_test")] +#![allow(rustdoc::private_intra_doc_links)] // Each lazy_static variable uses additional recursion #![recursion_limit = "512"] - use color_eyre::section::PanicMessage; use once_cell::sync::Lazy; use owo_colors::OwoColorize; diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs index 2454dea0da3..844624bb92e 100644 --- a/zebrad/src/components/mempool.rs +++ b/zebrad/src/components/mempool.rs @@ -111,7 +111,7 @@ impl Drop for ActiveState { } impl ActiveState { - /// Returns the current state, leaving a [`Disabled`] in its place. + /// Returns the current state, leaving [`Self::Disabled`] in its place. fn take(&mut self) -> Self { std::mem::take(self) } diff --git a/zebrad/src/components/mempool/config.rs b/zebrad/src/components/mempool/config.rs index 01708680a06..2b4f20f6a1b 100644 --- a/zebrad/src/components/mempool/config.rs +++ b/zebrad/src/components/mempool/config.rs @@ -20,8 +20,9 @@ pub struct Config { /// The mempool transaction eviction age limit. /// - /// This limits the maximum amount of time evicted transaction IDs stay in the mempool rejection list. - /// Transactions are randomly evicted from the mempool when the mempool reaches [`tx_cost_limit`]. + /// This limits the maximum amount of time evicted transaction IDs stay in + /// the mempool rejection list. Transactions are randomly evicted from the + /// mempool when the mempool reaches [`Self::tx_cost_limit`]. /// /// (Transactions can also be rejected by the mempool for other reasons. /// Different rejection reasons can have different age limits.) 
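// A minimal sketch of the age-limit rule described above, using only the
// standard library. `is_still_rejected` is a hypothetical helper for
// illustration; the real bookkeeping lives in the mempool storage's
// `EvictionList` (see its diff below).

use std::time::{Duration, Instant};

/// An evicted transaction ID only stays in the rejection list while its age
/// is below the configured eviction age limit.
fn is_still_rejected(evicted_at: Instant, eviction_memory_time: Duration) -> bool {
    evicted_at.elapsed() < eviction_memory_time
}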
diff --git a/zebrad/src/components/mempool/downloads.rs b/zebrad/src/components/mempool/downloads.rs index 7eda18f4e5a..c8135faee2a 100644 --- a/zebrad/src/components/mempool/downloads.rs +++ b/zebrad/src/components/mempool/downloads.rs @@ -12,15 +12,19 @@ //! //! # Correctness //! -//! The mempool downloader doesn't send verified transactions to the [`Mempool`] service. -//! So Zebra must spawn a task that regularly polls the downloader for ready transactions. -//! (To ensure that transactions propagate across the entire network in each 75s block interval, -//! the polling interval should be around 5-10 seconds.) +//! The mempool downloader doesn't send verified transactions to the [`Mempool`] +//! service. So Zebra must spawn a task that regularly polls the downloader for +//! ready transactions. (To ensure that transactions propagate across the entire +//! network in each 75s block interval, the polling interval should be around +//! 5-10 seconds.) //! //! Polling the downloader from [`Mempool::poll_ready`] is not sufficient. //! [`Service::poll_ready`] is only called when there is a service request. //! But we want to download and gossip transactions, //! even when there are no other service requests. +//! +//! [`Mempool`]: super::Mempool +//! [`Mempool::poll_ready`]: super::Mempool::poll_ready use std::{ collections::{HashMap, HashSet}, pin::Pin, diff --git a/zebrad/src/components/mempool/gossip.rs b/zebrad/src/components/mempool/gossip.rs index 93037a5cda9..b55dc37ce41 100644 --- a/zebrad/src/components/mempool/gossip.rs +++ b/zebrad/src/components/mempool/gossip.rs @@ -16,9 +16,12 @@ use crate::BoxError; use crate::components::sync::TIPS_RESPONSE_TIMEOUT; -/// Run continuously, gossiping new [`zebra_chain::transaction::UnminedTxId`] to peers. +/// Runs continuously, gossiping new [`UnminedTxId`] to peers. /// -/// Broadcast any [`transaction::UnminedTxId`] that gets stored in the mempool to all ready peers. +/// Broadcasts any [`UnminedTxId`] that gets stored in the mempool to all ready +/// peers. +/// +/// [`UnminedTxId`]: zebra_chain::transaction::UnminedTxId pub async fn gossip_mempool_transaction_id( mut receiver: watch::Receiver>, broadcast_network: ZN, diff --git a/zebrad/src/components/mempool/storage.rs b/zebrad/src/components/mempool/storage.rs index 83e49d8e948..2af476e57ef 100644 --- a/zebrad/src/components/mempool/storage.rs +++ b/zebrad/src/components/mempool/storage.rs @@ -341,7 +341,8 @@ impl Storage { /// Returns the set of [`UnminedTx`]es with matching [`transaction::Hash`]es /// in the mempool. /// - /// This matches transactions with the same effects, regardless of [`AuthDigest`]. + /// This matches transactions with the same effects, regardless of + /// [`transaction::AuthDigest`]. pub fn transactions_same_effects( &self, tx_ids: HashSet, diff --git a/zebrad/src/components/mempool/storage/eviction_list.rs b/zebrad/src/components/mempool/storage/eviction_list.rs index 2809608e406..8f0c001a11b 100644 --- a/zebrad/src/components/mempool/storage/eviction_list.rs +++ b/zebrad/src/components/mempool/storage/eviction_list.rs @@ -17,7 +17,9 @@ pub struct EvictionList { // The maximum size of `unique_entries`. max_size: usize, /// The mempool transaction eviction age limit. - /// Same as [`Config::eviction_memory_time`]. + /// Same as [`Config::eviction_memory_time`][1]. 
+ /// + /// [1]: super::super::Config::eviction_memory_time eviction_memory_time: Duration, } diff --git a/zebrad/src/components/sync/gossip.rs b/zebrad/src/components/sync/gossip.rs index 06745349e57..09edc630d0b 100644 --- a/zebrad/src/components/sync/gossip.rs +++ b/zebrad/src/components/sync/gossip.rs @@ -1,4 +1,6 @@ //! A task that gossips newly verified [`block::Hash`]es to peers. +//! +//! [`block::Hash`]: zebra_chain::block::Hash use thiserror::Error; use tokio::sync::watch; @@ -37,6 +39,8 @@ pub enum BlockGossipError { /// /// In particular, if a lot of blocks are committed at the same time, /// gossips will be disabled or skipped until the state reaches the latest tip. +/// +/// [`block::Hash`]: zebra_chain::block::Hash pub async fn gossip_best_tip_block_hashes( mut sync_status: SyncStatus, mut chain_state: ChainTipChange, diff --git a/zebrad/src/components/sync/recent_sync_lengths.rs b/zebrad/src/components/sync/recent_sync_lengths.rs index 2aa59d8b079..acf32f07813 100644 --- a/zebrad/src/components/sync/recent_sync_lengths.rs +++ b/zebrad/src/components/sync/recent_sync_lengths.rs @@ -51,7 +51,10 @@ impl RecentSyncLengths { // rather than asking peers for the next blocks in the chain. // (And if genesis downloads kept failing, we could accidentally activate the mempool.) - /// Insert a sync length from [`ChainSync::obtain_tips`] at the front of the list. + /// Insert a sync length from [`ChainSync::obtain_tips`] at the front of the + /// list. + /// + /// [`ChainSync::obtain_tips`]: super::ChainSync::obtain_tips #[instrument(skip(self), fields(self.recent_lengths))] pub fn push_obtain_tips_length(&mut self, sync_length: usize) { // currently, we treat lengths from obtain and extend tips exactly the same, @@ -62,7 +65,10 @@ impl RecentSyncLengths { self.update(sync_length) } - /// Insert a sync length from [`ChainSync::extend_tips`] at the front of the list. + /// Insert a sync length from [`ChainSync::extend_tips`] at the front of the + /// list. + /// + /// [`ChainSync::extend_tips`]: super::ChainSync::extend_tips #[instrument(skip(self), fields(self.recent_lengths))] pub fn push_extend_tips_length(&mut self, sync_length: usize) { self.update(sync_length) diff --git a/zebrad/src/lib.rs b/zebrad/src/lib.rs index b697612578d..5711c50c098 100644 --- a/zebrad/src/lib.rs +++ b/zebrad/src/lib.rs @@ -17,6 +17,7 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] #![doc(html_root_url = "https://doc.zebra.zfnd.org/zebrad")] +#![allow(rustdoc::private_intra_doc_links)] // Tracing causes false positives on this lint: // https://github.com/tokio-rs/tracing/issues/553 #![allow(clippy::cognitive_complexity)] From 83b4e6f97516da555aad9756dfe0f7c16e4e2943 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 15 Jun 2022 16:43:20 +1000 Subject: [PATCH 03/91] feat(diagnostics): Add tokio-console support to zebrad (#4519) * Always activate tokio/tracing feature And always build tests with all tokio features. * Refactor tracing-subscriber init to simplify it * Add the tokio-console feature and dependencies * Add optional tokio-console support, and log the installed tracing layers at info level Uses a tracing Registry for tokio-console, and a fmt::Subscriber otherwise. 
* Add some TODOs based on tracing-subscriber features * Fix up some spans * Add a TODO for fixing a log filter bug in tokio-console mode --- .cargo/config.toml | 2 + Cargo.lock | 41 +++++- tower-batch/Cargo.toml | 4 +- tower-fallback/Cargo.toml | 3 +- zebra-chain/Cargo.toml | 6 +- zebra-consensus/Cargo.toml | 3 +- zebra-network/Cargo.toml | 2 +- zebra-network/src/peer_set/initialize.rs | 33 +++-- zebra-rpc/Cargo.toml | 3 +- zebra-state/Cargo.toml | 5 +- zebra-test/Cargo.toml | 2 +- zebrad/Cargo.toml | 19 ++- zebrad/src/components/tracing/component.rs | 146 ++++++++++++++++----- 13 files changed, 212 insertions(+), 57 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 4e644a328ad..6d9e7e56c07 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,4 +1,6 @@ +# Zebra cargo configuration +# Flags that apply to all Zebra crates and configurations [target.'cfg(all())'] rustflags = [ # Zebra standard lints for Rust 1.58+ diff --git a/Cargo.lock b/Cargo.lock index 961c5d17f9e..dcd650d51c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -854,6 +854,42 @@ dependencies = [ "winapi", ] +[[package]] +name = "console-api" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06c5fd425783d81668ed68ec98408a80498fb4ae2fd607797539e1a9dfa3618f" +dependencies = [ + "prost", + "prost-types", + "tonic", + "tracing-core", +] + +[[package]] +name = "console-subscriber" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31432bc31ff8883bf6a693a79371862f73087822470c82d6a1ec778781ee3978" +dependencies = [ + "console-api", + "crossbeam-channel", + "crossbeam-utils", + "futures", + "hdrhistogram", + "humantime", + "prost-types", + "serde", + "serde_json", + "thread_local", + "tokio", + "tokio-stream", + "tonic", + "tracing", + "tracing-core", + "tracing-subscriber 0.3.11", +] + [[package]] name = "const-oid" version = "0.6.2" @@ -5428,9 +5464,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.22" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03cfcb51380632a72d3111cb8d3447a8d908e577d31beeac006f836383d29a23" +checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" dependencies = [ "lazy_static", "valuable", @@ -6403,6 +6439,7 @@ dependencies = [ "atty", "chrono", "color-eyre", + "console-subscriber", "dirs", "futures", "gumdrop", diff --git a/tower-batch/Cargo.toml b/tower-batch/Cargo.toml index 88202f82cb2..550b88b08f8 100644 --- a/tower-batch/Cargo.toml +++ b/tower-batch/Cargo.toml @@ -18,9 +18,11 @@ tracing-futures = "0.2.5" color-eyre = "0.6.1" ed25519-zebra = "3.0.0" rand = { version = "0.8.5", package = "rand" } -tokio = { version = "1.19.2", features = ["full"] } + +tokio = { version = "1.19.2", features = ["full", "tracing", "test-util"] } tokio-test = "0.4.2" tower-fallback = { path = "../tower-fallback/" } tower-test = "0.4.0" tracing = "0.1.31" + zebra-test = { path = "../zebra-test/" } diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 34ba264628e..0a617cc2347 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -12,5 +12,6 @@ futures-core = "0.3.21" tracing = "0.1.31" [dev-dependencies] +tokio = { version = "1.19.2", features = ["full", "tracing", "test-util"] } + zebra-test = { path = "../zebra-test/" } -tokio = { version = "1.19.2", features = ["full"] } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index c44ed0bbb96..6be5f905545 100644 --- 
a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -62,7 +62,8 @@ proptest-derive = { version = "0.3.0", optional = true } rand = { version = "0.8.5", optional = true, package = "rand" } rand_chacha = { version = "0.3.1", optional = true } -tokio = { version = "1.19.2", optional = true } + +tokio = { version = "1.19.2", features = ["tracing"], optional = true } # ZF deps ed25519-zebra = "3.0.0" @@ -79,10 +80,11 @@ tracing = "0.1.31" proptest = "0.10.1" proptest-derive = "0.3.0" + rand = { version = "0.8.5", package = "rand" } rand_chacha = "0.3.1" -tokio = "1.19.2" +tokio = { version = "1.19.2", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 894a4ad1edc..eadbb5ffeca 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -57,7 +57,8 @@ proptest = "0.10.1" proptest-derive = "0.3.0" rand07 = { package = "rand", version = "0.7" } spandoc = "0.2.2" -tokio = { version = "1.19.2", features = ["full"] } + +tokio = { version = "1.19.2", features = ["full", "tracing", "test-util"] } tracing-error = "0.2.0" tracing-subscriber = "0.3.11" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index a55e18b8671..a24abbbe901 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -53,7 +53,7 @@ proptest = "0.10.1" proptest-derive = "0.3.0" static_assertions = "1.1.0" -tokio = { version = "1.19.2", features = ["test-util"] } +tokio = { version = "1.19.2", features = ["full", "tracing", "test-util"] } toml = "0.5.9" zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 66fca1fb660..4de951bc2ab 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -25,7 +25,6 @@ use tokio_stream::wrappers::IntervalStream; use tower::{ buffer::Buffer, discover::Change, layer::Layer, util::BoxService, Service, ServiceExt, }; -use tracing::Span; use tracing_futures::Instrument; use zebra_chain::{chain_tip::ChainTip, parameters::Network}; @@ -179,7 +178,7 @@ where listen_handshaker, peerset_tx.clone(), ); - let listen_guard = tokio::spawn(listen_fut.instrument(Span::current())); + let listen_guard = tokio::spawn(listen_fut.in_current_span()); // 2. Initial peers, specified in the config. let initial_peers_fut = add_initial_peers( @@ -188,7 +187,7 @@ where peerset_tx.clone(), address_book_updater, ); - let initial_peers_join = tokio::spawn(initial_peers_fut.instrument(Span::current())); + let initial_peers_join = tokio::spawn(initial_peers_fut.in_current_span()); // 3. Outgoing peers we connect to in response to load. let mut candidates = CandidateSet::new(address_book.clone(), peer_set.clone()); @@ -228,7 +227,7 @@ where peerset_tx, active_outbound_connections, ); - let crawl_guard = tokio::spawn(crawl_fut.instrument(Span::current())); + let crawl_guard = tokio::spawn(crawl_fut.in_current_span()); handle_tx .send(vec![listen_guard, crawl_guard, address_book_updater_guard]) @@ -646,15 +645,20 @@ enum CrawlerAction { /// /// Uses `active_outbound_connections` to limit the number of active outbound connections /// across both the initial peers and crawler. The limit is based on `config`. 
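// A rough sketch of the connection-limit check described above.
// `OutboundConnectionLimit` is a hypothetical type for illustration only:
// the real counter frees slots asynchronously using `ConnectionClosed`
// signals (see `peer_set/limit.rs` earlier in this patch).

struct OutboundConnectionLimit {
    active: usize,
    limit: usize,
}

impl OutboundConnectionLimit {
    /// Reserves a slot for a new outbound connection, if the limit allows it.
    fn try_reserve(&mut self) -> bool {
        if self.active < self.limit {
            self.active += 1;
            true
        } else {
            false
        }
    }
}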
-#[instrument(skip( - config, - demand_tx, - demand_rx, - candidates, - outbound_connector, - peerset_tx, - active_outbound_connections, -))] +#[instrument( + skip( + config, + demand_tx, + demand_rx, + candidates, + outbound_connector, + peerset_tx, + active_outbound_connections, + ), + fields( + new_peer_interval = ?config.crawl_new_peer_interval, + ) +)] async fn crawl_and_dial( config: Config, mut demand_tx: futures::channel::mpsc::Sender, @@ -761,7 +765,8 @@ where panic!("panic during handshaking with {:?}: {:?} ", candidate, e); } }) - .instrument(Span::current()); + .in_current_span(); + handshakes.push(Box::pin(hs_join)); } DemandCrawl => { diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 13a15673388..e46c4210fa1 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -48,7 +48,8 @@ proptest = "0.10.1" proptest-derive = "0.3.0" serde_json = "1.0.81" thiserror = "1.0.31" -tokio = { version = "1.19.2", features = ["full", "test-util"] } + +tokio = { version = "1.19.2", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-state = { path = "../zebra-state", features = ["proptest-impl"] } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index b136baab27f..504cdffde2d 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -27,7 +27,8 @@ rocksdb = { version = "0.18.0", default_features = false, features = ["lz4"] } serde = { version = "1.0.137", features = ["serde_derive"] } tempfile = "3.3.0" thiserror = "1.0.31" -tokio = { version = "1.19.2", features = ["sync"] } + +tokio = { version = "1.19.2", features = ["sync", "tracing"] } tower = { version = "0.4.12", features = ["buffer", "util"] } tracing = "0.1.31" @@ -48,7 +49,7 @@ proptest-derive = "0.3.0" halo2 = { package = "halo2_proofs", version = "0.1.0" } jubjub = "0.9.0" -tokio = { version = "1.19.2", features = ["full"] } +tokio = { version = "1.19.2", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index ca6bb2df4d6..0041a980d3a 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -17,7 +17,7 @@ once_cell = "1.12.0" rand = { version = "0.8.5", package = "rand" } regex = "1.5.6" -tokio = { version = "1.19.2", features = ["full"] } +tokio = { version = "1.19.2", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.12", features = ["util"] } futures = "0.3.21" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index fff90f6036e..1c4c3a29c6b 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -42,6 +42,19 @@ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl", "ze # The gRPC tests also need an installed lightwalletd binary lightwalletd-grpc-tests = ["tonic-build"] +# tokio-console support +# +# To activate this feature, run: +# ```sh +# RUSTFLAGS="--cfg tokio_unstable" cargo build --no-default-features --features="tokio-console" --bin zebrad +# ``` +# +# The console-subscriber is incompatible with the tracing/max_level_* features. 
+# +# For more details, see: +# https://github.com/tokio-rs/console/blob/main/console-subscriber/README.md#enabling-tokio-instrumentation +tokio-console = ["console-subscriber"] + # TODO: replace with environmental variables that skip the tests when not set (part of #2995) test_sync_to_mandatory_checkpoint_mainnet = [] test_sync_to_mandatory_checkpoint_testnet = [] @@ -105,6 +118,9 @@ log = "0.4.17" proptest = { version = "0.10.1", optional = true } proptest-derive = { version = "0.3.0", optional = true } +# test feature tokio-console +console-subscriber = { version = "0.1.6", optional = true } + [build-dependencies] vergen = { version = "7.2.1", default-features = false, features = ["cargo", "git"] } @@ -121,7 +137,8 @@ semver = "1.0.10" # zebra-rpc needs the preserve_order feature, it also makes test results more stable serde_json = { version = "1.0.81", features = ["preserve_order"] } tempfile = "3.3.0" -tokio = { version = "1.19.2", features = ["full", "test-util"] } + +tokio = { version = "1.19.2", features = ["full", "tracing", "test-util"] } tokio-stream = "0.1.9" # test feature lightwalletd-grpc-tests diff --git a/zebrad/src/components/tracing/component.rs b/zebrad/src/components/tracing/component.rs index eff9492e7fe..fd23b3cdda4 100644 --- a/zebrad/src/components/tracing/component.rs +++ b/zebrad/src/components/tracing/component.rs @@ -4,7 +4,6 @@ use abscissa_core::{Component, FrameworkError, FrameworkErrorKind, Shutdown}; use tracing_error::ErrorLayer; use tracing_subscriber::{ fmt::Formatter, layer::SubscriberExt, reload::Handle, util::SubscriberInitExt, EnvFilter, - FmtSubscriber, }; use crate::{application::app_version, config::TracingSection}; @@ -13,7 +12,15 @@ use super::flame; /// Abscissa component for initializing the `tracing` subsystem pub struct Tracing { - filter_handle: Handle, + /// The installed filter reloading handle, if enabled. + // + // TODO: when fmt::Subscriber supports per-layer filtering, remove the Option + filter_handle: Option>, + + /// The originally configured filter. + initial_filter: String, + + /// The installed flame graph collector, if enabled. flamegrapher: Option, } @@ -28,58 +35,129 @@ impl Tracing { let use_color = config.force_use_color || (config.use_color && atty::is(atty::Stream::Stdout)); - // Construct a tracing subscriber with the supplied filter and enable reloading. - let builder = FmtSubscriber::builder() - .with_ansi(use_color) - .with_env_filter(&filter) - .with_filter_reloading(); - let filter_handle = builder.reload_handle(); + // Construct a format subscriber with the supplied global logging filter, and enable reloading. + // TODO: when fmt::Subscriber supports per-layer filtering, always enable this code + #[cfg(not(all(feature = "tokio-console", tokio_unstable)))] + let (subscriber, filter_handle) = { + use tracing_subscriber::FmtSubscriber; + + let logger = FmtSubscriber::builder() + .with_ansi(use_color) + .with_env_filter(&filter) + .with_filter_reloading(); + + let filter_handle = logger.reload_handle(); + let subscriber = logger.finish().with(ErrorLayer::default()); + + (subscriber, Some(filter_handle)) + }; + + // Construct a tracing registry with the supplied per-layer logging filter, + // and disable filter reloading. 
+ // + // TODO: when fmt::Subscriber supports per-layer filtering, + // remove this registry code, and layer tokio-console on top of fmt::Subscriber + #[cfg(all(feature = "tokio-console", tokio_unstable))] + let (subscriber, filter_handle) = { + use tracing_subscriber::{fmt, Layer}; + + let subscriber = tracing_subscriber::registry(); + // TODO: find out why crawl_and_dial and try_to_sync evade this filter, + // and why they also don't get the global net/commit span + // + // Using `registry` as the base subscriber, the logs from most other functions get filtered. + // Using `FmtSubscriber` as the base subscriber, all the logs get filtered. + let logger = fmt::Layer::new() + .with_ansi(use_color) + .with_filter(EnvFilter::from(&filter)); + + let subscriber = subscriber.with(logger); + + let span_logger = ErrorLayer::default().with_filter(EnvFilter::from(&filter)); + let subscriber = subscriber.with(span_logger); + + (subscriber, None) + }; + // Add optional layers based on dynamic and compile-time configs + + // Add a flamegraph let (flamelayer, flamegrapher) = if let Some(path) = flame_root { let (flamelayer, flamegrapher) = flame::layer(path); + (Some(flamelayer), Some(flamegrapher)) } else { (None, None) }; + let subscriber = subscriber.with(flamelayer); let journaldlayer = if config.use_journald { let layer = tracing_journald::layer() .map_err(|e| FrameworkErrorKind::ComponentError.context(e))?; + + // If the global filter can't be used, add a per-layer filter instead. + // TODO: when fmt::Subscriber supports per-layer filtering, always enable this code + #[cfg(all(feature = "tokio-console", tokio_unstable))] + let layer = { + use tracing_subscriber::Layer; + layer.with_filter(EnvFilter::from(&filter)) + }; + Some(layer) } else { None }; - - let subscriber = builder.finish().with(ErrorLayer::default()); + let subscriber = subscriber.with(journaldlayer); #[cfg(feature = "enable-sentry")] let subscriber = subscriber.with(sentry_tracing::layer()); - match (flamelayer, journaldlayer) { - (None, None) => subscriber.init(), - (Some(layer1), None) => subscriber.with(layer1).init(), - (None, Some(layer2)) => subscriber.with(layer2).init(), - (Some(layer1), Some(layer2)) => subscriber.with(layer1).with(layer2).init(), - }; + // spawn the console server in the background, and apply the console layer + // TODO: set Builder::poll_duration_histogram_max() if needed + #[cfg(all(feature = "tokio-console", tokio_unstable))] + let subscriber = subscriber.with(console_subscriber::spawn()); + + // Initialise the global tracing subscriber + subscriber.init(); + // Log the tracing stack we just created tracing::info!( ?filter, TRACING_STATIC_MAX_LEVEL = ?tracing::level_filters::STATIC_MAX_LEVEL, LOG_STATIC_MAX_LEVEL = ?log::STATIC_MAX_LEVEL, "started tracing component", ); + if flame_root.is_some() { + info!("installed flamegraph tracing layer"); + } + if config.use_journald { + info!(?filter, "installed journald tracing layer"); + } + #[cfg(feature = "enable-sentry")] + info!("installed sentry tracing layer"); + #[cfg(all(feature = "tokio-console", tokio_unstable))] + info!( + TRACING_STATIC_MAX_LEVEL = ?tracing::level_filters::STATIC_MAX_LEVEL, + LOG_STATIC_MAX_LEVEL = ?log::STATIC_MAX_LEVEL, + "installed tokio-console tracing layer", + ); Ok(Self { filter_handle, + initial_filter: filter, flamegrapher, }) } /// Return the currently-active tracing filter. 
pub fn filter(&self) -> String {
-        self.filter_handle
-            .with_current(|filter| filter.to_string())
-            .expect("the subscriber is not dropped before the component is")
+        if let Some(filter_handle) = self.filter_handle.as_ref() {
+            filter_handle
+                .with_current(|filter| filter.to_string())
+                .expect("the subscriber is not dropped before the component is")
+        } else {
+            self.initial_filter.clone()
+        }
     }
 
     /// Reload the currently-active filter with the supplied value.
@@ -87,18 +165,26 @@ impl Tracing {
     /// This can be used to provide a dynamic tracing filter endpoint.
     pub fn reload_filter(&self, filter: impl Into<EnvFilter>) {
         let filter = filter.into();
-        let filter_str = filter.to_string();
-        self.filter_handle
-            .reload(filter)
-            .expect("the subscriber is not dropped before the component is");
-
-        tracing::info!(
-            filter = ?filter_str,
-            TRACING_STATIC_MAX_LEVEL = ?tracing::level_filters::STATIC_MAX_LEVEL,
-            LOG_STATIC_MAX_LEVEL = ?log::STATIC_MAX_LEVEL,
-            "reloaded tracing filter",
-        );
+        if let Some(filter_handle) = self.filter_handle.as_ref() {
+            tracing::info!(
+                ?filter,
+                TRACING_STATIC_MAX_LEVEL = ?tracing::level_filters::STATIC_MAX_LEVEL,
+                LOG_STATIC_MAX_LEVEL = ?log::STATIC_MAX_LEVEL,
+                "reloading tracing filter",
+            );
+
+            filter_handle
+                .reload(filter)
+                .expect("the subscriber is not dropped before the component is");
+        } else {
+            tracing::warn!(
+                ?filter,
+                TRACING_STATIC_MAX_LEVEL = ?tracing::level_filters::STATIC_MAX_LEVEL,
+                LOG_STATIC_MAX_LEVEL = ?log::STATIC_MAX_LEVEL,
+                "attempted to reload tracing filter, but filter reloading is disabled",
+            );
+        }
     }
 }

From 9975d5077d00df35936e78bfa1c4627ed3972b5b Mon Sep 17 00:00:00 2001
From: Alfredo Garcia
Date: Wed, 15 Jun 2022 05:05:02 -0300
Subject: [PATCH 04/91] doc(tests): full sync and lightwalletd tests (#4523)

* document full sync and lightwalletd tests
* clippy
* upgrade error
* move env variables

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
---
 zebrad/tests/acceptance.rs          | 93 +++++++++++++++++++++++++++--
 zebrad/tests/common/lightwalletd.rs |  3 +-
 2 files changed, 89 insertions(+), 7 deletions(-)

diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs
index 5fef4b6c934..7c4e9022dd7 100644
--- a/zebrad/tests/acceptance.rs
+++ b/zebrad/tests/acceptance.rs
@@ -20,6 +20,88 @@
 //! If it does not have any IPv4 interfaces, IPv4 localhost is not on `127.0.0.1`,
 //! or you have poor network connectivity,
 //! skip all the network tests by setting the `ZEBRA_SKIP_NETWORK_TESTS` environmental variable.
+//!
+//! ## Large/full sync tests
+//!
+//! This file has sync tests that are marked as ignored because they take too much time to run.
+//! Some of them require environment variables or directories to be present:
+//!
+//! - `FULL_SYNC_MAINNET_TIMEOUT_MINUTES` env variable: The total number of minutes we
+//! will allow this test to run or give up. Value for the Mainnet full sync tests.
+//! - `FULL_SYNC_TESTNET_TIMEOUT_MINUTES` env variable: The total number of minutes we
+//! will allow this test to run or give up. Value for the Testnet full sync tests.
+//! - `/zebrad-cache` directory: For some sync tests, this needs to be created in
+//! the file system, and the created directory should have write permissions.
+//!
+//! Here are some examples of how to run each of the tests:
+//!
+//! ```console
+//! $ cargo test sync_large_checkpoints_mainnet -- --ignored --nocapture
+//!
+//! $ cargo test sync_large_checkpoints_mempool_mainnet -- --ignored --nocapture
+//!
+//! $ sudo mkdir /zebrad-cache
+//! $ sudo chmod 777 /zebrad-cache
+//! $ export FULL_SYNC_MAINNET_TIMEOUT_MINUTES=600
+//! $ cargo test full_sync_mainnet -- --ignored --nocapture
+//!
+//! $ sudo mkdir /zebrad-cache
+//! $ sudo chmod 777 /zebrad-cache
+//! $ export FULL_SYNC_TESTNET_TIMEOUT_MINUTES=600
+//! $ cargo test full_sync_testnet -- --ignored --nocapture
+//! ```
+//!
+//! Please refer to the documentation of each test for more information.
+//!
+//! ## Lightwalletd tests
+//!
+//! The lightwalletd software is an interface service that uses zebrad or zcashd RPC methods
+//! to serve wallets or other applications with blockchain content in an efficient manner.
+//! There are several versions of lightwalletd in the form of different forks. The original
+//! repo is <https://github.com/zcash/lightwalletd>, but Zecwallet Lite uses a custom fork:
+//! <https://github.com/adityapk00/lightwalletd>. The custom fork from adityapk00 is the one
+//! Zebra uses for these tests.
+//!
+//! Zebra lightwalletd tests are not all marked as ignored, but none will run unless
+//! at least the `ZEBRA_TEST_LIGHTWALLETD` environment variable is present:
+//!
+//! - `ZEBRA_TEST_LIGHTWALLETD` env variable: Needs to be present to run any of the lightwalletd tests.
+//! - `ZEBRA_CACHED_STATE_DIR` env variable: The path to a zebra blockchain database.
+//! - `LIGHTWALLETD_DATA_DIR` env variable: The path to a lightwalletd database.
+//! - `--features lightwalletd-grpc-tests` cargo flag: The flag given to cargo to build the source code of the running test.
+//!
+//! Here are some examples of running each test:
+//!
+//! ```console
+//! $ export ZEBRA_TEST_LIGHTWALLETD=true
+//! $ cargo test lightwalletd_integration -- --nocapture
+//!
+//! $ export ZEBRA_TEST_LIGHTWALLETD=true
+//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/chain"
+//! $ export LIGHTWALLETD_DATA_DIR="/path/to/lightwalletd/database"
+//! $ cargo test lightwalletd_update_sync -- --nocapture
+//!
+//! $ export ZEBRA_TEST_LIGHTWALLETD=true
+//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/chain"
+//! $ cargo test lightwalletd_full_sync -- --ignored --nocapture
+//!
+//! $ export ZEBRA_TEST_LIGHTWALLETD=true
+//! $ cargo test lightwalletd_test_suite -- --ignored --nocapture
+//!
+//! $ export ZEBRA_TEST_LIGHTWALLETD=true
+//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/chain"
+//! $ cargo test fully_synced_rpc_test -- --ignored --nocapture
+//!
+//! $ export ZEBRA_TEST_LIGHTWALLETD=true
+//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/chain"
+//! $ export LIGHTWALLETD_DATA_DIR="/path/to/lightwalletd/database"
+//! $ cargo test sending_transactions_using_lightwalletd --features lightwalletd-grpc-tests -- --ignored --nocapture
+//!
+//! $ export ZEBRA_TEST_LIGHTWALLETD=true
+//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/chain"
+//! $ export LIGHTWALLETD_DATA_DIR="/path/to/lightwalletd/database"
+//! $ cargo test lightwalletd_wallet_grpc_tests --features lightwalletd-grpc-tests -- --ignored --nocapture
+//! ```
+//!
+//! Please refer to the documentation of each test for more information.
 
 use std::{collections::HashSet, convert::TryInto, env, path::PathBuf};
@@ -675,6 +757,7 @@ fn sync_past_mandatory_checkpoint(network: Network) -> Result<()> {
 /// The timeout is specified using an environment variable, with the name configured by the
 /// `timeout_argument_name` parameter. The value of the environment variable must be the number of
 /// minutes specified as an integer.
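// A rough illustration of the timeout convention described above, using only
// the standard library. `timeout_from_env` is a hypothetical helper for
// illustration, not part of this patch.

use std::{env, time::Duration};

/// Reads an optional test timeout, in whole minutes, from `var_name`.
fn timeout_from_env(var_name: &str) -> Option<Duration> {
    let minutes: u64 = env::var(var_name).ok()?.parse().ok()?;
    Some(Duration::from_secs(minutes * 60))
}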
+#[allow(clippy::print_stderr)]
 fn full_sync_test(network: Network, timeout_argument_name: &str) -> Result<()> {
     let timeout_argument: Option<u64> = env::var(timeout_argument_name)
         .ok()
@@ -698,11 +781,10 @@ fn full_sync_test(network: Network, timeout_argument_name: &str) -> Result<()> {
             SYNC_FINISHED_REGEX_TMP_STOP_EARLY,
         )
     } else {
-        tracing::info!(
-            ?network,
-            "skipped full sync test, \
-             set the {:?} environmental variable to run the test",
-            timeout_argument_name,
+        eprintln!(
+            "Skipped full sync test for {}, \
+             set the {:?} environmental variable to run the test",
+            network, timeout_argument_name,
         );
 
         Ok(())
@@ -1518,7 +1600,6 @@ async fn fully_synced_rpc_test() -> Result<()> {
     let cached_state_path = test_type.zebrad_state_path("fully_synced_rpc_test".to_string());
 
     if cached_state_path.is_none() {
-        tracing::info!("skipping fully synced zebrad RPC test");
         return Ok(());
     };
 
diff --git a/zebrad/tests/common/lightwalletd.rs b/zebrad/tests/common/lightwalletd.rs
index 552f957890c..fb75e53b23c 100644
--- a/zebrad/tests/common/lightwalletd.rs
+++ b/zebrad/tests/common/lightwalletd.rs
@@ -266,11 +266,12 @@ impl LightwalletdTestType {
     }
 
     /// Returns the Zebra state path for this test, if set.
+    #[allow(clippy::print_stderr)]
     pub fn zebrad_state_path(&self, test_name: String) -> Option<PathBuf> {
         match env::var_os(ZEBRA_CACHED_STATE_DIR) {
             Some(path) => Some(path.into()),
             None => {
-                tracing::info!(
+                eprintln!(
                     "skipped {test_name:?} {self:?} lightwalletd test, \
                      set the {ZEBRA_CACHED_STATE_DIR:?} environment variable to run the test",
                 );

From a224550d8ee6148444be04f76d817d1be69229b4 Mon Sep 17 00:00:00 2001
From: teor
Date: Thu, 16 Jun 2022 07:12:14 +1000
Subject: [PATCH 05/91] Add a draft "breaking changes" section to the next
 changelog (#4624)

---
 CHANGELOG.md | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index be4a6529c0f..8a50b3fc6c2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,23 @@ All notable changes to Zebra are documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org).
 
+## Next Release: [Zebra 1.0.0-beta.12](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-beta.12) - 2022-06-??
+
+This release contains some breaking changes which improve config usability and increase compilation speed.
+
+### Breaking Changes
+
+#### Compile-Time Features
+
+- Most of Zebra's [tracing](https://github.com/ZcashFoundation/zebra/blob/main/book/src/user/tracing.md) and [metrics](https://github.com/ZcashFoundation/zebra/blob/main/book/src/user/metrics.md) features are off by default (#4539)
+- The `enable-sentry` feature has been renamed to `sentry` (#4623)
+
+#### Config
+
+- Times in `zebrad.config` change from seconds/nanoseconds to a [human-readable format](https://docs.rs/humantime/latest/humantime/) (#4587)
+
+TODO: insert changelog here
+
 ## [Zebra 1.0.0-beta.11](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-beta.11) - 2022-06-03
 
 This release cleans up a lot of tech debt accumulated in the previous

From 8160b4c35ad3ecf15eae2e929f4a949413e91eb0 Mon Sep 17 00:00:00 2001
From: Gustavo Valverde
Date: Wed, 15 Jun 2022 17:59:55 -0400
Subject: [PATCH 06/91] feat(actions): delete old GCP resources (#4598)

* feat(actions): delete old GCP resources
* fix(ci): delete old instance templates
* fix(actions): use correct date arguments and conversion
* fix(actions): missing command in gcloud
* fix(gcp): if an instance can't be deleted, continue
* refactor(action): cleanup and execute monthly
---
 .github/workflows/delete-gcp-resources.yml | 33 ++++++++++++++++++++++
 1 file changed, 33 insertions(+)
 create mode 100644 .github/workflows/delete-gcp-resources.yml

diff --git a/.github/workflows/delete-gcp-resources.yml b/.github/workflows/delete-gcp-resources.yml
new file mode 100644
index 00000000000..6cb58f7a857
--- /dev/null
+++ b/.github/workflows/delete-gcp-resources.yml
@@ -0,0 +1,33 @@
+name: Delete GCP resources
+
+on:
+  schedule:
+    - cron: "0 0 1 * *"
+  workflow_dispatch:
+
+jobs:
+  delete-resources:
+    name: Delete old GCP resources
+    runs-on: ubuntu-latest
+    permissions:
+      contents: 'read'
+      id-token: 'write'
+    steps:
+      # Setup gcloud CLI
+      - name: Authenticate to Google Cloud
+        id: auth
+        uses: google-github-actions/auth@v0.8.0
+        with:
+          workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc'
+          service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com'
+          token_format: 'access_token'
+
+      # Deletes all the instance templates older than 30 days
+      - name: Delete old instance templates
+        run: |
+          TEMPLATES=$(gcloud compute instance-templates list --sort-by=creationTimestamp --filter="creationTimestamp < $(date --date='30 days ago' '+%Y%m%d')" --format='value(NAME)')
+
+          for TEMPLATE in $TEMPLATES
+          do
+            gcloud compute instance-templates delete ${TEMPLATE} --quiet || continue
+          done

From b91aaf7863fe3b262117abdd257b7effeb37c571 Mon Sep 17 00:00:00 2001
From: Marek
Date: Thu, 16 Jun 2022 01:21:27 +0200
Subject: [PATCH 07/91] Use `config.toml` instead of inner attributes for
 private links (#4627)

---
 .cargo/config.toml         | 3 +++
 zebra-chain/src/lib.rs     | 1 -
 zebra-consensus/src/lib.rs | 1 -
 zebra-network/src/lib.rs   | 1 -
 zebra-state/src/lib.rs     | 1 -
 zebra-test/src/lib.rs      | 1 -
 zebrad/src/lib.rs          | 1 -
 7 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/.cargo/config.toml b/.cargo/config.toml
index 6d9e7e56c07..f126caa6279 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -49,6 +49,9 @@ rustflags = [
     # Code styles we want to accept
     "-Aclippy::try_err",
 
+    # Links in public docs can point to private items.
+ "-Arustdoc::private_intra_doc_links" + # TODOs: # `cargo fix` might help do these fixes, # or add a config.toml to sub-directories which should allow these lints, diff --git a/zebra-chain/src/lib.rs b/zebra-chain/src/lib.rs index b529a5b44fb..755cd48c9cf 100644 --- a/zebra-chain/src/lib.rs +++ b/zebra-chain/src/lib.rs @@ -6,7 +6,6 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] #![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_chain")] -#![allow(rustdoc::private_intra_doc_links)] // Required by bitvec! macro #![recursion_limit = "256"] diff --git a/zebra-consensus/src/lib.rs b/zebra-consensus/src/lib.rs index 43f1cbc74d1..392971e1b5d 100644 --- a/zebra-consensus/src/lib.rs +++ b/zebra-consensus/src/lib.rs @@ -33,7 +33,6 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] #![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_consensus")] -#![allow(rustdoc::private_intra_doc_links)] mod block; mod checkpoint; diff --git a/zebra-network/src/lib.rs b/zebra-network/src/lib.rs index 172fe7f8caf..a8309fa4042 100644 --- a/zebra-network/src/lib.rs +++ b/zebra-network/src/lib.rs @@ -124,7 +124,6 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] #![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_network")] -#![allow(rustdoc::private_intra_doc_links)] #[macro_use] extern crate pin_project; diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs index 089c44c451c..3e8601bd934 100644 --- a/zebra-state/src/lib.rs +++ b/zebra-state/src/lib.rs @@ -11,7 +11,6 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] #![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_state")] -#![allow(rustdoc::private_intra_doc_links)] #[macro_use] extern crate tracing; diff --git a/zebra-test/src/lib.rs b/zebra-test/src/lib.rs index a9476529cb7..9cd2aa076cb 100644 --- a/zebra-test/src/lib.rs +++ b/zebra-test/src/lib.rs @@ -2,7 +2,6 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] #![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_test")] -#![allow(rustdoc::private_intra_doc_links)] // Each lazy_static variable uses additional recursion #![recursion_limit = "512"] use color_eyre::section::PanicMessage; diff --git a/zebrad/src/lib.rs b/zebrad/src/lib.rs index 5711c50c098..b697612578d 100644 --- a/zebrad/src/lib.rs +++ b/zebrad/src/lib.rs @@ -17,7 +17,6 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] #![doc(html_root_url = "https://doc.zebra.zfnd.org/zebrad")] -#![allow(rustdoc::private_intra_doc_links)] // Tracing causes false positives on this lint: // https://github.com/tokio-rs/tracing/issues/553 #![allow(clippy::cognitive_complexity)] From 29e73b3f3ea296aff4ab08c892bd5121d12b8231 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 17 Jun 2022 05:56:40 +1000 Subject: [PATCH 08/91] breaking(diagnostics): 
make zebrad diagnostics into optional compile-time features (#4539) * Disable the flamegraph feature by default at compile time * Disable the journald feature by default at compile time * Also disable inferno dependency, and rearrange features * Disable the prometheus feature by default at compile time * Disable the tracing filter reload feature by default at compile time * Disable tests when corresponding features are disabled * Add compile-time tracing features to user docs * Add compile-time features to the metrics user docs * Document diagnostics as part of the start command tasks and services * breaking(diagnostics): rename "enable-sentry" feature to "sentry" (#4623) * Also skip conflict tests when those ports are disabled * breaking(diagnostics): rename "enable-sentry" feature to "sentry" This is mostly: ```sh fastmod enable-sentry sentry ``` Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- book/src/user/metrics.md | 3 +- book/src/user/requirements.md | 2 +- book/src/user/tracing.md | 26 ++++++--- docker/Dockerfile | 6 +- docker/zcash-params/Dockerfile | 4 +- zebrad/Cargo.toml | 33 ++++++++--- zebrad/src/application.rs | 4 +- zebrad/src/commands/start.rs | 6 ++ zebrad/src/components/metrics.rs | 16 ++++++ zebrad/src/components/tracing.rs | 4 ++ zebrad/src/components/tracing/component.rs | 64 ++++++++++++++++++---- zebrad/src/components/tracing/endpoint.rs | 33 +++++++++-- zebrad/src/lib.rs | 2 +- zebrad/tests/acceptance.rs | 6 +- 14 files changed, 168 insertions(+), 41 deletions(-) diff --git a/book/src/user/metrics.md b/book/src/user/metrics.md index 5fc69e9304d..c2fb19ed742 100644 --- a/book/src/user/metrics.md +++ b/book/src/user/metrics.md @@ -1,6 +1,7 @@ # Zebra Metrics -Zebra has support for Prometheus, configured using the [`MetricsSection`][metrics_section]. +Zebra has support for Prometheus, configured using the `prometheus` compile-time feature, +and the [`MetricsSection`][metrics_section] runtime configuration. This requires supporting infrastructure to collect and visualize metrics, for example: diff --git a/book/src/user/requirements.md b/book/src/user/requirements.md index f8421eb399c..44adf0be3a9 100644 --- a/book/src/user/requirements.md +++ b/book/src/user/requirements.md @@ -26,7 +26,7 @@ tested its exact limits yet. ## Sentry Production Monitoring -Compile Zebra with `--features enable-sentry` to monitor it using Sentry in production. +Compile Zebra with `--features sentry` to monitor it using Sentry in production. ## Lightwalletd Test Requirements diff --git a/book/src/user/tracing.md b/book/src/user/tracing.md index 620ef406583..97d5b4e863a 100644 --- a/book/src/user/tracing.md +++ b/book/src/user/tracing.md @@ -1,7 +1,12 @@ # Tracing Zebra +## Dynamic Tracing + Zebra supports dynamic tracing, configured using the config's -[`TracingSection`][tracing_section] and (optionally) an HTTP RPC endpoint. +[`TracingSection`][tracing_section] and an HTTP RPC endpoint. + +Activate this feature using the `filter-reload` compile-time feature, +and the [`filter`][filter] and `endpoint_addr` runtime config options. If the `endpoint_addr` is specified, `zebrad` will open an HTTP endpoint allowing dynamic runtime configuration of the tracing filter. For instance, @@ -12,13 +17,20 @@ if the config had `endpoint_addr = '127.0.0.1:3000'`, then See the [`filter`][filter] documentation for more details. 
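
A hedged sketch of driving this endpoint from Rust rather than `curl`, assuming the `reqwest` and `tokio` crates and the `endpoint_addr = '127.0.0.1:3000'` config shown above; the routes it uses are the ones served by `request_handler` in `zebrad/src/components/tracing/endpoint.rs` later in this patch:

```rust
// Sketch only: reads the current filter, then reloads it with a new value.
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let client = reqwest::Client::new();

    // GET /filter returns the currently-active filter string.
    let current = client
        .get("http://127.0.0.1:3000/filter")
        .send()
        .await?
        .text()
        .await?;
    println!("current filter: {current}");

    // POST /filter with the new filter string in the body reloads the filter.
    client
        .post("http://127.0.0.1:3000/filter")
        .body("zebrad=trace")
        .send()
        .await?;

    Ok(())
}
```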
-Zebra also has support for: +## `journald` Logging + +Zebra can send tracing spans and events to [systemd-journald][systemd_journald], +on Linux distributions that use `systemd`. + +Activate `journald` logging using the `journald` compile-time feature, +and the [`use_journald`][use_journald] runtime config option. + +## Flamegraphs + +Zebra can generate [flamegraphs] of tracing spans. -* Generating [flamegraphs] of tracing spans, configured using the -[`flamegraph`][flamegraph] option. -* Sending tracing spans and events to [systemd-journald][systemd_journald], -on Linux distributions that use `systemd`. Configured using the -[`use_journald`][use_journald] option. +Activate flamegraphs using the `flamegraph` compile-time feature, +and the [`flamegraph`][flamegraph] runtime config option. [tracing_section]: https://doc.zebra.zfnd.org/zebrad/config/struct.TracingSection.html [filter]: https://doc.zebra.zfnd.org/zebrad/config/struct.TracingSection.html#structfield.filter diff --git a/docker/Dockerfile b/docker/Dockerfile index a564599e5e9..5bb39b96361 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -100,7 +100,7 @@ COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/lightwalletd /lightwalletd /us # This is the caching Docker layer for Rust! # # TODO: is it faster to use --tests here? -RUN cargo chef cook --release --features enable-sentry,lightwalletd-grpc-tests --workspace --recipe-path recipe.json +RUN cargo chef cook --release --features sentry,lightwalletd-grpc-tests --workspace --recipe-path recipe.json COPY . . RUN cargo test --locked --release --features lightwalletd-grpc-tests --workspace --no-run @@ -118,11 +118,11 @@ CMD [ "cargo"] # `test` stage. This step is a dependency for the `runtime` stage, which uses the resulting # zebrad binary from this step. FROM deps AS release -RUN cargo chef cook --release --features enable-sentry --recipe-path recipe.json +RUN cargo chef cook --release --features sentry --recipe-path recipe.json COPY . . # Build zebra -RUN cargo build --locked --release --features enable-sentry --package zebrad --bin zebrad +RUN cargo build --locked --release --features sentry --package zebrad --bin zebrad # This stage is only used when deploying nodes or when only the resulting zebrad binary is needed # diff --git a/docker/zcash-params/Dockerfile b/docker/zcash-params/Dockerfile index 65da3839c32..c441e171951 100644 --- a/docker/zcash-params/Dockerfile +++ b/docker/zcash-params/Dockerfile @@ -23,7 +23,7 @@ RUN apt-get -qq update && \ ENV CARGO_HOME /app/.cargo/ # Build dependencies - this is the caching Docker layer! -RUN cargo chef cook --release --features enable-sentry --package zebrad --recipe-path recipe.json +RUN cargo chef cook --release --features sentry --package zebrad --recipe-path recipe.json ARG RUST_BACKTRACE=0 ENV RUST_BACKTRACE ${RUST_BACKTRACE} @@ -36,4 +36,4 @@ ENV COLORBT_SHOW_HIDDEN ${COLORBT_SHOW_HIDDEN} COPY . . 
# Pre-download Zcash Sprout and Sapling parameters -RUN cargo run --locked --release --features enable-sentry --package zebrad --bin zebrad download +RUN cargo run --locked --release --features sentry --package zebrad --bin zebrad download diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 1c4c3a29c6b..bc81c110a7b 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -17,7 +17,13 @@ default-run = "zebrad" default = ["release_max_level_info"] # Production features that activate extra dependencies -enable-sentry = ["sentry", "sentry-tracing"] + +sentry = ["dep:sentry", "sentry-tracing"] +flamegraph = ["tracing-flame", "inferno"] +journald = ["tracing-journald"] +filter-reload = ["hyper"] + +prometheus = ["metrics-exporter-prometheus"] # Production features that modify dependency behaviour @@ -78,7 +84,6 @@ lazy_static = "1.4.0" serde = { version = "1.0.137", features = ["serde_derive"] } toml = "0.5.9" -hyper = { version = "0.14.19", features = ["full"] } futures = "0.3.21" tokio = { version = "1.19.2", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } tower = { version = "0.4.12", features = ["hedge", "limit"] } @@ -87,27 +92,36 @@ pin-project = "1.0.10" color-eyre = { version = "0.6.1", default_features = false, features = ["issue-url"] } thiserror = "1.0.31" -tracing-flame = "0.2.0" -tracing-journald = "0.3.0" tracing-subscriber = { version = "0.3.11", features = ["env-filter"] } tracing-error = "0.2.0" tracing-futures = "0.2.5" tracing = "0.1.31" metrics = "0.18.1" -metrics-exporter-prometheus = { version = "0.9.0", default-features = false, features = ["http-listener"] } dirs = "4.0.0" -inferno = { version = "0.11.4", default-features = false } atty = "0.2.14" num-integer = "0.1.45" rand = { version = "0.8.5", package = "rand" } -# prod feature enable-sentry +# prod feature sentry sentry-tracing = { version = "0.26.0", optional = true } sentry = { version = "0.26.0", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls"], optional = true } +# prod feature flamegraph +tracing-flame = { version = "0.2.0", optional = true } +inferno = { version = "0.11.4", default-features = false, optional = true } + +# prod feature journald +tracing-journald = { version = "0.3.0", optional = true } + +# prod feature filter-reload +hyper = { version = "0.14.19", features = ["http1", "http2", "server"], optional = true } + +# prod feature prometheus +metrics-exporter-prometheus = { version = "0.9.0", default-features = false, features = ["http-listener"], optional = true } + # prod feature release_max_level_info # # zebrad uses tracing for logging, @@ -132,12 +146,15 @@ abscissa_core = { version = "0.5", features = ["testing"] } hex = "0.4.3" once_cell = "1.12.0" regex = "1.5.6" -reqwest = "0.11.11" semver = "1.0.10" + # zebra-rpc needs the preserve_order feature, it also makes test results more stable serde_json = { version = "1.0.81", features = ["preserve_order"] } tempfile = "3.3.0" +hyper = { version = "0.14.18", features = ["http1", "http2", "server"]} +reqwest = "0.11.11" + tokio = { version = "1.19.2", features = ["full", "tracing", "test-util"] } tokio-stream = "0.1.9" diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index e78bee43c31..494367216f0 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -314,7 +314,7 @@ impl Application for ZebradApp { // The Sentry default config pulls in the DSN from the `SENTRY_DSN` // environment variable. 
- #[cfg(feature = "enable-sentry")] + #[cfg(feature = "sentry")] let guard = sentry::init(sentry::ClientOptions { debug: true, release: Some(app_version().to_string().into()), @@ -325,7 +325,7 @@ impl Application for ZebradApp { let panic_report = panic_hook.panic_report(panic_info); eprintln!("{}", panic_report); - #[cfg(feature = "enable-sentry")] + #[cfg(feature = "sentry")] { let event = crate::sentry::panic_event_from(panic_report); sentry::capture_event(event); diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 05b4d726fcb..9a1bce4b3b1 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -58,6 +58,12 @@ //! * JSON-RPC Service //! * answers RPC client requests using the State Service and Mempool Service //! * submits client transactions to the node's mempool +//! +//! Zebra also has diagnostic support +//! * [metrics](https://github.com/ZcashFoundation/zebra/blob/main/book/src/user/metrics.md) +//! * [tracing](https://github.com/ZcashFoundation/zebra/blob/main/book/src/user/tracing.md) +//! +//! Some of the diagnostic features are optional, and need to be enabled at compile-time. use std::{cmp::max, ops::Add, time::Duration}; diff --git a/zebrad/src/components/metrics.rs b/zebrad/src/components/metrics.rs index 157e4646fc4..3697561471d 100644 --- a/zebrad/src/components/metrics.rs +++ b/zebrad/src/components/metrics.rs @@ -10,6 +10,7 @@ pub struct MetricsEndpoint {} impl MetricsEndpoint { /// Create the component. + #[cfg(feature = "prometheus")] pub fn new(config: &ZebradConfig) -> Result { if let Some(addr) = config.metrics.endpoint_addr { info!("Trying to open metrics endpoint at {}...", addr); @@ -38,6 +39,21 @@ impl MetricsEndpoint { ), } } + + Ok(Self {}) + } + + /// Create the component. + #[cfg(not(feature = "prometheus"))] + pub fn new(config: &ZebradConfig) -> Result { + if let Some(addr) = config.metrics.endpoint_addr { + warn!( + ?addr, + "unable to activate configured metrics endpoint: \ + enable the 'prometheus' feature when compiling zebrad", + ); + } + Ok(Self {}) } } diff --git a/zebrad/src/components/tracing.rs b/zebrad/src/components/tracing.rs index 804b54739af..d78739cba7a 100644 --- a/zebrad/src/components/tracing.rs +++ b/zebrad/src/components/tracing.rs @@ -2,8 +2,12 @@ mod component; mod endpoint; + +#[cfg(feature = "flamegraph")] mod flame; pub use component::Tracing; pub use endpoint::TracingEndpoint; + +#[cfg(feature = "flamegraph")] pub use flame::{layer, Grapher}; diff --git a/zebrad/src/components/tracing/component.rs b/zebrad/src/components/tracing/component.rs index fd23b3cdda4..bab5675d100 100644 --- a/zebrad/src/components/tracing/component.rs +++ b/zebrad/src/components/tracing/component.rs @@ -1,6 +1,6 @@ //! The Abscissa component for Zebra's `tracing` implementation. -use abscissa_core::{Component, FrameworkError, FrameworkErrorKind, Shutdown}; +use abscissa_core::{Component, FrameworkError, Shutdown}; use tracing_error::ErrorLayer; use tracing_subscriber::{ fmt::Formatter, layer::SubscriberExt, reload::Handle, util::SubscriberInitExt, EnvFilter, @@ -8,6 +8,7 @@ use tracing_subscriber::{ use crate::{application::app_version, config::TracingSection}; +#[cfg(feature = "flamegraph")] use super::flame; /// Abscissa component for initializing the `tracing` subsystem @@ -21,6 +22,7 @@ pub struct Tracing { initial_filter: String, /// The installed flame graph collector, if enabled. 
+ #[cfg(feature = "flamegraph")] flamegrapher: Option, } @@ -35,7 +37,9 @@ impl Tracing { let use_color = config.force_use_color || (config.use_color && atty::is(atty::Stream::Stdout)); - // Construct a format subscriber with the supplied global logging filter, and enable reloading. + // Construct a format subscriber with the supplied global logging filter, + // and optionally enable reloading. + // // TODO: when fmt::Subscriber supports per-layer filtering, always enable this code #[cfg(not(all(feature = "tokio-console", tokio_unstable)))] let (subscriber, filter_handle) = { @@ -43,13 +47,22 @@ impl Tracing { let logger = FmtSubscriber::builder() .with_ansi(use_color) - .with_env_filter(&filter) - .with_filter_reloading(); + .with_env_filter(&filter); + + // Enable reloading if that feature is selected. + #[cfg(feature = "filter-reload")] + let (filter_handle, logger) = { + let logger = logger.with_filter_reloading(); + + (Some(logger.reload_handle()), logger) + }; + + #[cfg(not(feature = "filter-reload"))] + let filter_handle = None; - let filter_handle = logger.reload_handle(); let subscriber = logger.finish().with(ErrorLayer::default()); - (subscriber, Some(filter_handle)) + (subscriber, filter_handle) }; // Construct a tracing registry with the supplied per-layer logging filter, @@ -82,6 +95,7 @@ impl Tracing { // Add optional layers based on dynamic and compile-time configs // Add a flamegraph + #[cfg(feature = "flamegraph")] let (flamelayer, flamegrapher) = if let Some(path) = flame_root { let (flamelayer, flamegrapher) = flame::layer(path); @@ -89,9 +103,13 @@ impl Tracing { } else { (None, None) }; + #[cfg(feature = "flamegraph")] let subscriber = subscriber.with(flamelayer); + #[cfg(feature = "journald")] let journaldlayer = if config.use_journald { + use abscissa_core::FrameworkErrorKind; + let layer = tracing_journald::layer() .map_err(|e| FrameworkErrorKind::ComponentError.context(e))?; @@ -107,9 +125,10 @@ impl Tracing { } else { None }; + #[cfg(feature = "journald")] let subscriber = subscriber.with(journaldlayer); - #[cfg(feature = "enable-sentry")] + #[cfg(feature = "sentry")] let subscriber = subscriber.with(sentry_tracing::layer()); // spawn the console server in the background, and apply the console layer @@ -127,14 +146,33 @@ impl Tracing { LOG_STATIC_MAX_LEVEL = ?log::STATIC_MAX_LEVEL, "started tracing component", ); + if flame_root.is_some() { - info!("installed flamegraph tracing layer"); + if cfg!(feature = "flamegraph") { + info!(flamegraph = ?flame_root, "installed flamegraph tracing layer"); + } else { + warn!( + flamegraph = ?flame_root, + "unable to activate configured flamegraph: \ + enable the 'flamegraph' feature when compiling zebrad", + ); + } } + if config.use_journald { - info!(?filter, "installed journald tracing layer"); + if cfg!(feature = "journald") { + info!("installed journald tracing layer"); + } else { + warn!( + "unable to activate configured journald tracing: \ + enable the 'journald' feature when compiling zebrad", + ); + } } - #[cfg(feature = "enable-sentry")] + + #[cfg(feature = "sentry")] info!("installed sentry tracing layer"); + #[cfg(all(feature = "tokio-console", tokio_unstable))] info!( TRACING_STATIC_MAX_LEVEL = ?tracing::level_filters::STATIC_MAX_LEVEL, @@ -145,6 +183,7 @@ impl Tracing { Ok(Self { filter_handle, initial_filter: filter, + #[cfg(feature = "flamegraph")] flamegrapher, }) } @@ -204,12 +243,17 @@ impl Component for Tracing { } fn before_shutdown(&self, _kind: Shutdown) -> Result<(), FrameworkError> { + #[cfg(feature = 
"flamegraph")] if let Some(ref grapher) = self.flamegrapher { + use abscissa_core::FrameworkErrorKind; + info!("writing flamegraph"); + grapher .write_flamegraph() .map_err(|e| FrameworkErrorKind::ComponentError.context(e))? } + Ok(()) } } diff --git a/zebrad/src/components/tracing/endpoint.rs b/zebrad/src/components/tracing/endpoint.rs index 706a5fb73a5..56b5ef8288d 100644 --- a/zebrad/src/components/tracing/endpoint.rs +++ b/zebrad/src/components/tracing/endpoint.rs @@ -3,20 +3,27 @@ use std::net::SocketAddr; use abscissa_core::{Component, FrameworkError}; -use hyper::service::{make_service_fn, service_fn}; -use hyper::{Body, Request, Response, Server}; -use crate::{components::tokio::TokioComponent, config::ZebradConfig, prelude::*}; +use crate::config::ZebradConfig; -use super::Tracing; +#[cfg(feature = "filter-reload")] +use hyper::{Body, Request, Response}; + +#[cfg(feature = "filter-reload")] +use crate::{components::tokio::TokioComponent, prelude::*}; /// Abscissa component which runs a tracing filter endpoint. #[derive(Debug, Component)] -#[component(inject = "init_tokio(zebrad::components::tokio::TokioComponent)")] +#[cfg_attr( + feature = "filter-reload", + component(inject = "init_tokio(zebrad::components::tokio::TokioComponent)") +)] pub struct TracingEndpoint { + #[allow(dead_code)] addr: Option, } +#[cfg(feature = "filter-reload")] async fn read_filter(req: Request) -> Result { std::str::from_utf8( &hyper::body::to_bytes(req.into_body()) @@ -30,12 +37,25 @@ async fn read_filter(req: Request) -> Result { impl TracingEndpoint { /// Create the component. pub fn new(config: &ZebradConfig) -> Result { + if !cfg!(feature = "filter-reload") { + warn!(addr = ?config.tracing.endpoint_addr, + "unable to activate configured tracing filter endpoint: \ + enable the 'filter-reload' feature when compiling zebrad", + ); + } + Ok(Self { addr: config.tracing.endpoint_addr, }) } + #[cfg(feature = "filter-reload")] pub fn init_tokio(&mut self, tokio_component: &TokioComponent) -> Result<(), FrameworkError> { + use hyper::{ + service::{make_service_fn, service_fn}, + Server, + }; + let addr = if let Some(addr) = self.addr { addr } else { @@ -75,10 +95,13 @@ impl TracingEndpoint { } } +#[cfg(feature = "filter-reload")] #[instrument] async fn request_handler(req: Request) -> Result, hyper::Error> { use hyper::{Method, StatusCode}; + use super::Tracing; + let rsp = match (req.method(), req.uri().path()) { (&Method::GET, "/") => Response::new(Body::from( r#" diff --git a/zebrad/src/lib.rs b/zebrad/src/lib.rs index b697612578d..2ce4ed1e6db 100644 --- a/zebrad/src/lib.rs +++ b/zebrad/src/lib.rs @@ -37,5 +37,5 @@ pub mod components; pub mod config; pub mod prelude; -#[cfg(feature = "enable-sentry")] +#[cfg(feature = "sentry")] pub mod sentry; diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 7c4e9022dd7..e3eaf37ee99 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -103,7 +103,7 @@ //! //! Please refer to the documentation of each test for more information. 
-use std::{collections::HashSet, convert::TryInto, env, path::PathBuf}; +use std::{collections::HashSet, env, path::PathBuf}; use color_eyre::{ eyre::{Result, WrapErr}, @@ -866,6 +866,7 @@ fn full_sync_testnet() -> Result<()> { full_sync_test(Testnet, "FULL_SYNC_TESTNET_TIMEOUT_MINUTES") } +#[cfg(feature = "prometheus")] #[tokio::test] async fn metrics_endpoint() -> Result<()> { use hyper::Client; @@ -921,6 +922,7 @@ async fn metrics_endpoint() -> Result<()> { Ok(()) } +#[cfg(feature = "filter-reload")] #[tokio::test] async fn tracing_endpoint() -> Result<()> { use hyper::{Body, Client, Request}; @@ -1386,6 +1388,7 @@ fn zebra_zcash_listener_conflict() -> Result<()> { /// exclusive use of the port. The second node will panic with the Zcash metrics /// conflict hint added in #1535. #[test] +#[cfg(feature = "prometheus")] fn zebra_metrics_conflict() -> Result<()> { zebra_test::init(); @@ -1414,6 +1417,7 @@ fn zebra_metrics_conflict() -> Result<()> { /// exclusive use of the port. The second node will panic with the Zcash tracing /// conflict hint added in #1535. #[test] +#[cfg(feature = "filter-reload")] fn zebra_tracing_conflict() -> Result<()> { zebra_test::init(); From e5c92084bae4ddaebe12ff3fdebb0eac6046d7ac Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 17 Jun 2022 09:12:35 +1000 Subject: [PATCH 09/91] Group PeerSet fields into themes (#4618) Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-network/src/peer_set/set.rs | 59 +++++++++++++------- zebra-network/src/peer_set/set/tests.rs | 2 + zebra-network/src/peer_set/set/tests/prop.rs | 2 + 3 files changed, 42 insertions(+), 21 deletions(-) diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index d12b60b9108..62ba42173b7 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -170,14 +170,23 @@ where D::Error: Into, C: ChainTip, { + // Peer Tracking: New Peers + // /// Provides new and deleted peer [`Change`]s to the peer set, /// via the [`Discover`] trait implementation. discover: D, + /// A channel that asks the peer crawler task to connect to more peers. + demand_signal: mpsc::Sender, + + // Peer Tracking: Ready Peers + // /// Connected peers that are ready to receive requests from Zebra, /// or send requests to Zebra. ready_services: HashMap, + // Request Routing + // /// A preselected ready service. /// /// # Correctness @@ -194,6 +203,8 @@ where /// Used to route inventory requests to peers that are likely to have it. inventory_registry: InventoryRegistry, + // Peer Tracking: Busy Peers + // /// Connected peers that are handling a Zebra request, /// or Zebra is handling one of their requests. unready_services: FuturesUnordered>, @@ -201,9 +212,22 @@ where /// Channels used to cancel the request that an unready service is doing. cancel_handles: HashMap>, - /// A channel that asks the peer crawler task to connect to more peers. - demand_signal: mpsc::Sender, + // Peer Validation + // + /// An endpoint to see the minimum peer protocol version in real time. + /// + /// The minimum version depends on the block height, and [`MinimumPeerVersion`] listens for + /// height changes and determines the correct minimum version. + minimum_peer_version: MinimumPeerVersion, + /// The configured limit for inbound and outbound connections. + /// + /// The peer set panics if this size is exceeded. + /// If that happens, our connection limit code has a bug. 
+ peerset_total_connection_limit: usize, + + // Background Tasks + // /// Channel for passing ownership of tokio JoinHandles from PeerSet's background tasks /// /// The join handles passed into the PeerSet are used populate the `guards` member @@ -215,6 +239,8 @@ where /// the `PeerSet` propagate errors from background tasks back to the user guards: futures::stream::FuturesUnordered>>, + // Metrics and Logging + // /// Address book metrics watch channel. /// /// Used for logging diagnostics. @@ -222,18 +248,6 @@ where /// The last time we logged a message about the peer set size last_peer_log: Option, - - /// The configured limit for inbound and outbound connections. - /// - /// The peer set panics if this size is exceeded. - /// If that happens, our connection limit code has a bug. - peerset_total_connection_limit: usize, - - /// An endpoint to see the minimum peer protocol version in real time. - /// - /// The minimum version depends on the block height, and [`MinimumPeerVersion`] listens for - /// height changes and determines the correct minimum version. - minimum_peer_version: MinimumPeerVersion, } impl Drop for PeerSet @@ -275,16 +289,23 @@ where minimum_peer_version: MinimumPeerVersion, ) -> Self { Self { - // Ready peers + // New peers discover, + demand_signal, + + // Ready peers ready_services: HashMap::new(), + // Request Routing preselected_p2c_peer: None, inventory_registry: InventoryRegistry::new(inv_stream), - // Unready peers + // Busy peers unready_services: FuturesUnordered::new(), cancel_handles: HashMap::new(), - demand_signal, + + // Peer validation + minimum_peer_version, + peerset_total_connection_limit: config.peerset_total_connection_limit(), // Background tasks handle_rx, @@ -293,10 +314,6 @@ where // Metrics last_peer_log: None, address_metrics, - peerset_total_connection_limit: config.peerset_total_connection_limit(), - - // Real-time parameters - minimum_peer_version, } } diff --git a/zebra-network/src/peer_set/set/tests.rs b/zebra-network/src/peer_set/set/tests.rs index b00da5011db..bc01d978298 100644 --- a/zebra-network/src/peer_set/set/tests.rs +++ b/zebra-network/src/peer_set/set/tests.rs @@ -1,3 +1,5 @@ +//! Peer set unit tests, and test setup code. + use std::{net::SocketAddr, sync::Arc}; use futures::{channel::mpsc, stream, Stream, StreamExt}; diff --git a/zebra-network/src/peer_set/set/tests/prop.rs b/zebra-network/src/peer_set/set/tests/prop.rs index aa48a457bc1..2cc65436c61 100644 --- a/zebra-network/src/peer_set/set/tests/prop.rs +++ b/zebra-network/src/peer_set/set/tests/prop.rs @@ -1,3 +1,5 @@ +//! Randomised property tests for the peer set. 
+
 use std::net::SocketAddr;
 
 use futures::FutureExt;

From e1a7a305d2ff17d5e7dd9a7374277fb763cc1514 Mon Sep 17 00:00:00 2001
From: Gustavo Valverde
Date: Fri, 17 Jun 2022 01:29:50 -0400
Subject: [PATCH 10/91] imp(docs): manage `cargo-mdbook` as a GitHub action
 (#4636)

* imp(docs): manage `cargo-mdbook` as a GitHub action
* fix(docs): keep mdbook-mermaid
---
 .github/workflows/docs.yml | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index ba57228e7b5..f989f57b488 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -37,9 +37,14 @@ jobs:
 
       - uses: Swatinem/rust-cache@v1
 
+      - name: Setup mdBook
+        uses: peaceiris/actions-mdbook@v1.1.14
+        with:
+          mdbook-version: '0.4.18'
+
+      # TODO: actions-mdbook does not yet have an option to install mdbook-mermaid https://github.com/peaceiris/actions-mdbook/issues/426
       - name: Install mdbook
        run: |
-          cargo install mdbook
           cargo install mdbook-mermaid
 
       - name: Build Zebra book

From 3b3b59f71ae086c0a1ae8b9236c15086c3c50796 Mon Sep 17 00:00:00 2001
From: Gustavo Valverde
Date: Sun, 19 Jun 2022 22:10:13 -0400
Subject: [PATCH 11/91] ci(docs): test `cargo doc` warnings and errors (#4635)

* ci(docs): test `cargo doc` warnings and errors
* ci(docs): remove minimal profile
* ci(docs): revert minimal and fix component name
* ci(docs): try cargo doc alone
---
 .github/workflows/lint.yml | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 3b74716df11..9c389b98136 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -95,7 +95,7 @@ jobs:
         with:
           persist-credentials: false
 
-      - uses: actions-rs/toolchain@v1.0.1
+      - uses: actions-rs/toolchain@v1.0.6
        with:
          toolchain: stable
          components: rustfmt
@@ -108,6 +108,29 @@
           command: fmt
           args: --all -- --check
 
+  docs:
+    name: Rust doc
+    timeout-minutes: 30
+    runs-on: ubuntu-latest
+    needs: changed-files
+    if: ${{ needs.changed-files.outputs.rust == 'true' }}
+
+    steps:
+      - uses: actions/checkout@v3.0.2
+        with:
+          persist-credentials: false
+
+      - uses: actions-rs/toolchain@v1.0.6
+        with:
+          toolchain: stable
+          profile: minimal
+          override: true
+
+      - uses: actions-rs/cargo@v1.0.3
+        with:
+          command: doc
+          args: --no-deps --document-private-items --all-features
+
   actionlint:
     runs-on: ubuntu-latest
     continue-on-error: true

From b7536c7f7e2bf92587145e17072a513c9ae8bda9 Mon Sep 17 00:00:00 2001
From: Alfredo Garcia
Date: Mon, 20 Jun 2022 00:57:41 -0300
Subject: [PATCH 12/91] introduce `fallible_impl_from` lint with exceptions
 (#4609)

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
---
 .cargo/config.toml                             | 5 ++++-
 zebra-chain/src/orchard/keys.rs                | 6 ++++--
 zebra-chain/src/sapling/keys.rs                | 5 ++++-
 zebra-chain/src/work/u256.rs                   | 1 +
 zebra-network/src/protocol/external/message.rs | 6 +++++-
 5 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/.cargo/config.toml b/.cargo/config.toml
index f126caa6279..f6f39cbb9a1 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -50,7 +50,10 @@ rustflags = [
     "-Aclippy::try_err",
 
     # Links in public docs can point to private items.
- "-Arustdoc::private_intra_doc_links" + "-Arustdoc::private_intra_doc_links", + + # Panics + "-Wclippy::fallible_impl_from", # TODOs: # `cargo fix` might help do these fixes, diff --git a/zebra-chain/src/orchard/keys.rs b/zebra-chain/src/orchard/keys.rs index e0601b6f01f..11ffba04f7b 100644 --- a/zebra-chain/src/orchard/keys.rs +++ b/zebra-chain/src/orchard/keys.rs @@ -1,7 +1,7 @@ //! Orchard key types. //! //! - +#![allow(clippy::fallible_impl_from)] #![allow(dead_code)] #[cfg(test)] @@ -308,7 +308,9 @@ impl From for [u8; 32] { impl From for SpendValidatingKey { fn from(ask: SpendAuthorizingKey) -> Self { - let sk = redpallas::SigningKey::::try_from(<[u8; 32]>::from(ask)).unwrap(); + let sk = redpallas::SigningKey::::try_from(<[u8; 32]>::from(ask)).expect( + "a scalar converted to byte array and then converted back to a scalar should not fail", + ); Self(redpallas::VerificationKey::from(&sk)) } diff --git a/zebra-chain/src/sapling/keys.rs b/zebra-chain/src/sapling/keys.rs index 897dd309d31..cd3a3f27871 100644 --- a/zebra-chain/src/sapling/keys.rs +++ b/zebra-chain/src/sapling/keys.rs @@ -8,6 +8,7 @@ //! [ps]: https://zips.z.cash/protocol/protocol.pdf#saplingkeycomponents //! [3.1]: https://zips.z.cash/protocol/protocol.pdf#addressesandkeys #![allow(clippy::unit_arg)] +#![allow(clippy::fallible_impl_from)] #![allow(dead_code)] #[cfg(test)] @@ -507,7 +508,9 @@ impl From for [u8; 32] { impl From for AuthorizingKey { fn from(ask: SpendAuthorizingKey) -> Self { - let sk = redjubjub::SigningKey::::try_from(<[u8; 32]>::from(ask)).unwrap(); + let sk = redjubjub::SigningKey::::try_from(<[u8; 32]>::from(ask)).expect( + "a scalar converted to byte array and then converted back to a scalar should not fail", + ); Self(redjubjub::VerificationKey::from(&sk)) } } diff --git a/zebra-chain/src/work/u256.rs b/zebra-chain/src/work/u256.rs index 486fcf1e05b..1897f9ecd1c 100644 --- a/zebra-chain/src/work/u256.rs +++ b/zebra-chain/src/work/u256.rs @@ -3,6 +3,7 @@ // it raises a lot of issues in the macro. #![allow(clippy::all)] #![allow(clippy::range_plus_one)] +#![allow(clippy::fallible_impl_from)] use uint::construct_uint; diff --git a/zebra-network/src/protocol/external/message.rs b/zebra-network/src/protocol/external/message.rs index 11ee9e55c38..743212b4194 100644 --- a/zebra-network/src/protocol/external/message.rs +++ b/zebra-network/src/protocol/external/message.rs @@ -352,7 +352,11 @@ where // use specific varieties of `RejectReason`. ccode: RejectReason::Other, - reason: e.source().unwrap().to_string(), + reason: if let Some(reason) = e.source() { + reason.to_string() + } else { + String::from("") + }, // Allow this to be overridden but not populated by default, methinks. 
data: None, From 961fcb621edba900a1f51450af1b462959141da4 Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 21 Jun 2022 09:30:48 +1000 Subject: [PATCH 13/91] Add a "Rust doc" patch job (#4654) --- .github/workflows/lint.patch.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/lint.patch.yml b/.github/workflows/lint.patch.yml index cbb85228d72..d6462a1a43d 100644 --- a/.github/workflows/lint.patch.yml +++ b/.github/workflows/lint.patch.yml @@ -17,3 +17,9 @@ jobs: runs-on: ubuntu-latest steps: - run: 'echo "No build required"' + + docs: + name: Rust doc + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' From 769d069d0a7e6c5ace25838046fec9b0cabea421 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Mon, 20 Jun 2022 21:59:51 -0300 Subject: [PATCH 14/91] feat(state): delete old database directories (#4586) * delete old database directories * check if state directory exists * skip deleting when ephemeral * split `check_and_delete_old_databases` * move `check_and_delete_old_databases` to state * spawn `check_and_delete_old_databases` * simplity a bit * fix(state): only delete old database directories inside the cache directory (#4631) * Add function comments, tweak log * Simplify version parsing * Use spawn_blocking to launch the task on a separate thread, do the cleanup last * Abort the cleanup task when Zebra exits * Split directory deletion into its own function, handle ownership * Rename cache_dir to state_dir * If an outdated state directory is outside the cache directory, don't delete it * Minimise diffs * add test * fix typos Co-authored-by: teor * add `canonicalize` to test regex * add another match to test Co-authored-by: teor Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-state/src/config.rs | 135 ++++++++++++++++++++++++++++++++++- zebra-state/src/lib.rs | 2 +- zebrad/src/commands/start.rs | 18 +++++ zebrad/tests/acceptance.rs | 64 +++++++++++++++++ 4 files changed, 217 insertions(+), 2 deletions(-) diff --git a/zebra-state/src/config.rs b/zebra-state/src/config.rs index e84417099e2..555bd2a5c84 100644 --- a/zebra-state/src/config.rs +++ b/zebra-state/src/config.rs @@ -1,6 +1,13 @@ -use std::path::PathBuf; +//! Cached state configuration for Zebra. + +use std::{ + fs::{canonicalize, remove_dir_all, DirEntry, ReadDir}, + path::{Path, PathBuf}, +}; use serde::{Deserialize, Serialize}; +use tokio::task::{spawn_blocking, JoinHandle}; +use tracing::Span; use zebra_chain::parameters::Network; @@ -57,6 +64,13 @@ pub struct Config { /// /// Set to `None` by default: Zebra continues syncing indefinitely. pub debug_stop_at_height: Option, + + /// Whether to delete the old database directories when present. + /// + /// Set to `true` by default. If this is set to `false`, + /// no check for old database versions will be made and nothing will be + /// deleted. + pub delete_old_database: bool, } fn gen_temp_path(prefix: &str) -> PathBuf { @@ -108,6 +122,125 @@ impl Default for Config { cache_dir, ephemeral: false, debug_stop_at_height: None, + delete_old_database: true, + } + } +} + +// Cleaning up old database versions + +/// Spawns a task that checks if there are old database folders, +/// and deletes them from the filesystem. +/// +/// Iterate over the files and directories in the databases folder and delete if: +/// - The state directory exists. +/// - The entry is a directory. +/// - The directory name has a prefix `v`. +/// - The directory name without the prefix can be parsed as an unsigned number. 
+/// - The parsed number is lower than the hardcoded `DATABASE_FORMAT_VERSION`.
+pub fn check_and_delete_old_databases(config: Config) -> JoinHandle<()> {
+    let current_span = Span::current();
+
+    spawn_blocking(move || {
+        current_span.in_scope(|| {
+            delete_old_databases(config);
+            info!("finished old database version cleanup task");
+        })
+    })
+}
+
+/// Check if there are old database folders and delete them from the filesystem.
+///
+/// See [`check_and_delete_old_databases`] for details.
+fn delete_old_databases(config: Config) {
+    if config.ephemeral || !config.delete_old_database {
+        return;
+    }
+
+    info!("checking for old database versions");
+
+    let state_dir = config.cache_dir.join("state");
+    if let Some(state_dir) = read_dir(&state_dir) {
+        for entry in state_dir.flatten() {
+            let deleted_state = check_and_delete_database(&config, &entry);
+
+            if let Some(deleted_state) = deleted_state {
+                info!(?deleted_state, "deleted outdated state directory");
+            }
+        }
+    }
+}
+
+/// Return a `ReadDir` for `dir`, after checking that `dir` exists and can be read.
+///
+/// Returns `None` if any operation fails.
+fn read_dir(dir: &Path) -> Option<ReadDir> {
+    if dir.exists() {
+        if let Ok(read_dir) = dir.read_dir() {
+            return Some(read_dir);
+        }
+    }
+    None
+}
+
+/// Check if `entry` is an old database directory, and delete it from the filesystem.
+/// See [`check_and_delete_old_databases`] for details.
+///
+/// If the directory was deleted, returns its path.
+fn check_and_delete_database(config: &Config, entry: &DirEntry) -> Option<PathBuf> {
+    let dir_name = parse_dir_name(entry)?;
+    let version_number = parse_version_number(&dir_name)?;
+
+    if version_number >= crate::constants::DATABASE_FORMAT_VERSION {
+        return None;
+    }
+
+    let outdated_path = entry.path();
+
+    // # Correctness
+    //
+    // Check that the path we're about to delete is inside the cache directory.
+    // If the user has symlinked the outdated state directory to a non-cache directory,
+    // we don't want to delete it, because it might contain other files.
+    //
+    // We don't attempt to guard against malicious symlinks created by attackers
+    // (TOCTOU attacks). Zebra should not be run with elevated privileges.
+    let cache_path = canonicalize(&config.cache_dir).ok()?;
+    let outdated_path = canonicalize(outdated_path).ok()?;
+
+    if !outdated_path.starts_with(&cache_path) {
+        info!(
+            skipped_path = ?outdated_path,
+            ?cache_path,
+            "skipped cleanup of outdated state directory: state is outside cache directory",
+        );
+
+        return None;
+    }
+
+    remove_dir_all(&outdated_path).ok().map(|()| outdated_path)
+}
+
+/// Check if `entry` is a directory with a valid UTF-8 name.
+/// (State directory names are guaranteed to be UTF-8.)
+///
+/// Returns `None` if any operation fails.
+fn parse_dir_name(entry: &DirEntry) -> Option<String> {
+    if let Ok(file_type) = entry.file_type() {
+        if file_type.is_dir() {
+            if let Ok(dir_name) = entry.file_name().into_string() {
+                return Some(dir_name);
+            }
+        }
+    }
+    None
+}
+
+/// Parse the state version number from `dir_name`.
+///
+/// Returns `None` if parsing fails, or the directory name is not in the expected format.
+fn parse_version_number(dir_name: &str) -> Option<u64> {
+    dir_name
+        .strip_prefix('v')
+        .and_then(|version| version.parse().ok())
+}
diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs
index 3e8601bd934..86d87d9aeda 100644
--- a/zebra-state/src/lib.rs
+++ b/zebra-state/src/lib.rs
@@ -28,7 +28,7 @@ mod util;
 #[cfg(test)]
 mod tests;
 
-pub use config::Config;
+pub use config::{check_and_delete_old_databases, Config};
 pub use constants::MAX_BLOCK_REORG_HEIGHT;
 pub use error::{BoxError, CloneError, CommitBlockError, ValidateContextError};
 pub use request::{FinalizedBlock, HashOrHeight, PreparedBlock, ReadRequest, Request};
diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs
index 9a1bce4b3b1..ff1d592e6a7 100644
--- a/zebrad/src/commands/start.rs
+++ b/zebrad/src/commands/start.rs
@@ -37,6 +37,8 @@
 //!  * contextually verifies blocks
 //!  * handles in-memory storage of multiple non-finalized chains
 //!  * handles permanent storage of the best finalized chain
+//!  * Old State Version Cleanup Task
+//!    * deletes outdated state versions
 //!  * Block Gossip Task
 //!    * runs in the background and continuously queries the state for
 //!      newly committed blocks to be gossiped to peers
@@ -218,6 +220,9 @@ impl StartCmd {
             .in_current_span(),
         );
 
+        let mut old_databases_task_handle =
+            zebra_state::check_and_delete_old_databases(config.state.clone());
+
         info!("spawned initial Zebra tasks");
 
         // TODO: put tasks into an ongoing FuturesUnordered and a startup FuturesUnordered?
@@ -235,6 +240,8 @@ impl StartCmd {
         // startup tasks
         let groth16_download_handle_fused = (&mut groth16_download_handle).fuse();
         pin!(groth16_download_handle_fused);
+        let old_databases_task_handle_fused = (&mut old_databases_task_handle).fuse();
+        pin!(old_databases_task_handle_fused);
 
         // Wait for tasks to finish
         let exit_status = loop {
@@ -297,6 +304,16 @@ impl StartCmd {
                 exit_when_task_finishes = false;
                 Ok(())
             }
+
+            // The same for the old databases task: we expect it to finish while Zebra is running.
+ old_databases_result = &mut old_databases_task_handle_fused => { + old_databases_result + .unwrap_or_else(|_| panic!( + "unexpected panic deleting old database directories")); + + exit_when_task_finishes = false; + Ok(()) + } }; // Stop Zebra if a task finished and returned an error, @@ -324,6 +341,7 @@ impl StartCmd { // startup tasks groth16_download_handle.abort(); + old_databases_task_handle.abort(); exit_status } diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index e3eaf37ee99..f969febd87e 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -1642,6 +1642,70 @@ async fn fully_synced_rpc_test() -> Result<()> { Ok(()) } +#[tokio::test] +async fn delete_old_databases() -> Result<()> { + use std::fs::{canonicalize, create_dir}; + + zebra_test::init(); + + let mut config = default_test_config()?; + let run_dir = testdir()?; + let cache_dir = run_dir.path().join("state"); + + // create cache dir + create_dir(cache_dir.clone())?; + + // create a v1 dir outside cache dir that should not be deleted + let outside_dir = run_dir.path().join("v1"); + create_dir(&outside_dir)?; + assert!(outside_dir.as_path().exists()); + + // create a `v1` dir inside cache dir that should be deleted + let inside_dir = cache_dir.join("v1"); + create_dir(&inside_dir)?; + let canonicalized_inside_dir = canonicalize(inside_dir.clone()).ok().unwrap(); + assert!(inside_dir.as_path().exists()); + + // modify config with our cache dir and not ephemeral configuration + // (delete old databases function will not run when ephemeral = true) + config.state.cache_dir = cache_dir; + config.state.ephemeral = false; + + // run zebra with our config + let mut child = run_dir + .with_config(&mut config)? + .spawn_child(args!["start"])?; + + // delete checker running + child.expect_stdout_line_matches("checking for old database versions".to_string())?; + + // inside dir was deleted + child.expect_stdout_line_matches(format!( + "deleted outdated state directory deleted_state={:?}", + canonicalized_inside_dir + ))?; + assert!(!inside_dir.as_path().exists()); + + // deleting old databases task ended + child.expect_stdout_line_matches("finished old database version cleanup task".to_string())?; + + // outside dir was not deleted + assert!(outside_dir.as_path().exists()); + + // finish + child.kill()?; + + let output = child.wait_with_output()?; + let output = output.assert_failure()?; + + // [Note on port conflict](#Note on port conflict) + output + .assert_was_killed() + .wrap_err("Possible port conflict. Are there other acceptance tests running?")?; + + Ok(()) +} + /// Test sending transactions using a lightwalletd instance connected to a zebrad instance. /// /// See [`common::lightwalletd::send_transaction_test`] for more information. 
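
The cleanup in PATCH 14 above ultimately reduces to parsing a `vN` directory name and comparing it with the current database format version. A minimal standalone sketch of that check follows; the `DATABASE_FORMAT_VERSION` value below is a placeholder, since the real constant lives in `zebra-state`'s `constants` module:

```rust
/// Placeholder for `zebra_state::constants::DATABASE_FORMAT_VERSION`.
const DATABASE_FORMAT_VERSION: u64 = 25;

/// Parse the state version number from `dir_name`, as in the patch above.
fn parse_version_number(dir_name: &str) -> Option<u64> {
    dir_name
        .strip_prefix('v')
        .and_then(|version| version.parse().ok())
}

/// Returns true if a state directory name is an outdated version,
/// and is therefore a candidate for deletion.
fn is_outdated_state_dir(dir_name: &str) -> bool {
    matches!(
        parse_version_number(dir_name),
        Some(version) if version < DATABASE_FORMAT_VERSION
    )
}

fn main() {
    assert!(is_outdated_state_dir("v1"));
    assert!(!is_outdated_state_dir("v25"));
    // Directories that don't match the `vN` pattern are never deleted.
    assert!(!is_outdated_state_dir("state"));
}
```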
From b6a59a9f66e6839486e4b098fd6535d211a50e25 Mon Sep 17 00:00:00 2001
From: Gustavo Valverde
Date: Mon, 20 Jun 2022 22:30:45 -0400
Subject: [PATCH 15/91] feat(ci): build each crate individually (#4640)

* feat(ci): build each crate individually
* fix(ci): use valid names for each job
* feat(ci): builds and checks with and without all features
* refactor(ci): build job matrix dynamically
* fix: use a JSON_CRATES variable with resulting values
* test: check-matrix
* fix(ci): use "crate" in singular for reference
* imp(ci): use a matrix for feature build arguments
* fix(ci): use correct naming and includes
* fix(ci): implement most recommendations given in review
* fix(ci): use simpler shell script
* fix: typo
* fix: add string to file, not cmd
* fix: some shellchecks
* fix(ci): remove warnings and errors from shellcheck
* imp(ci): add patch file for `Build crates individually` workflow
* Remove unused configs in patch job

Co-authored-by: teor
---
 .../build-crates-individually.patch.yml        |  69 ++++++++++
 .../workflows/build-crates-individually.yml    | 125 ++++++++++++++++++
 .github/workflows/continous-integration-os.yml |  15 +--
 3 files changed, 200 insertions(+), 9 deletions(-)
 create mode 100644 .github/workflows/build-crates-individually.patch.yml
 create mode 100644 .github/workflows/build-crates-individually.yml

diff --git a/.github/workflows/build-crates-individually.patch.yml b/.github/workflows/build-crates-individually.patch.yml
new file mode 100644
index 00000000000..f78e69a2731
--- /dev/null
+++ b/.github/workflows/build-crates-individually.patch.yml
@@ -0,0 +1,69 @@
+name: Build crates individually
+
+# We need to keep the `matrix` job in this workflow as-is, as we need the results
+# to actually match the same `build` job names from the original file.
+on:
+  pull_request:
+    paths-ignore:
+      # production code and test code
+      - '**/*.rs'
+      # dependencies
+      - '**/Cargo.toml'
+      - '**/Cargo.lock'
+      # workflow definitions
+      - '.github/workflows/build-crates-individually.yml'
+
+jobs:
+  matrix:
+    name: Crates matrix
+    runs-on: ubuntu-latest
+    outputs:
+      matrix: ${{ steps.set-matrix.outputs.matrix }}
+    steps:
+      - uses: actions/checkout@v3.0.2
+
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          profile: minimal
+          override: true
+
+      - uses: actions-rs/cargo@v1.0.3
+      # This step is meant to dynamically create a JSON containing the values of each crate
+      # available in this repo in the root directory. We use `cargo tree` to accomplish this task.
+      #
+      # The result from `cargo tree` is then transformed to JSON values between double quotes,
+      # and separated by commas, then added to a `crates.txt` and assigned to a $JSON_CRATES variable.
+      #
+      # A JSON object is created and assigned to a $MATRIX variable, which is used to create an output
+      # named `matrix`, which is then used as the input in following steps,
+      # using ` ${{ fromJson(needs.matrix.outputs.matrix) }}`
+      - id: set-matrix
+        name: Dynamically build crates JSON
+        run: |
+          TEMP_DIR=$(mktemp -d)
+          echo "$(cargo tree --depth 0 --edges no-normal,no-dev,no-build,no-proc-macro --prefix none | cut -d ' ' -f1 | sed '/^$/d' | awk '{ printf "\"%s\",\n", $0 }' | sed '$ s/.$//')" > $TEMP_DIR/crates.txt
+          MATRIX=$( (
+            echo '{ "crate" : ['
+            echo "$(cat $TEMP_DIR/crates.txt)"
+            echo " ]}"
+          ) | jq -c .)
+          echo $MATRIX
+          echo $MATRIX | jq .
+          echo "::set-output name=matrix::$MATRIX"
+
+  check-matrix:
+    runs-on: ubuntu-latest
+    needs: [ matrix ]
+    steps:
+      - run: 'echo "No job required"'
+
+  build:
+    name: Build ${{ matrix.crate }} crate
+    needs: [ matrix, check-matrix ]
+    runs-on: ubuntu-latest
+    strategy:
+      matrix: ${{ fromJson(needs.matrix.outputs.matrix) }}
+
+    steps:
+      - run: 'echo "No job required"'
\ No newline at end of file
diff --git a/.github/workflows/build-crates-individually.yml b/.github/workflows/build-crates-individually.yml
new file mode 100644
index 00000000000..4e65f6b8ad8
--- /dev/null
+++ b/.github/workflows/build-crates-individually.yml
@@ -0,0 +1,125 @@
+name: Build crates individually
+
+on:
+  workflow_dispatch:
+  push:
+    branches:
+      - main
+    paths:
+      # production code and test code
+      - '**/*.rs'
+      # dependencies
+      - '**/Cargo.toml'
+      - '**/Cargo.lock'
+      # workflow definitions
+      - '.github/workflows/build-crates-individually.yml'
+  pull_request:
+    paths:
+      # production code and test code
+      - '**/*.rs'
+      # dependencies
+      - '**/Cargo.toml'
+      - '**/Cargo.lock'
+      # workflow definitions
+      - '.github/workflows/build-crates-individually.yml'
+
+env:
+  CARGO_INCREMENTAL: 0
+  RUST_BACKTRACE: full
+  RUST_LIB_BACKTRACE: full
+  COLORBT_SHOW_HIDDEN: '1'
+
+jobs:
+  matrix:
+    name: Crates matrix
+    runs-on: ubuntu-latest
+    outputs:
+      matrix: ${{ steps.set-matrix.outputs.matrix }}
+    steps:
+      - uses: actions/checkout@v3.0.2
+
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          profile: minimal
+          override: true
+
+      - uses: actions-rs/cargo@v1.0.3
+      # This step is meant to dynamically create a JSON containing the values of each crate
+      # available in this repo in the root directory. We use `cargo tree` to accomplish this task.
+      #
+      # The result from `cargo tree` is then transformed to JSON values between double quotes,
+      # and separated by commas, then added to a `crates.txt` and assigned to a $JSON_CRATES variable.
+      #
+      # A JSON object is created and assigned to a $MATRIX variable, which is used to create an output
+      # named `matrix`, which is then used as the input in following steps,
+      # using ` ${{ fromJson(needs.matrix.outputs.matrix) }}`
+      - id: set-matrix
+        name: Dynamically build crates JSON
+        run: |
+          TEMP_DIR=$(mktemp -d)
+          echo "$(cargo tree --depth 0 --edges no-normal,no-dev,no-build,no-proc-macro --prefix none | cut -d ' ' -f1 | sed '/^$/d' | awk '{ printf "\"%s\",\n", $0 }' | sed '$ s/.$//')" > $TEMP_DIR/crates.txt
+          MATRIX=$( (
+            echo '{ "crate" : ['
+            echo "$(cat $TEMP_DIR/crates.txt)"
+            echo " ]}"
+          ) | jq -c .)
+          echo $MATRIX
+          echo $MATRIX | jq .
+          echo "::set-output name=matrix::$MATRIX"
+
+  check-matrix:
+    runs-on: ubuntu-latest
+    needs: [ matrix ]
+    steps:
+      - name: Install json2yaml
+        run: |
+          sudo npm install -g json2yaml
+
+      - name: Check matrix definition
+        run: |
+          matrix='${{ needs.matrix.outputs.matrix }}'
+          echo $matrix
+          echo $matrix | jq .
+          echo $matrix | json2yaml
+
+  build:
+    name: Build ${{ matrix.crate }} crate
+    needs: [ matrix, check-matrix ]
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: true
+      matrix: ${{ fromJson(needs.matrix.outputs.matrix) }}
+
+    steps:
+      - uses: actions/checkout@v3.0.2
+        with:
+          persist-credentials: false
+
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          profile: minimal
+          override: true
+
+      # We could use `features: ['', '--all-features', '--no-default-features']` as a matrix argument,
+      # but it's faster to run these commands sequentially, so they can re-use the local cargo cache.
+      #
+      # Some Zebra crates do not have any features, and most don't have any default features.
+      - name: Build ${{ matrix.crate }} crate with no default features
+        uses: actions-rs/cargo@v1.0.3
+        with:
+          command: build
+          args: --package ${{ matrix.crate }} --no-default-features
+
+      - name: Build ${{ matrix.crate }} crate normally
+        uses: actions-rs/cargo@v1.0.3
+        with:
+          command: build
+          args: --package ${{ matrix.crate }}
+
+      - name: Build ${{ matrix.crate }} crate with all features
+        uses: actions-rs/cargo@v1.0.3
+        with:
+          command: build
+          args: --package ${{ matrix.crate }} --all-features
diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml
index ba29bdba073..5882d638059 100644
--- a/.github/workflows/continous-integration-os.yml
+++ b/.github/workflows/continous-integration-os.yml
@@ -267,13 +267,14 @@ jobs:
           args: --locked --all-features --all-targets

   cargo-deny:
-    name: Check deny.toml ${{ matrix.checks }}
+    name: Check deny.toml ${{ matrix.checks }} ${{ matrix.features }}
     runs-on: ubuntu-latest
     strategy:
       matrix:
         checks:
          - bans
          - sources
+        features: ['', '--all-features', '--no-default-features']

     # Prevent sudden announcement of a new advisory from failing ci:
     continue-on-error: ${{ matrix.checks == 'advisories' }}
@@ -283,14 +284,10 @@ jobs:
         with:
           persist-credentials: false

-      - uses: EmbarkStudios/cargo-deny-action@v1
-        with:
-          command: check ${{ matrix.checks }}
-          args: --all-features --workspace
-
-      # this check runs with optional features off
+      # this check also runs with optional features off
       # so we expect some warnings about "skip tree root was not found"
-      - uses: EmbarkStudios/cargo-deny-action@v1
+      - name: Check ${{ matrix.checks }} with features ${{ matrix.features }}
+        uses: EmbarkStudios/cargo-deny-action@v1
         with:
           command: check ${{ matrix.checks }}
-          args: --workspace
+          args: --workspace ${{ matrix.features }}

From 21af125bfa453d0f3a04a3aba8f8e62aaff09c56 Mon Sep 17 00:00:00 2001
From: Deirdre Connolly 
Date: Mon, 20 Jun 2022 23:38:12 -0400
Subject: [PATCH 16/91] doc: add tokio-console page in book dev section (#4641)

* doc: add tokio-console page in dev section
* Add screenshot

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
---
 book/src/dev/tokio-console.md | 38 +++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)
 create mode 100644 book/src/dev/tokio-console.md

diff --git a/book/src/dev/tokio-console.md b/book/src/dev/tokio-console.md
new file mode 100644
index 00000000000..4a10d27e5cf
--- /dev/null
+++ b/book/src/dev/tokio-console.md
@@ -0,0 +1,38 @@
+# `tokio-console` support
+
+`tokio-console` is a diagnostics and debugging tool for asynchronous Rust programs. This tool can be
+useful to lint runtime behavior, collect diagnostic data from processes, and debug performance
+issues. ["it's like top(1) for tasks!"][top]
+
+### Setup
+
+Support for `tokio-console` is not enabled by default for zebrad. To activate this feature, run:
+  ```sh
+  $ RUSTFLAGS="--cfg tokio_unstable" cargo build --no-default-features --features="tokio-console" --bin zebrad
+  ```
+
+Install [`tokio-console`][install].
+
+Then start `zebrad` however you wish.
+
+When `zebrad` is running, run:
+```
+$ tokio-console
+```
+
+The default options are used, so `tokio-console` should connect to the running `zebrad` without other configuration.
+
+### Example
+
+[screenshot: `tokio-console` task list while `zebrad` is running]
+
+
+### More

+For more details, see the [`tokio` docs][enabling_tokio_instrumentation].
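+
+As a quick sketch of a whole session (the commands are the ones documented
+above; the exact build flags may change between Zebra versions):
+
+```sh
+# build and start zebrad with tokio-console support
+RUSTFLAGS="--cfg tokio_unstable" cargo run --no-default-features --features="tokio-console" --bin zebrad
+
+# in another terminal, attach the console to the running zebrad
+tokio-console
+```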
+
+
+[top]: https://github.com/tokio-rs/console#extremely-cool-and-amazing-screenshots
+[install]: https://github.com/tokio-rs/console#running-the-console
+[enabling_tokio_instrumentation]: https://github.com/tokio-rs/console/blob/main/console-subscriber/README.md#enabling-tokio-instrumentation
+

From 25396bc6e904ac9d5c081bb37661e0ba0d5bbc80 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 21 Jun 2022 03:38:29 +0000
Subject: [PATCH 17/91] build(deps): bump insta from 1.14.1 to 1.15.0 (#4645)

Bumps [insta](https://github.com/mitsuhiko/insta) from 1.14.1 to 1.15.0.
- [Release notes](https://github.com/mitsuhiko/insta/releases)
- [Changelog](https://github.com/mitsuhiko/insta/blob/master/CHANGELOG.md)
- [Commits](https://github.com/mitsuhiko/insta/compare/1.14.1...1.15.0)

---
updated-dependencies:
- dependency-name: insta
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] 
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 Cargo.lock             | 4 ++--
 zebra-rpc/Cargo.toml   | 2 +-
 zebra-state/Cargo.toml | 2 +-
 zebra-test/Cargo.toml  | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index dcd650d51c0..c224307b098 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2221,9 +2221,9 @@ dependencies = [

 [[package]]
 name = "insta"
-version = "1.14.1"
+version = "1.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bcc3e639bcba360d9237acabd22014c16f3df772db463b7446cd81b070714767"
+checksum = "4126dd76ebfe2561486a1bd6738a33d2029ffb068a99ac446b7f8c77b2e58dbc"
 dependencies = [
  "console",
  "once_cell",
diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml
index e46c4210fa1..41d45051e8e 100644
--- a/zebra-rpc/Cargo.toml
+++ b/zebra-rpc/Cargo.toml
@@ -43,7 +43,7 @@ proptest = { version = "0.10.1", optional = true }
 proptest-derive = { version = "0.3.0", optional = true }

 [dev-dependencies]
-insta = { version = "1.14.1", features = ["redactions"] }
+insta = { version = "1.15.0", features = ["redactions"] }
 proptest = "0.10.1"
 proptest-derive = "0.3.0"
 serde_json = "1.0.81"
diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml
index 504cdffde2d..37d0804b52d 100644
--- a/zebra-state/Cargo.toml
+++ b/zebra-state/Cargo.toml
@@ -41,7 +41,7 @@ once_cell = "1.12.0"
 spandoc = "0.2.2"

 hex = { version = "0.4.3", features = ["serde"] }
-insta = { version = "1.14.1", features = ["ron"] }
+insta = { version = "1.15.0", features = ["ron"] }
 proptest = "0.10.1"
 proptest-derive = "0.3.0"
diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml
index 0041a980d3a..01b21830ee7 100644
--- a/zebra-test/Cargo.toml
+++ b/zebra-test/Cargo.toml
@@ -11,7 +11,7 @@ edition = "2021"
 hex = "0.4.3"
 indexmap = "1.8.2"
 lazy_static = "1.4.0"
-insta = "1.14.1"
+insta = "1.15.0"
 proptest = "0.10.1"
 once_cell = "1.12.0"
 rand = { version = "0.8.5", package = "rand" }

From ee50f3ae4575dfff66051b7afd7bbd1ccc3a4671 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 21 Jun 2022 03:38:52 +0000
Subject: [PATCH 18/91] build(deps): bump inferno from 0.11.4 to 0.11.5 (#4647)

Bumps [inferno](https://github.com/jonhoo/inferno) from 0.11.4 to 0.11.5.
- [Release notes](https://github.com/jonhoo/inferno/releases) - [Changelog](https://github.com/jonhoo/inferno/blob/master/CHANGELOG.md) - [Commits](https://github.com/jonhoo/inferno/compare/v0.11.4...v0.11.5) --- updated-dependencies: - dependency-name: inferno dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 6 +++--- zebrad/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c224307b098..57aba2a146b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2204,16 +2204,16 @@ dependencies = [ [[package]] name = "inferno" -version = "0.11.4" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a3cb215599901c8f491666421d44ffaed08d6872b4c7ced6f425683b951271e" +checksum = "244ae9456c246081801028c893ee0e2b71be4f0380894e2cd53974c3beea0c64" dependencies = [ "ahash", "atty", "itoa 1.0.1", - "lazy_static", "log", "num-format", + "once_cell", "quick-xml", "rgb", "str_stack", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index bc81c110a7b..f52c35e838d 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -111,7 +111,7 @@ sentry = { version = "0.26.0", default-features = false, features = ["backtrace" # prod feature flamegraph tracing-flame = { version = "0.2.0", optional = true } -inferno = { version = "0.11.4", default-features = false, optional = true } +inferno = { version = "0.11.5", default-features = false, optional = true } # prod feature journald tracing-journald = { version = "0.3.0", optional = true } From 3a7c2c8926f5063273b13eef84ecd42f5fb386b1 Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 21 Jun 2022 21:07:32 +1000 Subject: [PATCH 19/91] Replace the lookahead limit panic with a warning (#4662) And decrease the minimum to 400 blocks --- zebrad/src/components/sync.rs | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/zebrad/src/components/sync.rs b/zebrad/src/components/sync.rs index 21126d1f4a7..6d49c0dfc83 100644 --- a/zebrad/src/components/sync.rs +++ b/zebrad/src/components/sync.rs @@ -57,8 +57,8 @@ const BLOCK_DOWNLOAD_RETRY_LIMIT: usize = 3; /// A lower bound on the user-specified lookahead limit. /// -/// Set to two checkpoint intervals, so that we're sure that the lookahead -/// limit always contains at least one complete checkpoint. +/// Set to the maximum checkpoint interval, so the pipeline holds at least one checkpoint's +/// worth of blocks. /// /// ## Security /// @@ -74,7 +74,7 @@ const BLOCK_DOWNLOAD_RETRY_LIMIT: usize = 3; /// Once these malicious blocks start failing validation, the syncer will cancel all /// the pending download and verify tasks, drop all the blocks, and start a new /// ObtainTips with a new set of peers. -pub const MIN_LOOKAHEAD_LIMIT: usize = zebra_consensus::MAX_CHECKPOINT_HEIGHT_GAP * 2; +pub const MIN_LOOKAHEAD_LIMIT: usize = zebra_consensus::MAX_CHECKPOINT_HEIGHT_GAP; /// The default for the user-specified lookahead limit. /// @@ -318,24 +318,27 @@ where // We apply a timeout to the verifier to avoid hangs due to missing earlier blocks. 
let verifier = Timeout::new(verifier, BLOCK_VERIFY_TIMEOUT); - assert!( - config.sync.lookahead_limit >= MIN_LOOKAHEAD_LIMIT, - "configured lookahead limit {} too low, must be at least {}", - config.sync.lookahead_limit, - MIN_LOOKAHEAD_LIMIT - ); + let mut lookahead_limit = config.sync.lookahead_limit; + if lookahead_limit < MIN_LOOKAHEAD_LIMIT { + warn!( + "configured lookahead limit {} too low, increasing to {}", + config.sync.lookahead_limit, MIN_LOOKAHEAD_LIMIT, + ); + + lookahead_limit = MIN_LOOKAHEAD_LIMIT; + } let (sync_status, recent_syncs) = SyncStatus::new(); let new_syncer = Self { genesis_hash: genesis_hash(config.network.network), - lookahead_limit: config.sync.lookahead_limit, + lookahead_limit, tip_network, downloads: Box::pin(Downloads::new( block_network, verifier, latest_chain_tip.clone(), - config.sync.lookahead_limit, + lookahead_limit, )), state, latest_chain_tip, From 1f7e621e34846d800ead7f7181a4e5a59c76f4b1 Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 21 Jun 2022 21:31:34 +1000 Subject: [PATCH 20/91] change(test): add an identifiable suffix to zcash-rpc-diff temp directories (#4577) * Add an identifiable suffix to zcash-rpc-diff temp directories * Explain why we need a fallback temp command Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-utils/zcash-rpc-diff | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/zebra-utils/zcash-rpc-diff b/zebra-utils/zcash-rpc-diff index 41348c4a5f6..885c773ee69 100755 --- a/zebra-utils/zcash-rpc-diff +++ b/zebra-utils/zcash-rpc-diff @@ -27,7 +27,9 @@ fi ZEBRAD_RPC_PORT=$1 shift -ZCASH_RPC_TMP_DIR=$(mktemp -d) +# Use an easily identified temp directory name, +# but fall back to the default temp name if `mktemp` does not understand `--suffix`. +ZCASH_RPC_TMP_DIR=$(mktemp --suffix=.rpc-diff -d 2>/dev/null || mktemp -d) ZEBRAD_RELEASE_INFO="$ZCASH_RPC_TMP_DIR/first-check-getinfo.json" ZCASHD_RELEASE_INFO="$ZCASH_RPC_TMP_DIR/second-check-getinfo.json" From 82b8d7f95de3f798be6b88882f550d793fa5516c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Jun 2022 17:12:19 +0000 Subject: [PATCH 21/91] build(deps): bump tower from 0.4.12 to 0.4.13 (#4644) Bumps [tower](https://github.com/tower-rs/tower) from 0.4.12 to 0.4.13. - [Release notes](https://github.com/tower-rs/tower/releases) - [Commits](https://github.com/tower-rs/tower/compare/tower-0.4.12...tower-0.4.13) --- updated-dependencies: - dependency-name: tower dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 5 ++--- tower-batch/Cargo.toml | 2 +- tower-fallback/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 9 files changed, 10 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 57aba2a146b..420df53d30d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1939,7 +1939,6 @@ checksum = "31672b7011be2c4f7456c4ddbcb40e7e9a4a9fad8efe49a6ebaf5f307d0109c0" dependencies = [ "base64", "byteorder", - "crossbeam-channel", "flate2", "nom", "num-traits", @@ -5342,9 +5341,9 @@ dependencies = [ [[package]] name = "tower" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", diff --git a/tower-batch/Cargo.toml b/tower-batch/Cargo.toml index 550b88b08f8..d8fbee8c1cc 100644 --- a/tower-batch/Cargo.toml +++ b/tower-batch/Cargo.toml @@ -10,7 +10,7 @@ futures = "0.3.21" futures-core = "0.3.21" pin-project = "1.0.10" tokio = { version = "1.19.2", features = ["time", "sync", "tracing", "macros"] } -tower = { version = "0.4.12", features = ["util", "buffer"] } +tower = { version = "0.4.13", features = ["util", "buffer"] } tracing = "0.1.31" tracing-futures = "0.2.5" diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 0a617cc2347..85f45351459 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] pin-project = "0.4.29" -tower = "0.4.12" +tower = "0.4.13" futures-core = "0.3.21" tracing = "0.1.31" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index eadbb5ffeca..ebdd253a51f 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -30,7 +30,7 @@ futures-util = "0.3.21" metrics = "0.18.1" thiserror = "1.0.31" tokio = { version = "1.19.2", features = ["time", "sync", "tracing"] } -tower = { version = "0.4.12", features = ["timeout", "util", "buffer"] } +tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } tracing = "0.1.31" tracing-futures = "0.2.5" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index a24abbbe901..602d52cee9a 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -31,7 +31,7 @@ futures = "0.3.21" tokio = { version = "1.19.2", features = ["net", "time", "tracing", "macros", "rt-multi-thread"] } tokio-stream = { version = "0.1.9", features = ["sync", "time"] } tokio-util = { version = "0.7.3", features = ["codec"] } -tower = { version = "0.4.12", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } +tower = { version = "0.4.13", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } metrics = "0.18.1" tracing-futures = "0.2.5" diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 41d45051e8e..52ee2fbb1fc 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -31,7 +31,7 @@ serde_json = { version = "1.0.81", features = ["preserve_order"] } indexmap = { version = "1.8.2", features = ["serde"] } tokio = { version = "1.19.2", features = ["time", "rt-multi-thread", "macros", "tracing"] } -tower = "0.4.12" +tower = 
"0.4.13" tracing = "0.1.31" tracing-futures = "0.2.5" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 37d0804b52d..633c0b8bda9 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -29,7 +29,7 @@ tempfile = "3.3.0" thiserror = "1.0.31" tokio = { version = "1.19.2", features = ["sync", "tracing"] } -tower = { version = "0.4.12", features = ["buffer", "util"] } +tower = { version = "0.4.13", features = ["buffer", "util"] } tracing = "0.1.31" zebra-chain = { path = "../zebra-chain" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 01b21830ee7..0430c5a95b9 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -18,7 +18,7 @@ rand = { version = "0.8.5", package = "rand" } regex = "1.5.6" tokio = { version = "1.19.2", features = ["full", "tracing", "test-util"] } -tower = { version = "0.4.12", features = ["util"] } +tower = { version = "0.4.13", features = ["util"] } futures = "0.3.21" color-eyre = "0.6.1" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index f52c35e838d..8c5d609c2c9 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -86,7 +86,7 @@ toml = "0.5.9" futures = "0.3.21" tokio = { version = "1.19.2", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } -tower = { version = "0.4.12", features = ["hedge", "limit"] } +tower = { version = "0.4.13", features = ["hedge", "limit"] } pin-project = "1.0.10" color-eyre = { version = "0.6.1", default_features = false, features = ["issue-url"] } From 4f4c9c42c9072c76599f0f1ce8c32326a3a3bd8e Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 22 Jun 2022 11:18:49 +1000 Subject: [PATCH 22/91] fix(ci): update OS patch jobs for Rust & feature changes (#4668) * Update OS patch jobs for Rust & feature changes * Restore a patch job that is actually deleted in another PR --- .../continous-integration-os.patch.yml | 29 ++++--------------- 1 file changed, 6 insertions(+), 23 deletions(-) diff --git a/.github/workflows/continous-integration-os.patch.yml b/.github/workflows/continous-integration-os.patch.yml index 88d05fa12a3..21f592b6217 100644 --- a/.github/workflows/continous-integration-os.patch.yml +++ b/.github/workflows/continous-integration-os.patch.yml @@ -14,28 +14,15 @@ on: jobs: test: name: Test ${{ matrix.rust }} on ${{ matrix.os }} - # The large timeout is to accommodate: - # - Windows builds (75 minutes, typically 30-50 minutes) - # - parameter downloads (40 minutes, but only when the cache expires) - timeout-minutes: 115 runs-on: ${{ matrix.os }} strategy: - fail-fast: false matrix: # TODO: Windows was removed for now, see https://github.com/ZcashFoundation/zebra/issues/3801 os: [ubuntu-latest, macos-latest] - rust: [stable] - - steps: - - run: 'echo "No build required"' - - test-fake-activation-heights: - name: Test ${{ matrix.rust }} zebra-state with fake activation heights on ubuntu-latest - timeout-minutes: 60 - runs-on: ubuntu-latest - strategy: - matrix: - rust: [stable] + rust: [stable, beta] + exclude: + - os: macos-latest + rust: beta steps: - run: 'echo "No build required"' @@ -53,7 +40,6 @@ jobs: install-from-lockfile-no-cache: name: Install zebrad from lockfile without cache on ubuntu-latest - timeout-minutes: 60 runs-on: ubuntu-latest steps: @@ -61,23 +47,20 @@ jobs: check-cargo-lock: name: Check Cargo.lock is up to date - timeout-minutes: 60 runs-on: ubuntu-latest steps: - run: 'echo "No build required"' cargo-deny: - name: Check deny.toml ${{ matrix.checks }} + name: Check deny.toml ${{ matrix.checks }} ${{ matrix.features }} runs-on: 
ubuntu-latest strategy: matrix: checks: - bans - sources - - # Prevent sudden announcement of a new advisory from failing ci: - continue-on-error: ${{ matrix.checks == 'advisories' }} + features: ['', '--all-features', '--no-default-features'] steps: - run: 'echo "No build required"' From d2d4f5a67e4d2978369a10fc8a2271102ab0c070 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 22 Jun 2022 00:35:18 -0400 Subject: [PATCH 23/91] fix(build): add `beta` rust to CI (#4637) * fix(build): add `beta` rust to CI * fix(ci): remove macos-latest with beta Rust combination --- .../workflows/continous-integration-os.yml | 50 +++---------------- 1 file changed, 8 insertions(+), 42 deletions(-) diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 5882d638059..a6758c727e2 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -53,7 +53,14 @@ jobs: matrix: # TODO: Windows was removed for now, see https://github.com/ZcashFoundation/zebra/issues/3801 os: [ubuntu-latest, macos-latest] - rust: [stable] + rust: [stable, beta] + # We're excluding macOS for the following reasons: + # - the concurrent macOS runner limit is much lower than the Linux limit + # - macOS is slower than Linux, and shouldn't have a build or test difference with Linux + # - macOS is a second-tier Zebra support platform + exclude: + - os: macos-latest + rust: beta steps: - uses: actions/checkout@v3.0.2 @@ -146,47 +153,6 @@ jobs: # Note: this only runs the zebrad acceptance tests, because re-running all the test binaries is slow on Windows args: --verbose --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ - test-fake-activation-heights: - name: Test ${{ matrix.rust }} zebra-state with fake activation heights on ubuntu-latest - timeout-minutes: 60 - runs-on: ubuntu-latest - strategy: - matrix: - rust: [stable] - - steps: - - uses: actions/checkout@v3.0.2 - with: - persist-credentials: false - - - uses: actions-rs/toolchain@v1 - with: - toolchain: ${{ matrix.rust }} - profile: minimal - override: true - - - uses: Swatinem/rust-cache@v1 - - - name: cargo fetch - uses: actions-rs/cargo@v1.0.3 - with: - command: fetch - - # This test changes zebra-chain's activation heights, - # which can recompile all the Zebra crates, - # so we want its build products to be cached separately. - # - # Also, we don't want to accidentally use the fake heights in other tests. 
- - name: Run tests with fake activation heights - uses: actions-rs/cargo@v1.0.3 - env: - TEST_FAKE_ACTIVATION_HEIGHTS: '' - with: - command: test - # Note: this only runs the zebra-state crate tests, - # because re-running all the test binaries can be slow - args: --verbose --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights - build-chain-no-features: name: Build ${{ matrix.rust }} zebra-chain w/o features on ubuntu-latest timeout-minutes: 60 From 257f0173825ab1c1f5dd036ce9f1422555dfc417 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 22 Jun 2022 21:41:45 +1000 Subject: [PATCH 24/91] fix(ci): Remove redundant build-chain-no-features job (#4656) * Remove redundant build-chain-no-features * Remove redundant job from patch file Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: Gustavo Valverde --- .../continous-integration-os.patch.yml | 11 ------- .../workflows/continous-integration-os.yml | 30 ------------------- 2 files changed, 41 deletions(-) diff --git a/.github/workflows/continous-integration-os.patch.yml b/.github/workflows/continous-integration-os.patch.yml index 21f592b6217..ef965a6433a 100644 --- a/.github/workflows/continous-integration-os.patch.yml +++ b/.github/workflows/continous-integration-os.patch.yml @@ -27,17 +27,6 @@ jobs: steps: - run: 'echo "No build required"' - build-chain-no-features: - name: Build ${{ matrix.rust }} zebra-chain w/o features on ubuntu-latest - timeout-minutes: 60 - runs-on: ubuntu-latest - strategy: - matrix: - rust: [stable, beta] - - steps: - - run: 'echo "No build required"' - install-from-lockfile-no-cache: name: Install zebrad from lockfile without cache on ubuntu-latest runs-on: ubuntu-latest diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index a6758c727e2..18614b51c64 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -153,36 +153,6 @@ jobs: # Note: this only runs the zebrad acceptance tests, because re-running all the test binaries is slow on Windows args: --verbose --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ - build-chain-no-features: - name: Build ${{ matrix.rust }} zebra-chain w/o features on ubuntu-latest - timeout-minutes: 60 - runs-on: ubuntu-latest - strategy: - matrix: - rust: [stable, beta] - - steps: - - uses: actions/checkout@v3.0.2 - with: - persist-credentials: false - - - uses: actions-rs/toolchain@v1 - with: - toolchain: ${{ matrix.rust }} - profile: minimal - override: true - - - uses: Swatinem/rust-cache@v1 - - - name: cargo fetch - uses: actions-rs/cargo@v1.0.3 - with: - command: fetch - - - name: Run build without features enabled - working-directory: ./zebra-chain - run: cargo build --verbose --no-default-features - # Install Zebra with lockfile dependencies, with no caching and default features install-from-lockfile-no-cache: name: Install zebrad from lockfile without cache on ubuntu-latest From c75a68e655b773ae205fa948c33e31d5e9cf5fbe Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 23 Jun 2022 04:17:21 +1000 Subject: [PATCH 25/91] fix(sync): change default sync config to improve reliability (#4670) * Decrease the default lookahead limit to 400 * Increase the block verification timeout to 10 minutes * Halve the default concurrent downloads config * Try to run the spawned download task before queueing the next download * Allow verification to be cancelled if the verifier 
is busy
---
 zebrad/src/components/sync.rs           | 10 +++++---
 zebrad/src/components/sync/downloads.rs | 32 ++++++++++++++++++++++---
 zebrad/src/config.rs                    | 15 ++++++++----
 3 files changed, 46 insertions(+), 11 deletions(-)

diff --git a/zebrad/src/components/sync.rs b/zebrad/src/components/sync.rs
index 6d49c0dfc83..118808f1951 100644
--- a/zebrad/src/components/sync.rs
+++ b/zebrad/src/components/sync.rs
@@ -57,7 +57,7 @@ const BLOCK_DOWNLOAD_RETRY_LIMIT: usize = 3;

 /// A lower bound on the user-specified lookahead limit.
 ///
-/// Set to the maximum checkpoint interval, so the pipeline holds at least one checkpoint's
+/// Set to the maximum checkpoint interval, so the pipeline holds around a checkpoint's
 /// worth of blocks.
 ///
 /// ## Security
@@ -79,7 +79,9 @@ pub const MIN_LOOKAHEAD_LIMIT: usize = zebra_consensus::MAX_CHECKPOINT_HEIGHT_GA
 /// The default for the user-specified lookahead limit.
 ///
 /// See [`MIN_LOOKAHEAD_LIMIT`] for details.
-pub const DEFAULT_LOOKAHEAD_LIMIT: usize = zebra_consensus::MAX_CHECKPOINT_HEIGHT_GAP * 5;
+///
+/// TODO: increase to `MAX_CHECKPOINT_HEIGHT_GAP * 5`, after we implement orchard batching
+pub const DEFAULT_LOOKAHEAD_LIMIT: usize = MIN_LOOKAHEAD_LIMIT;

 /// The expected maximum number of hashes in an ObtainTips or ExtendTips response.
 ///
@@ -141,7 +143,9 @@ pub(super) const BLOCK_DOWNLOAD_TIMEOUT: Duration = Duration::from_secs(15);
 ///
 /// If this timeout is set too low, the syncer will sometimes get stuck in a
 /// failure loop.
-pub(super) const BLOCK_VERIFY_TIMEOUT: Duration = Duration::from_secs(6 * 60);
+///
+/// TODO: reduce to `6 * 60`, after we implement orchard batching?
+pub(super) const BLOCK_VERIFY_TIMEOUT: Duration = Duration::from_secs(10 * 60);

 /// Controls how long we wait to restart syncing after finishing a sync run.
 ///
diff --git a/zebrad/src/components/sync/downloads.rs b/zebrad/src/components/sync/downloads.rs
index cc1477cddc8..02cf168fd54 100644
--- a/zebrad/src/components/sync/downloads.rs
+++ b/zebrad/src/components/sync/downloads.rs
@@ -118,6 +118,15 @@ pub enum BlockDownloadVerifyError {
     #[error("block download & verification was cancelled during download: {hash:?}")]
     CancelledDuringDownload { hash: block::Hash },

+    #[error(
+        "block download & verification was cancelled while waiting for the verifier service \
+        to become ready: {height:?} {hash:?}"
+    )]
+    CancelledAwaitingVerifierReadiness {
+        height: block::Height,
+        hash: block::Hash,
+    },
+
     #[error(
         "block download & verification was cancelled during verification: {height:?} {hash:?}"
     )]
@@ -282,6 +291,7 @@ where

         let task = tokio::spawn(
             async move {
+                // Download the block.
                 // Prefer the cancel handle if both are ready.
                 let rsp = tokio::select! {
                     biased;
@@ -393,12 +403,24 @@ where
                     Err(BlockDownloadVerifyError::BehindTipHeightLimit { height: block_height, hash })?;
                 }

+                // Wait for the verifier service to be ready.
+                let readiness = verifier.ready();
+                // Prefer the cancel handle if both are ready.
+                let verifier = tokio::select! {
+                    biased;
+                    _ = &mut cancel_rx => {
+                        trace!("task cancelled waiting for verifier service readiness");
+                        metrics::counter!("sync.cancelled.verify.ready.count", 1);
+                        return Err(BlockDownloadVerifyError::CancelledAwaitingVerifierReadiness { height: block_height, hash })
+                    }
+                    verifier = readiness => verifier,
+                };
+
+                // Verify the block.
                 let rsp = verifier
-                    .ready()
-                    .await
                     .map_err(|error| BlockDownloadVerifyError::VerifierServiceError { error })?
                     .call(block);

-                // Prefer the cancel handle if both are ready.
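+                // Note: `biased` makes this `select!` poll the cancel handle
+                // first, so cancellation still wins if both futures are ready.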
+ let verification = tokio::select! { biased; _ = &mut cancel_rx => { @@ -408,6 +430,7 @@ where } verification = rsp => verification, }; + if verification.is_ok() { metrics::counter!("sync.verified.block.count", 1); } @@ -425,6 +448,9 @@ where .map_err(move |e| (e, hash)), ); + // Try to start the spawned task before queueing the next block request + tokio::task::yield_now().await; + self.pending.push(task); assert!( self.cancel_handles.insert(hash, cancel_tx).is_none(), diff --git a/zebrad/src/config.rs b/zebrad/src/config.rs index 910a4d7d0b3..1087b05e0dd 100644 --- a/zebrad/src/config.rs +++ b/zebrad/src/config.rs @@ -166,7 +166,7 @@ impl Default for MetricsSection { #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields, default)] pub struct SyncSection { - /// The maximum number of concurrent block requests during sync. + /// The maximum number of concurrent block download requests during sync. /// /// This is set to a low value by default, to avoid task and /// network contention. Increasing this value may improve @@ -178,22 +178,27 @@ pub struct SyncSection { /// download before waiting for queued verifications to complete. /// /// Increasing this limit increases the buffer size, so it reduces - /// the impact of an individual block request failing. The block - /// size limit is 2MB, so in theory, this could represent multiple + /// the impact of an individual block request failing. However, it + /// also increases memory and CPU usage if block validation stalls, + /// or there are some large blocks in the pipeline. + /// + /// The block size limit is 2MB, so in theory, this could represent multiple /// gigabytes of data, if we downloaded arbitrary blocks. However, /// because we randomly load balance outbound requests, and separate /// block download from obtaining block hashes, an adversary would /// have to control a significant fraction of our peers to lead us /// astray. /// - /// This value is clamped to an implementation-defined lower bound. + /// For reliable checkpoint syncing, Zebra enforces a + /// [`MIN_LOOKAHEAD_LIMIT`](sync::MIN_LOOKAHEAD_LIMIT). 
pub lookahead_limit: usize, } impl Default for SyncSection { fn default() -> Self { Self { - max_concurrent_block_requests: 50, + // TODO: increase to 50, after we implement orchard batching + max_concurrent_block_requests: 25, lookahead_limit: sync::DEFAULT_LOOKAHEAD_LIMIT, } } From 20850b4cb4ff1b97e43e31205716f54055e361ea Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 23 Jun 2022 07:54:37 +1000 Subject: [PATCH 26/91] fix(ci): actually create a cached state image after running a sync (#4669) * Actually create a cached state image * fix(state): use same disk naming convention for all test instances Co-authored-by: Gustavo Valverde --- .github/workflows/deploy-gcp-tests.yml | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index 2ad0f0dfb5e..6c1777e5b11 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -223,7 +223,7 @@ jobs: gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ --boot-disk-size 100GB \ --boot-disk-type pd-ssd \ - --create-disk image=${{ env.CACHED_DISK_NAME }},name="${{ inputs.disk_prefix }}-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.disk_prefix }}-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=100GB,type=pd-ssd \ + --create-disk image=${{ env.CACHED_DISK_NAME }},name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=100GB,type=pd-ssd \ --container-image debian:buster \ --container-restart-policy=never \ --machine-type ${{ env.MACHINE_TYPE }} \ @@ -263,10 +263,10 @@ jobs: --command \ "\ docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ - ${{ inputs.disk_prefix }}-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ + ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ && \ docker run ${{ inputs.test_variables }} -t --name ${{ inputs.test_id }} \ - --mount type=volume,src=${{ inputs.disk_prefix }}-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ + --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }}" # SSH into the just created VM, and create a Docker container to run the incoming test @@ -309,18 +309,21 @@ jobs: --command \ "\ docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ - ${{ inputs.disk_prefix }}-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ + ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ && \ docker run ${{ inputs.test_variables }} -t --name ${{ inputs.test_id }} \ - --mount type=volume,src=${{ inputs.disk_prefix }}-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ - --mount type=volume,src=${{ inputs.disk_prefix }}-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} \ + --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ + --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} \ ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }}:sha-${{ 
env.GITHUB_SHA_SHORT }}" create-state-image: name: Create ${{ inputs.test_id }} cached state image runs-on: ubuntu-latest + # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. + # Normally, if a job is skipped, all the jobs that depend on it are also skipped. + # So we need to override the default success() check to make this job run. needs: [ test-without-cached-state, test-with-cached-state ] - if: ${{ inputs.saves_to_disk }} + if: ${{ !cancelled() && !failure() && inputs.saves_to_disk }} permissions: contents: 'read' id-token: 'write' @@ -394,7 +397,7 @@ jobs: run: | gcloud compute images create ${{ inputs.disk_prefix }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${{ env.NETWORK }}-${{ inputs.disk_suffix }} \ --force \ - --source-disk=${{ inputs.disk_prefix }}-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ + --source-disk=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ --source-disk-zone=${{ env.ZONE }} \ --storage-location=us \ --description="Created from commit ${{ env.GITHUB_SHA_SHORT }} with height ${{ env.SYNC_HEIGHT }}" From 6aea0fd9e8026e1985089418787cabfe5a7de904 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 23 Jun 2022 17:46:02 +1000 Subject: [PATCH 27/91] Add some missing tracing spans (#4660) Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-network/src/address_book_updater.rs | 37 ++++++++++----------- zebra-network/src/peer_set/candidate_set.rs | 22 ++++++++---- 2 files changed, 33 insertions(+), 26 deletions(-) diff --git a/zebra-network/src/address_book_updater.rs b/zebra-network/src/address_book_updater.rs index dbb0eb16054..25e3d083a01 100644 --- a/zebra-network/src/address_book_updater.rs +++ b/zebra-network/src/address_book_updater.rs @@ -59,33 +59,32 @@ impl AddressBookUpdater { let address_book = Arc::new(std::sync::Mutex::new(address_book)); let worker_address_book = address_book.clone(); - let span = Span::current(); let worker = move || { - span.in_scope(|| { - info!("starting the address book updater"); + info!("starting the address book updater"); - while let Some(event) = worker_rx.blocking_recv() { - trace!(?event, "got address book change"); + while let Some(event) = worker_rx.blocking_recv() { + trace!(?event, "got address book change"); - // # Correctness - // - // Briefly hold the address book threaded mutex, to update the - // state for a single address. - worker_address_book - .lock() - .expect("mutex should be unpoisoned") - .update(event); - } + // # Correctness + // + // Briefly hold the address book threaded mutex, to update the + // state for a single address. 
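+                //
+                // (The `MutexGuard` is a temporary in this expression, so the
+                // lock is released as soon as `update` returns.)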
+ worker_address_book + .lock() + .expect("mutex should be unpoisoned") + .update(event); + } - let error = Err(AllAddressBookUpdaterSendersClosed.into()); - info!(?error, "stopping address book updater"); - error - }) + let error = Err(AllAddressBookUpdaterSendersClosed.into()); + info!(?error, "stopping address book updater"); + error }; // Correctness: spawn address book accesses on a blocking thread, // to avoid deadlocks (see #1976) - let address_book_updater_task_handle = tokio::task::spawn_blocking(worker); + let span = Span::current(); + let address_book_updater_task_handle = + tokio::task::spawn_blocking(move || span.in_scope(worker)); ( address_book, diff --git a/zebra-network/src/peer_set/candidate_set.rs b/zebra-network/src/peer_set/candidate_set.rs index 950f05672a7..e072b37e716 100644 --- a/zebra-network/src/peer_set/candidate_set.rs +++ b/zebra-network/src/peer_set/candidate_set.rs @@ -4,6 +4,7 @@ use chrono::Utc; use futures::stream::{FuturesUnordered, StreamExt}; use tokio::time::{sleep_until, timeout, Instant}; use tower::{Service, ServiceExt}; +use tracing::Span; use zebra_chain::serialization::DateTime32; @@ -333,9 +334,12 @@ where // // Extend handles duplicate addresses internally. let address_book = self.address_book.clone(); - tokio::task::spawn_blocking(move || address_book.lock().unwrap().extend(addrs)) - .await - .expect("panic in new peers address book update task"); + let span = Span::current(); + tokio::task::spawn_blocking(move || { + span.in_scope(|| address_book.lock().unwrap().extend(addrs)) + }) + .await + .expect("panic in new peers address book update task"); } /// Returns the next candidate for a connection attempt, if any are available. @@ -386,7 +390,8 @@ where }; // Correctness: Spawn address book accesses on a blocking thread, to avoid deadlocks (see #1976). - let next_peer = tokio::task::spawn_blocking(next_peer) + let span = Span::current(); + let next_peer = tokio::task::spawn_blocking(move || span.in_scope(next_peer)) .await .expect("panic in next peer address book task")?; @@ -406,9 +411,12 @@ where // Spawn address book accesses on a blocking thread, // to avoid deadlocks (see #1976). let address_book = self.address_book.clone(); - tokio::task::spawn_blocking(move || address_book.lock().unwrap().update(addr)) - .await - .expect("panic in peer failure address book update task"); + let span = Span::current(); + tokio::task::spawn_blocking(move || { + span.in_scope(|| address_book.lock().unwrap().update(addr)) + }) + .await + .expect("panic in peer failure address book update task"); } } From c2622462668c01d8766cf574147341ccb9e4c966 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 23 Jun 2022 21:09:06 +0000 Subject: [PATCH 28/91] build(deps): bump inferno from 0.11.5 to 0.11.6 (#4682) Bumps [inferno](https://github.com/jonhoo/inferno) from 0.11.5 to 0.11.6. - [Release notes](https://github.com/jonhoo/inferno/releases) - [Changelog](https://github.com/jonhoo/inferno/blob/master/CHANGELOG.md) - [Commits](https://github.com/jonhoo/inferno/compare/v0.11.5...v0.11.6) --- updated-dependencies: - dependency-name: inferno dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebrad/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 420df53d30d..7a254766b2b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2203,9 +2203,9 @@ dependencies = [ [[package]] name = "inferno" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "244ae9456c246081801028c893ee0e2b71be4f0380894e2cd53974c3beea0c64" +checksum = "9a262875c8e10820b9366e991ed6710cd80dc93578375e5d499fcbd408985937" dependencies = [ "ahash", "atty", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 8c5d609c2c9..84101723eb0 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -111,7 +111,7 @@ sentry = { version = "0.26.0", default-features = false, features = ["backtrace" # prod feature flamegraph tracing-flame = { version = "0.2.0", optional = true } -inferno = { version = "0.11.5", default-features = false, optional = true } +inferno = { version = "0.11.6", default-features = false, optional = true } # prod feature journald tracing-journald = { version = "0.3.0", optional = true } From b35ab67ef07428f61adf6c4eff8c4014ada64ec6 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 24 Jun 2022 09:22:52 +1000 Subject: [PATCH 29/91] fix(ci): Split instance and volume creation out of the test job (#4675) * Split full sync into checkpoint and full validation * Sort workflow variables into categories and add descriptions * Split Create instance/volume and Run test into separate jobs * Copy initial conditions to all jobs in the series --- .github/workflows/deploy-gcp-tests.yml | 210 +++++++++++++++++++------ 1 file changed, 162 insertions(+), 48 deletions(-) diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index 6c1777e5b11..7a57cd8796d 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -3,62 +3,76 @@ name: Deploy GCP tests on: workflow_call: inputs: - network: - required: false - type: string - default: Mainnet - app_name: - required: false - type: string - default: 'zebra' + # Status and logging test_id: required: true type: string + description: 'Unique identifier for the test' test_description: required: true type: string + description: 'Explains what the test does' + # Test selection and parameters test_variables: required: true type: string + description: 'Environmental variables used to select and configure the test' + network: + required: false + type: string + default: Mainnet + description: 'Zcash network to test against' + # Cached state + # # TODO: find a better name root_state_path: required: false type: string default: '/zebrad-cache' + description: 'Cached state base directory path' # TODO: find a better name zebra_state_dir: required: false type: string default: '' - description: 'Name of the Zebra cached state directory and input image prefix to search in GCP' + description: 'Zebra cached state directory and input image prefix to search in GCP' # TODO: find a better name lwd_state_dir: required: false type: string default: '' - description: 'Name of the Lightwalletd cached state directory and input image prefix to search in GCP' + description: 'Lightwalletd cached state directory and input image prefix to search in GCP' disk_prefix: required: false type: string default: 'zebrad-cache' - description: 'Used to name the image, and for tests that do not use a 
`zebra_state_dir` to work, but builds a cached state' + description: 'Image name prefix, and `zebra_state_dir` name for newly created cached states' disk_suffix: required: false type: string + description: 'Image name suffix' needs_zebra_state: required: true type: boolean - description: 'Indicates if a test needs a disk with a Zebra cached state to run' + description: 'Does the test use Zebra cached state?' needs_lwd_state: required: false type: boolean - description: 'Indicates if a test needs a disk with Lightwalletd cached state to run (which also includes a Zebra cached state)' + description: 'Does the test use Lightwalletd and Zebra cached state?' saves_to_disk: required: true type: boolean + description: 'Does the test create a new cached state disk?' + # Metadata height_grep_text: required: false type: string + description: 'Regular expression to find the tip height in test logs, and add it to newly created cached state image metadata' + app_name: + required: false + type: string + default: 'zebra' + description: 'Application name for Google Cloud instance metadata' env: IMAGE_NAME: zebrad-test @@ -67,8 +81,10 @@ env: MACHINE_TYPE: c2d-standard-16 jobs: - test-without-cached-state: - name: Run ${{ inputs.test_id }} test + # set up the test without any cached state + # each test runs one of the *-with/without-cached-state job series, and skips the other + setup-without-cached-state: + name: Setup ${{ inputs.test_id }} test if: ${{ !inputs.needs_zebra_state }} runs-on: ubuntu-latest permissions: @@ -98,7 +114,8 @@ jobs: service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' - - name: Create GCP compute instance + # Create a Compute Engine virtual machine + - name: Create ${{ inputs.test_id }} GCP compute instance id: create-instance run: | gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ @@ -114,7 +131,7 @@ jobs: --zone ${{ env.ZONE }} sleep 60 - - name: Run ${{ inputs.test_id }} test + - name: Create ${{ inputs.test_id }} Docker volume run: | gcloud compute ssh \ ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ @@ -127,13 +144,60 @@ jobs: && \ docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ - && \ + " + + test-without-cached-state: + name: Run ${{ inputs.test_id }} test + needs: [ setup-without-cached-state ] + # if the previous step fails, we also want to run and fail this step, + # so that the branch protection rule fails in Mergify and GitHub + if: ${{ !cancelled() && !inputs.needs_zebra_state }} + runs-on: ubuntu-latest + permissions: + contents: 'read' + id-token: 'write' + steps: + - uses: actions/checkout@v3.0.2 + with: + persist-credentials: false + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Downcase network name for disks + run: | + NETWORK_CAPS=${{ inputs.network }} + echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v0.8.0 + with: + workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' + service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' + token_format: 'access_token' + + - name: Run ${{ inputs.test_id }} test + run: | + gcloud compute ssh \ + ${{ 
inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ env.ZONE }} \ + --quiet \ + --ssh-flag="-o ServerAliveInterval=5" \ + --command \ + "\ docker run ${{ inputs.test_variables }} -t --name ${{ inputs.test_id }} \ --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ - ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }}" + ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ + " - test-with-cached-state: - name: Run ${{ inputs.test_id }} test + # set up the test using cached state + # each test runs one of the *-with/without-cached-state job series, and skips the other + setup-with-cached-state: + name: Setup ${{ inputs.test_id }} test if: ${{ inputs.needs_zebra_state }} runs-on: ubuntu-latest permissions: @@ -180,7 +244,7 @@ jobs: # # Passes the disk name to subsequent steps using $CACHED_DISK_NAME env variable # Passes the state version to subsequent steps using $STATE_VERSION env variable - - name: Find cached state disk + - name: Find ${{ inputs.test_id }} cached state disk id: get-disk-name run: | LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "$GITHUB_WORKSPACE/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) @@ -215,9 +279,9 @@ jobs: echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> $GITHUB_ENV echo "CACHED_DISK_NAME=$CACHED_DISK_NAME" >> $GITHUB_ENV - # Creates Compute Engine virtual machine and attach a cached state disk using the + # Create a Compute Engine virtual machine and attach a cached state disk using the # $CACHED_DISK_NAME variable as the source image to populate the disk cached state - - name: Create GCP compute instance + - name: Create ${{ inputs.test_id }} GCP compute instance id: create-instance run: | gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ @@ -233,9 +297,62 @@ jobs: --zone ${{ env.ZONE }} sleep 60 - # SSH into the just created VM, and create a Docker container to run the incoming test - # from ${{ inputs.test_id }}, then create a docker volume with the recently attached disk. - # The disk will be mounted in ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }}. + # Create a docker volume with the selected cached state. + # + # SSH into the just created VM, and create a docker volume with the recently attached disk. 
+      - name: Create ${{ inputs.test_id }} Docker volume
+        run: |
+          gcloud compute ssh \
+          ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
+          --zone ${{ env.ZONE }} \
+          --quiet \
+          --ssh-flag="-o ServerAliveInterval=5" \
+          --command \
+          "\
+          docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \
+          ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \
+          "
+
+  test-with-cached-state:
+    name: Run ${{ inputs.test_id }} test
+    needs: [ setup-with-cached-state ]
+    # if the previous step fails, we also want to run and fail this step,
+    # so that the branch protection rule fails in Mergify and GitHub
+    if: ${{ !cancelled() && inputs.needs_zebra_state }}
+    runs-on: ubuntu-latest
+    permissions:
+      contents: 'read'
+      id-token: 'write'
+    steps:
+      - uses: actions/checkout@v3.0.2
+        with:
+          persist-credentials: false
+          fetch-depth: '2'
+
+      - name: Inject slug/short variables
+        uses: rlespinasse/github-slug-action@v4
+        with:
+          short-length: 7
+
+      - name: Downcase network name for disks
+        run: |
+          NETWORK_CAPS=${{ inputs.network }}
+          echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV
+
+      # Setup gcloud CLI
+      - name: Authenticate to Google Cloud
+        id: auth
+        uses: google-github-actions/auth@v0.8.0
+        with:
+          workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc'
+          service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com'
+          token_format: 'access_token'
+
+      # Run the test with the previously created Zebra-only cached state.
+      # Each test runs one of the "Run test" steps, and skips the other.
+      #
+      # SSH into the just created VM, and create a Docker container to run the incoming test
+      # from ${{ inputs.test_id }}, then mount the docker volume created in the previous job.
       #
       # The disk mounted in the VM is located at /dev/sdb, we mount the root `/` of this disk to the docker
       # container in one path:
       #   - /var/lib/docker/volumes/${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}/_data
       #
       # This path must match the variable used by the tests in Rust, which are also set in
       # `continous-integration-docker.yml` to be able to run these tests.
       #
-      # Although we're mounting the disk root, Zebra will only respect the values from
+      # Although we're mounting the disk root, Zebra will only respect the values from
       # $ZEBRA_CACHED_STATE_DIR. The inputs like ${{ inputs.zebra_state_dir }} are only used
       # to match those variable paths.
       - name: Run ${{ inputs.test_id }} test
-      # This step mounts the volume only when a single cached state is needed, in this case
-      # the cached state from Zebra.
-      # lightwalletd-full-sync test is an exception to this rule, as it does not need a lwd cached state,
-      # but it does saves a lwd cached state
+      # This step only runs for tests that just read or write a Zebra state.
+      #
+      # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially.
# TODO: we should find a better logic for this use cases if: ${{ (inputs.needs_zebra_state && !inputs.needs_lwd_state) && inputs.test_id != 'lwd-full-sync' }} run: | @@ -262,17 +378,16 @@ jobs: --ssh-flag="-o ServerAliveInterval=5" \ --command \ "\ - docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ - ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ - && \ docker run ${{ inputs.test_variables }} -t --name ${{ inputs.test_id }} \ --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ - ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }}" + ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ + " - # SSH into the just created VM, and create a Docker container to run the incoming test - # from ${{ inputs.test_id }}, then create a docker volume with the recently attached disk. - # The disk will be mounted in ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }}, - # and ${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} + # Run the test with the previously created Lightwalletd and Zebra cached state. + # Each test runs one of the "Run test" steps, and skips the other. + # + # SSH into the just created VM, and create a Docker container to run the incoming test + # from ${{ inputs.test_id }}, then mount the docker volume created in the previous job. # # In this step we're using the same disk for simplicity, as mounting multiple disks to the # VM and to the container might require more steps in this workflow, and additional @@ -294,10 +409,9 @@ jobs: # will only respect the values from $ZEBRA_CACHED_STATE_DIR and $LIGHTWALLETD_DATA_DIR, # the inputs like ${{ inputs.lwd_state_dir }} are only used to match those variables paths. - name: Run ${{ inputs.test_id }} test - # This step mounts the volume only when both cached states are needed, in this case - # the cached state from Zebra and Lightwalletd - # lightwalletd-full-sync test is an exception to this rule, as it does not need a lwd cached state, - # but it does saves a lwd cached state + # This step only runs for tests that read or write Lightwalletd and Zebra states. + # + # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. 
# TODO: we should find a better logic for these use cases
        if: ${{ (inputs.needs_zebra_state && inputs.needs_lwd_state) || inputs.test_id == 'lwd-full-sync' }}
        run: |
          gcloud compute ssh \
          ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
          --zone ${{ env.ZONE }} \
          --quiet \
          --ssh-flag="-o ServerAliveInterval=5" \
          --command \
          "\
-          docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \
-          ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \
-          && \
           docker run ${{ inputs.test_variables }} -t --name ${{ inputs.test_id }} \
           --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \
           --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} \
-          ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }}"
+          ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \
+          "

+  # create a state image from the instance's state disk, if requested by the caller
   create-state-image:
     name: Create ${{ inputs.test_id }} cached state image
     runs-on: ubuntu-latest
@@ -402,6 +515,7 @@ jobs:
           --storage-location=us \
           --description="Created from commit ${{ env.GITHUB_SHA_SHORT }} with height ${{ env.SYNC_HEIGHT }}"

+  # delete the Google Cloud instance for this test
   delete-instance:
     name: Delete ${{ inputs.test_id }} instance
     runs-on: ubuntu-latest

From 49cda21f376fb04c4bc35073dea263e1652904f9 Mon Sep 17 00:00:00 2001
From: teor 
Date: Fri, 24 Jun 2022 10:58:49 +1000
Subject: [PATCH 30/91] Slightly increase some syncer defaults (#4679)

This is a partial revert of PR #4670.
---
 zebrad/src/components/sync.rs | 2 +-
 zebrad/src/config.rs          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/zebrad/src/components/sync.rs b/zebrad/src/components/sync.rs
index 118808f1951..7ab85cf6812 100644
--- a/zebrad/src/components/sync.rs
+++ b/zebrad/src/components/sync.rs
@@ -81,7 +81,7 @@ pub const MIN_LOOKAHEAD_LIMIT: usize = zebra_consensus::MAX_CHECKPOINT_HEIGHT_GA
 /// See [`MIN_LOOKAHEAD_LIMIT`] for details.
 ///
 /// TODO: increase to `MAX_CHECKPOINT_HEIGHT_GAP * 5`, after we implement orchard batching
-pub const DEFAULT_LOOKAHEAD_LIMIT: usize = MIN_LOOKAHEAD_LIMIT;
+pub const DEFAULT_LOOKAHEAD_LIMIT: usize = zebra_consensus::MAX_CHECKPOINT_HEIGHT_GAP * 3;

 /// The expected maximum number of hashes in an ObtainTips or ExtendTips response.
/// diff --git a/zebrad/src/config.rs b/zebrad/src/config.rs index 1087b05e0dd..3e2cad9c395 100644 --- a/zebrad/src/config.rs +++ b/zebrad/src/config.rs @@ -198,7 +198,7 @@ impl Default for SyncSection { fn default() -> Self { Self { // TODO: increase to 50, after we implement orchard batching - max_concurrent_block_requests: 25, + max_concurrent_block_requests: 40, lookahead_limit: sync::DEFAULT_LOOKAHEAD_LIMIT, } } From 83aa42e649b488432019b0996d6b13a23aa7779c Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Sun, 26 Jun 2022 21:07:37 -0300 Subject: [PATCH 31/91] tests(config): Add tests for old configs (#4676) * change `initial_mainnet_peers` and `initial_testnet_peers` type to `IndexSet` * add tests for zebra config files * add serde feature to indexmap * remove async * update config * fix `stored_config_path()` * skip tests if config is not found * improve error * use CARGO_MANIFEST_DIR * remove `stored_config_is_newest` test * move `stored_config_works` test to the end of `valid_generated_config_test` * space --- Cargo.lock | 1 + zebra-network/Cargo.toml | 1 + zebra-network/src/config.rs | 13 ++-- .../src/peer_set/initialize/tests/vectors.rs | 10 +-- .../components/inbound/tests/real_peer_set.rs | 7 +- zebrad/tests/acceptance.rs | 30 ++++++++- zebrad/tests/common/config.rs | 11 +++- zebrad/tests/common/config.toml | 64 +++++++++++++++++++ zebrad/tests/common/launch.rs | 6 +- 9 files changed, 124 insertions(+), 19 deletions(-) create mode 100644 zebrad/tests/common/config.toml diff --git a/Cargo.lock b/Cargo.lock index 7a254766b2b..6a00e257286 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6282,6 +6282,7 @@ dependencies = [ "futures", "hex", "humantime-serde", + "indexmap", "lazy_static", "metrics", "ordered-map", diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 602d52cee9a..a7d74177708 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -19,6 +19,7 @@ bytes = "1.1.0" chrono = "0.4.19" hex = "0.4.3" humantime-serde = "1.1.1" +indexmap = { version = "1.8.2", features = ["serde"] } lazy_static = "1.4.0" ordered-map = "0.4.2" pin-project = "1.0.10" diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs index cb993059fa6..e041eb80069 100644 --- a/zebra-network/src/config.rs +++ b/zebra-network/src/config.rs @@ -5,6 +5,7 @@ use std::{ time::Duration, }; +use indexmap::IndexSet; use serde::{de, Deserialize, Deserializer}; use zebra_chain::parameters::Network; @@ -55,11 +56,11 @@ pub struct Config { /// A list of initial peers for the peerset when operating on /// mainnet. - pub initial_mainnet_peers: HashSet, + pub initial_mainnet_peers: IndexSet, /// A list of initial peers for the peerset when operating on /// testnet. - pub initial_testnet_peers: HashSet, + pub initial_testnet_peers: IndexSet, /// The initial target size for the peer set. /// @@ -127,7 +128,7 @@ impl Config { } /// Returns the initial seed peer hostnames for the configured network. - pub fn initial_peer_hostnames(&self) -> &HashSet { + pub fn initial_peer_hostnames(&self) -> &IndexSet { match self.network { Network::Mainnet => &self.initial_mainnet_peers, Network::Testnet => &self.initial_testnet_peers, @@ -136,7 +137,7 @@ impl Config { /// Resolve initial seed peer IP addresses, based on the configured network. 
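    //
    // Illustrative sketch (not part of this diff): `IndexSet` deduplicates like
    // `HashSet`, but keeps the insertion order from the config file:
    //
    //     use indexmap::IndexSet;
    //
    //     let mut peers: IndexSet<String> = IndexSet::new();
    //     peers.insert("dnsseed.z.cash:8233".to_string());
    //     peers.insert("dnsseed.z.cash:8233".to_string()); // duplicate, ignored
    //     assert_eq!(peers.len(), 1);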
pub async fn initial_peers(&self) -> HashSet { - Config::resolve_peers(self.initial_peer_hostnames()).await + Config::resolve_peers(&self.initial_peer_hostnames().iter().cloned().collect()).await } /// Concurrently resolves `peers` into zero or more IP addresses, with a @@ -296,8 +297,8 @@ impl<'de> Deserialize<'de> for Config { struct DConfig { listen_addr: String, network: Network, - initial_mainnet_peers: HashSet, - initial_testnet_peers: HashSet, + initial_mainnet_peers: IndexSet, + initial_testnet_peers: IndexSet, peerset_initial_target_size: usize, #[serde(alias = "new_peer_interval", with = "humantime_serde")] crawl_new_peer_interval: Duration, diff --git a/zebra-network/src/peer_set/initialize/tests/vectors.rs b/zebra-network/src/peer_set/initialize/tests/vectors.rs index bf71a933553..ebcc1d4d860 100644 --- a/zebra-network/src/peer_set/initialize/tests/vectors.rs +++ b/zebra-network/src/peer_set/initialize/tests/vectors.rs @@ -14,7 +14,6 @@ //! skip all the network tests by setting the `ZEBRA_SKIP_NETWORK_TESTS` environmental variable. use std::{ - collections::HashSet, net::{Ipv4Addr, SocketAddr}, sync::Arc, time::{Duration, Instant}, @@ -22,6 +21,7 @@ use std::{ use chrono::Utc; use futures::{channel::mpsc, FutureExt, StreamExt}; +use indexmap::IndexSet; use tokio::{net::TcpStream, task::JoinHandle}; use tower::{service_fn, Service}; use tracing::Span; @@ -1137,7 +1137,7 @@ async fn add_initial_peers_deadlock() { // Create a list of dummy IPs, and initialize a config using them as the // initial peers. The amount of these peers will overflow // `PEERSET_INITIAL_TARGET_SIZE`. - let mut peers = HashSet::new(); + let mut peers = IndexSet::new(); for address_number in 0..PEER_COUNT { peers.insert( SocketAddr::new(Ipv4Addr::new(127, 1, 1, address_number as _).into(), 1).to_string(), @@ -1173,8 +1173,8 @@ async fn local_listener_port_with(listen_addr: SocketAddr, network: Network) { network, // Stop Zebra making outbound connections - initial_mainnet_peers: HashSet::new(), - initial_testnet_peers: HashSet::new(), + initial_mainnet_peers: IndexSet::new(), + initial_testnet_peers: IndexSet::new(), ..Config::default() }; @@ -1468,7 +1468,7 @@ where { // Create a list of dummy IPs and initialize a config using them as the // initial peers. - let mut peers = HashSet::new(); + let mut peers = IndexSet::new(); for address_number in 0..peer_count { peers.insert( SocketAddr::new(Ipv4Addr::new(127, 1, 1, address_number as _).into(), 1).to_string(), diff --git a/zebrad/src/components/inbound/tests/real_peer_set.rs b/zebrad/src/components/inbound/tests/real_peer_set.rs index f1bc7310651..c6dbb46f24e 100644 --- a/zebrad/src/components/inbound/tests/real_peer_set.rs +++ b/zebrad/src/components/inbound/tests/real_peer_set.rs @@ -1,8 +1,9 @@ //! Inbound service tests with a real peer set. 
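//
// Illustrative summary (not part of this diff): these tests stop outbound
// connections by configuring empty ordered peer sets, along these lines:
//
//     let config = NetworkConfig {
//         initial_mainnet_peers: IndexSet::new(),
//         initial_testnet_peers: IndexSet::new(),
//         ..NetworkConfig::default()
//     };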
-use std::{collections::HashSet, iter, net::SocketAddr, sync::Arc}; +use std::{iter, net::SocketAddr, sync::Arc}; use futures::FutureExt; +use indexmap::IndexSet; use tokio::{sync::oneshot, task::JoinHandle}; use tower::{ buffer::Buffer, @@ -655,8 +656,8 @@ async fn setup( listen_addr: config_listen_addr, // Stop Zebra making outbound connections - initial_mainnet_peers: HashSet::new(), - initial_testnet_peers: HashSet::new(), + initial_mainnet_peers: IndexSet::new(), + initial_testnet_peers: IndexSet::new(), ..NetworkConfig::default() }; diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index f969febd87e..ea7609b5694 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -123,7 +123,7 @@ mod common; use common::{ check::{is_zebrad_version, EphemeralCheck, EphemeralConfig}, - config::{default_test_config, persistent_test_config, testdir}, + config::{default_test_config, persistent_test_config, stored_config_path, testdir}, launch::{ spawn_zebrad_for_rpc_without_initial_peers, ZebradTestDirExt, BETWEEN_NODES_DELAY, LAUNCH_DELAY, @@ -509,6 +509,9 @@ fn valid_generated_config_test() -> Result<()> { // cache conflicts. valid_generated_config("start", "Starting zebrad")?; + // Check that the stored configuration we have for Zebra works + stored_config_works()?; + Ok(()) } @@ -561,6 +564,31 @@ fn valid_generated_config(command: &str, expect_stdout_line_contains: &str) -> R Ok(()) } +fn stored_config_works() -> Result<()> { + let stored_config_path = stored_config_path(); + let run_dir = testdir()?; + + // run zebra with stored config + let mut child = + run_dir.spawn_child(args!["-c", stored_config_path.to_str().unwrap(), "start"])?; + + // zebra was able to start with the stored config + child.expect_stdout_line_matches("Starting zebrad".to_string())?; + + // finish + child.kill()?; + + let output = child.wait_with_output()?; + let output = output.assert_failure()?; + + // [Note on port conflict](#Note on port conflict) + output + .assert_was_killed() + .wrap_err("Possible port conflict. Are there other acceptance tests running?")?; + + Ok(()) +} + /// Test if `zebrad` can sync the first checkpoint on mainnet. /// /// The first checkpoint contains a single genesis block. diff --git a/zebrad/tests/common/config.rs b/zebrad/tests/common/config.rs index 60913f00501..25afa16dba3 100644 --- a/zebrad/tests/common/config.rs +++ b/zebrad/tests/common/config.rs @@ -5,7 +5,11 @@ //! Test functions in this file will not be run. //! This file is only for test library code. -use std::{env, time::Duration}; +use std::{ + env, + path::{Path, PathBuf}, + time::Duration, +}; use color_eyre::eyre::Result; use tempfile::TempDir; @@ -80,3 +84,8 @@ pub fn testdir() -> Result { .tempdir() .map_err(Into::into) } + +/// Get stored config path +pub fn stored_config_path() -> PathBuf { + Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/common/config.toml") +} diff --git a/zebrad/tests/common/config.toml b/zebrad/tests/common/config.toml new file mode 100644 index 00000000000..2291b5fb274 --- /dev/null +++ b/zebrad/tests/common/config.toml @@ -0,0 +1,64 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. 
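+#
+# For example (illustrative commands, not part of the generated file):
+#
+#     zebrad generate -o zebrad.toml   # write this default skeleton to a file
+#     zebrad -c zebrad.toml start      # start zebrad with the edited config
+#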
+# Only a subset of fields are present in the skeleton, since optional values
+# whose default is None are omitted.
+#
+# The config format (including a complete list of sections and fields) is
+# documented here:
+# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html
+#
+# zebrad attempts to load configs in the following order:
+#
+# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`;
+# 2. The file `zebrad.toml` in the user's preference directory (platform-dependent);
+# 3. The default config.

+[consensus]
+checkpoint_sync = true
+debug_skip_parameter_preload = false
+
+[mempool]
+eviction_memory_time = '1h'
+tx_cost_limit = 80000000
+
+[metrics]
+
+[network]
+crawl_new_peer_interval = '1m 1s'
+initial_mainnet_peers = [
+    'dnsseed.z.cash:8233',
+    'dnsseed.str4d.xyz:8233',
+    'mainnet.seeder.zfnd.org:8233',
+    'mainnet.is.yolo.money:8233',
+]
+initial_testnet_peers = [
+    'dnsseed.testnet.z.cash:18233',
+    'testnet.seeder.zfnd.org:18233',
+    'testnet.is.yolo.money:18233',
+]
+listen_addr = '0.0.0.0:8233'
+network = 'Mainnet'
+peerset_initial_target_size = 25
+
+[rpc]
+
+[state]
+cache_dir = 'cache_dir'
+delete_old_database = true
+ephemeral = false
+
+[sync]
+lookahead_limit = 400
+max_concurrent_block_requests = 25
+
+[tracing]
+force_use_color = false
+use_color = true
+use_journald = false
diff --git a/zebrad/tests/common/launch.rs b/zebrad/tests/common/launch.rs
index fad4315d393..c6972614bda 100644
--- a/zebrad/tests/common/launch.rs
+++ b/zebrad/tests/common/launch.rs
@@ -6,7 +6,6 @@
 //! This file is only for test library code.

 use std::{
-    collections::HashSet,
     env,
     net::SocketAddr,
     path::{Path, PathBuf},
@@ -14,6 +13,7 @@ use std::{
 };

 use color_eyre::eyre::Result;
+use indexmap::IndexSet;

 use zebra_chain::parameters::Network;
 use zebra_test::{
@@ -201,8 +201,8 @@ pub fn spawn_zebrad_for_rpc_without_initial_peers(
         .expect("Failed to create a config file with a known RPC listener port");

     config.state.ephemeral = false;
-    config.network.initial_mainnet_peers = HashSet::new();
-    config.network.initial_testnet_peers = HashSet::new();
+    config.network.initial_mainnet_peers = IndexSet::new();
+    config.network.initial_testnet_peers = IndexSet::new();
     config.network.network = network;
     config.mempool.debug_enable_at_height = Some(0);

From e7094201604c2f473145169a10c23a703a57c96d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 27 Jun 2022 07:37:36 +0000
Subject: [PATCH 32/91] build(deps): bump indexmap from 1.8.2 to 1.9.1 (#4671)

Bumps [indexmap](https://github.com/bluss/indexmap) from 1.8.2 to 1.9.1.
- [Release notes](https://github.com/bluss/indexmap/releases)
- [Changelog](https://github.com/bluss/indexmap/blob/master/RELEASES.md)
- [Commits](https://github.com/bluss/indexmap/compare/1.8.2...1.9.1)

---
updated-dependencies:
- dependency-name: indexmap
  dependency-type: direct:production
  update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 16 +++++++++++----- zebra-network/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 5 files changed, 15 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6a00e257286..a37ab7452c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1922,13 +1922,19 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" + [[package]] name = "hashlink" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" dependencies = [ - "hashbrown", + "hashbrown 0.11.2", ] [[package]] @@ -2192,12 +2198,12 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "indexmap" -version = "1.8.2" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6012d540c5baa3589337a98ce73408de9b5a25ec9fc2c6fd6be8f0d39e0ca5a" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg 1.1.0", - "hashbrown", + "hashbrown 0.12.1", "serde", ] @@ -2639,7 +2645,7 @@ dependencies = [ "atomic-shim", "crossbeam-epoch", "crossbeam-utils", - "hashbrown", + "hashbrown 0.11.2", "metrics", "num_cpus", "parking_lot 0.11.2", diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index a7d74177708..8a5160b449d 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -19,7 +19,7 @@ bytes = "1.1.0" chrono = "0.4.19" hex = "0.4.3" humantime-serde = "1.1.1" -indexmap = { version = "1.8.2", features = ["serde"] } +indexmap = { version = "1.9.1", features = ["serde"] } lazy_static = "1.4.0" ordered-map = "0.4.2" pin-project = "1.0.10" diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 52ee2fbb1fc..165e4a3a1b0 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -28,7 +28,7 @@ jsonrpc-derive = "18.0.0" jsonrpc-http-server = "18.0.0" # zebra-rpc needs the preserve_order feature in serde_json, which is a dependency of jsonrpc-core serde_json = { version = "1.0.81", features = ["preserve_order"] } -indexmap = { version = "1.8.2", features = ["serde"] } +indexmap = { version = "1.9.1", features = ["serde"] } tokio = { version = "1.19.2", features = ["time", "rt-multi-thread", "macros", "tracing"] } tower = "0.4.13" diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 0430c5a95b9..735b9f4305b 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" [dependencies] hex = "0.4.3" -indexmap = "1.8.2" +indexmap = "1.9.1" lazy_static = "1.4.0" insta = "1.15.0" proptest = "0.10.1" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 84101723eb0..1d1431d6d0b 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -79,7 +79,7 @@ abscissa_core = "0.5" gumdrop = "0.7" chrono = "0.4.19" humantime-serde = "1.1.1" -indexmap = "1.8.2" +indexmap = "1.9.1" lazy_static = "1.4.0" serde = { version = "1.0.137", features = ["serde_derive"] } toml = "0.5.9" From d37d8aa8c6d8d0f4cda7eda17d04224b839d97ea Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 28 Jun 2022 05:50:47 +1000 Subject: [PATCH 33/91] fix(ci): Ignore lightwalletd hangs for now (#4663) * Ignore lightwalletd hangs for now * 
cargo fmt --all * cargo +stable fmt --all Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebrad/tests/acceptance.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index ea7609b5694..d9864707dad 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -1328,7 +1328,8 @@ fn lightwalletd_integration_test(test_type: LightwalletdTestType) -> Result<()> // lightwalletd will keep retrying getblock. if !test_type.allow_lightwalletd_cached_state() { if test_type.needs_zebra_cached_state() { - lightwalletd.expect_stdout_line_matches("[Aa]dding block to cache")?; + lightwalletd + .expect_stdout_line_matches("([Aa]dding block to cache)|([Ww]aiting for block)")?; } else { lightwalletd.expect_stdout_line_matches(regex::escape( "Waiting for zcashd height to reach Sapling activation height (419200)", @@ -1340,7 +1341,14 @@ fn lightwalletd_integration_test(test_type: LightwalletdTestType) -> Result<()> // Wait for Zebra to sync its cached state to the chain tip zebrad.expect_stdout_line_matches(SYNC_FINISHED_REGEX)?; + // Wait for lightwalletd to sync some blocks + lightwalletd + .expect_stdout_line_matches("([Aa]dding block to cache)|([Ww]aiting for block)")?; + // Wait for lightwalletd to sync to Zebra's tip + // + // TODO: re-enable this code when lightwalletd hangs are fixed + #[cfg(lightwalletd_hang_fix)] lightwalletd.expect_stdout_line_matches("[Ww]aiting for block")?; // Check Zebra is still at the tip (also clears and prints Zebra's logs) @@ -1350,7 +1358,9 @@ fn lightwalletd_integration_test(test_type: LightwalletdTestType) -> Result<()> // But when it gets near the tip, it starts using the mempool. // // adityapk00/lightwalletd logs mempool changes, but zcash/lightwalletd doesn't. 
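    //
    // Illustrative note (not part of this diff): the alternation regex used
    // above accepts either sync log line, whatever the case of its first
    // letter, e.g. (made-up log lines):
    //
    //     "Adding block to cache 419200"    matches ([Aa]dding block to cache)
    //     "waiting for block: 1234"         matches ([Ww]aiting for block)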
- #[cfg(adityapk00_lightwalletd)] + // + // TODO: re-enable this code when lightwalletd hangs are fixed + #[cfg(lightwalletd_hang_fix)] { lightwalletd.expect_stdout_line_matches(regex::escape( "Block hash changed, clearing mempool clients", From 6b227943966d290f4c2bc8a1611db56beea2e9a1 Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Mon, 27 Jun 2022 20:12:56 -0300 Subject: [PATCH 34/91] deps: upgrade ECC dependencies (#4699) --- Cargo.lock | 162 ++++++++++++++++++++++++++++++------- Cargo.toml | 2 +- deny.toml | 10 ++- zebra-chain/Cargo.toml | 6 +- zebra-consensus/Cargo.toml | 6 +- zebra-state/Cargo.toml | 2 +- 6 files changed, 151 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a37ab7452c6..71e9f55b68a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1487,8 +1487,18 @@ dependencies = [ [[package]] name = "equihash" -version = "0.1.0" -source = "git+https://github.com/ZcashFoundation/librustzcash.git?rev=6d75718076e592a41b6bd6ec916dc15420e4cc3c#6d75718076e592a41b6bd6ec916dc15420e4cc3c" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab579d7cf78477773b03e80bc2f89702ef02d7112c711d54ca93dcdce68533d5" +dependencies = [ + "blake2b_simd 1.0.0", + "byteorder", +] + +[[package]] +name = "equihash" +version = "0.2.0" +source = "git+https://github.com/ZcashFoundation/librustzcash.git?rev=4567a37ceccbd506a58aaaded39ba14c952c1510#4567a37ceccbd506a58aaaded39ba14c952c1510" dependencies = [ "blake2b_simd 1.0.0", "byteorder", @@ -1891,7 +1901,25 @@ dependencies = [ "bitvec", "ff", "group", - "halo2_proofs", + "halo2_proofs 0.1.0", + "lazy_static", + "pasta_curves", + "rand 0.8.5", + "subtle", + "uint", +] + +[[package]] +name = "halo2_gadgets" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e10bf9924da1754e443641c9e7f9f00483749f8fb837fde696ef6ed6e2f079" +dependencies = [ + "arrayvec 0.7.2", + "bitvec", + "ff", + "group", + "halo2_proofs 0.2.0", "lazy_static", "pasta_curves", "rand 0.8.5", @@ -1913,6 +1941,21 @@ dependencies = [ "rayon", ] +[[package]] +name = "halo2_proofs" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff771b9a2445cd2545c9ef26d863c290fbb44ae440c825a20eb7156f67a949a" +dependencies = [ + "blake2b_simd 1.0.0", + "ff", + "group", + "pasta_curves", + "rand_core 0.6.3", + "rayon", + "tracing", +] + [[package]] name = "hashbrown" version = "0.11.2" @@ -2951,8 +2994,35 @@ dependencies = [ "ff", "fpe", "group", - "halo2_gadgets", - "halo2_proofs", + "halo2_gadgets 0.1.0", + "halo2_proofs 0.1.0", + "hex", + "incrementalmerkletree", + "lazy_static", + "memuse", + "nonempty", + "pasta_curves", + "rand 0.8.5", + "reddsa", + "serde", + "subtle", + "zcash_note_encryption 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "orchard" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7619db7f917afd9b1139044c595fab1b6166de2db62317794b5f5e34a2104ae1" +dependencies = [ + "aes", + "bitvec", + "blake2b_simd 1.0.0", + "ff", + "fpe", + "group", + "halo2_gadgets 0.2.0", + "halo2_proofs 0.2.0", "hex", "incrementalmerkletree", "lazy_static", @@ -2963,6 +3033,7 @@ dependencies = [ "reddsa", "serde", "subtle", + "tracing", "zcash_note_encryption 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -6026,7 +6097,7 @@ dependencies = [ [[package]] name = "zcash_encoding" version = "0.1.0" -source = 
"git+https://github.com/ZcashFoundation/librustzcash.git?rev=6d75718076e592a41b6bd6ec916dc15420e4cc3c#6d75718076e592a41b6bd6ec916dc15420e4cc3c" +source = "git+https://github.com/ZcashFoundation/librustzcash.git?rev=4567a37ceccbd506a58aaaded39ba14c952c1510#4567a37ceccbd506a58aaaded39ba14c952c1510" dependencies = [ "byteorder", "nonempty", @@ -6058,7 +6129,7 @@ dependencies = [ [[package]] name = "zcash_note_encryption" version = "0.1.0" -source = "git+https://github.com/ZcashFoundation/librustzcash.git?rev=6d75718076e592a41b6bd6ec916dc15420e4cc3c#6d75718076e592a41b6bd6ec916dc15420e4cc3c" +source = "git+https://github.com/ZcashFoundation/librustzcash.git?rev=4567a37ceccbd506a58aaaded39ba14c952c1510#4567a37ceccbd506a58aaaded39ba14c952c1510" dependencies = [ "chacha20", "chacha20poly1305", @@ -6081,7 +6152,7 @@ dependencies = [ "bs58", "byteorder", "chacha20poly1305", - "equihash 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "equihash 0.1.0", "ff", "fpe", "group", @@ -6092,7 +6163,7 @@ dependencies = [ "lazy_static", "memuse", "nonempty", - "orchard", + "orchard 0.1.0", "rand 0.8.5", "rand_core 0.6.3", "ripemd", @@ -6105,8 +6176,45 @@ dependencies = [ [[package]] name = "zcash_primitives" -version = "0.6.0" -source = "git+https://github.com/ZcashFoundation/librustzcash.git?rev=6d75718076e592a41b6bd6ec916dc15420e4cc3c#6d75718076e592a41b6bd6ec916dc15420e4cc3c" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fbb401f5dbc482b831954aaa7cba0a8fe148241db6d19fe7cebda78252ca680" +dependencies = [ + "aes", + "bip0039", + "bitvec", + "blake2b_simd 1.0.0", + "blake2s_simd", + "bls12_381", + "bs58", + "byteorder", + "chacha20poly1305", + "equihash 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ff", + "fpe", + "group", + "hdwallet", + "hex", + "incrementalmerkletree", + "jubjub", + "lazy_static", + "memuse", + "nonempty", + "orchard 0.2.0", + "rand 0.8.5", + "rand_core 0.6.3", + "ripemd", + "secp256k1", + "sha2", + "subtle", + "zcash_encoding 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_note_encryption 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "zcash_primitives" +version = "0.7.0" +source = "git+https://github.com/ZcashFoundation/librustzcash.git?rev=4567a37ceccbd506a58aaaded39ba14c952c1510#4567a37ceccbd506a58aaaded39ba14c952c1510" dependencies = [ "aes", "bip0039", @@ -6116,7 +6224,7 @@ dependencies = [ "bls12_381", "byteorder", "chacha20poly1305", - "equihash 0.1.0 (git+https://github.com/ZcashFoundation/librustzcash.git?rev=6d75718076e592a41b6bd6ec916dc15420e4cc3c)", + "equihash 0.2.0 (git+https://github.com/ZcashFoundation/librustzcash.git?rev=4567a37ceccbd506a58aaaded39ba14c952c1510)", "ff", "fpe", "group", @@ -6126,19 +6234,19 @@ dependencies = [ "lazy_static", "memuse", "nonempty", - "orchard", + "orchard 0.2.0", "rand 0.8.5", "rand_core 0.6.3", "sha2", "subtle", - "zcash_encoding 0.1.0 (git+https://github.com/ZcashFoundation/librustzcash.git?rev=6d75718076e592a41b6bd6ec916dc15420e4cc3c)", - "zcash_note_encryption 0.1.0 (git+https://github.com/ZcashFoundation/librustzcash.git?rev=6d75718076e592a41b6bd6ec916dc15420e4cc3c)", + "zcash_encoding 0.1.0 (git+https://github.com/ZcashFoundation/librustzcash.git?rev=4567a37ceccbd506a58aaaded39ba14c952c1510)", + "zcash_note_encryption 0.1.0 (git+https://github.com/ZcashFoundation/librustzcash.git?rev=4567a37ceccbd506a58aaaded39ba14c952c1510)", ] [[package]] name = "zcash_proofs" -version = "0.6.0" 
-source = "git+https://github.com/ZcashFoundation/librustzcash.git?rev=6d75718076e592a41b6bd6ec916dc15420e4cc3c#6d75718076e592a41b6bd6ec916dc15420e4cc3c" +version = "0.7.0" +source = "git+https://github.com/ZcashFoundation/librustzcash.git?rev=4567a37ceccbd506a58aaaded39ba14c952c1510#4567a37ceccbd506a58aaaded39ba14c952c1510" dependencies = [ "bellman", "blake2b_simd 1.0.0", @@ -6151,7 +6259,7 @@ dependencies = [ "lazy_static", "minreq", "rand_core 0.6.3", - "zcash_primitives 0.6.0 (git+https://github.com/ZcashFoundation/librustzcash.git?rev=6d75718076e592a41b6bd6ec916dc15420e4cc3c)", + "zcash_primitives 0.7.0 (git+https://github.com/ZcashFoundation/librustzcash.git?rev=4567a37ceccbd506a58aaaded39ba14c952c1510)", ] [[package]] @@ -6165,12 +6273,12 @@ dependencies = [ "cc", "libc", "memuse", - "orchard", + "orchard 0.1.0", "rand_core 0.6.3", "tracing", "zcash_encoding 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "zcash_note_encryption 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_primitives 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_primitives 0.6.0", ] [[package]] @@ -6191,17 +6299,17 @@ dependencies = [ "criterion", "displaydoc", "ed25519-zebra", - "equihash 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "equihash 0.1.0", "fpe", "futures", "group", - "halo2_proofs", + "halo2_proofs 0.2.0", "hex", "incrementalmerkletree", "itertools", "jubjub", "lazy_static", - "orchard", + "orchard 0.2.0", "primitive-types", "proptest", "proptest-derive", @@ -6226,7 +6334,7 @@ dependencies = [ "zcash_encoding 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "zcash_history", "zcash_note_encryption 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_primitives 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_primitives 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "zebra-test", ] @@ -6247,13 +6355,13 @@ dependencies = [ "displaydoc", "futures", "futures-util", - "halo2_proofs", + "halo2_proofs 0.2.0", "hex", "jubjub", "lazy_static", "metrics", "once_cell", - "orchard", + "orchard 0.2.0", "proptest", "proptest-derive", "rand 0.7.3", @@ -6372,7 +6480,7 @@ dependencies = [ "dirs", "displaydoc", "futures", - "halo2_proofs", + "halo2_proofs 0.2.0", "hex", "insta", "itertools", diff --git a/Cargo.toml b/Cargo.toml index 9f89983a57e..e2958c15afe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -66,4 +66,4 @@ lto = "thin" [patch.crates-io] # Currently pointing to `download-sprout-params` branch. -zcash_proofs = { git = "https://github.com/ZcashFoundation/librustzcash.git", rev = "6d75718076e592a41b6bd6ec916dc15420e4cc3c" } +zcash_proofs = { git = "https://github.com/ZcashFoundation/librustzcash.git", rev = "4567a37ceccbd506a58aaaded39ba14c952c1510" } diff --git a/deny.toml b/deny.toml index cdacef6c535..59f86626e8e 100644 --- a/deny.toml +++ b/deny.toml @@ -26,10 +26,16 @@ allow = [ skip = [ # wait for zcash_proofs fork be merged back into upstream # https://github.com/ZcashFoundation/zebra/issues/3831 - { name = "equihash", version = "=0.1.0" }, + { name = "equihash", version = "=0.2.0" }, { name = "zcash_encoding", version = "=0.1.0" }, { name = "zcash_note_encryption", version = "=0.1.0" }, - { name = "zcash_primitives", version = "=0.6.0" }, + { name = "zcash_primitives", version = "=0.7.0" }, + + # wait until zcash updates its halo2, orchard, etc. 
dependencies + # (which is likely to happen in the release after 5.0.0) + { name = "halo2_gadgets", version = "=0.1.0" }, + { name = "halo2_proofs", version = "=0.1.0" }, + { name = "orchard", version = "=0.1.0" }, ] # Similarly to `skip` allows you to skip certain crates during duplicate # detection. Unlike skip, it also includes the entire tree of transitive diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 6be5f905545..3c0aa427efe 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -27,7 +27,7 @@ displaydoc = "0.2.3" fpe = "0.5.1" futures = "0.3.21" group = "0.12.0" -halo2 = { package = "halo2_proofs", version = "0.1.0" } +halo2 = { package = "halo2_proofs", version = "0.2.0" } hex = { version = "0.4.3", features = ["serde"] } incrementalmerkletree = "0.3.0" itertools = "0.10.3" @@ -49,11 +49,11 @@ thiserror = "1.0.31" uint = "0.9.1" x25519-dalek = { version = "1.2.0", features = ["serde"] } -orchard = "0.1.0" +orchard = "0.2.0" equihash = "0.1.0" zcash_note_encryption = "0.1" -zcash_primitives = { version = "0.6.0", features = ["transparent-inputs"] } +zcash_primitives = { version = "0.7.0", features = ["transparent-inputs"] } zcash_encoding = "0.1.0" zcash_history = "0.3.0" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index ebdd253a51f..80c8c1a56d7 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -16,7 +16,7 @@ bls12_381 = "0.7.0" jubjub = "0.9.0" rand = { version = "0.8.5", package = "rand" } -halo2 = { package = "halo2_proofs", version = "0.1.0" } +halo2 = { package = "halo2_proofs", version = "0.2.0" } chrono = "0.4.19" dirs = "4.0.0" @@ -34,11 +34,11 @@ tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } tracing = "0.1.31" tracing-futures = "0.2.5" -orchard = "0.1.0" +orchard = "0.2.0" # TODO: replace with upstream librustzcash when these changes are merged (#3037) # Currently pointing to `download-sprout-params` branch. -zcash_proofs = { git = "https://github.com/ZcashFoundation/librustzcash.git", rev = "6d75718076e592a41b6bd6ec916dc15420e4cc3c", features = ["local-prover", "multicore", "download-params"] } +zcash_proofs = { git = "https://github.com/ZcashFoundation/librustzcash.git", rev = "4567a37ceccbd506a58aaaded39ba14c952c1510", features = ["local-prover", "multicore", "download-params"] } tower-fallback = { path = "../tower-fallback/" } tower-batch = { path = "../tower-batch/" } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 633c0b8bda9..56bde71341c 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -46,7 +46,7 @@ insta = { version = "1.15.0", features = ["ron"] } proptest = "0.10.1" proptest-derive = "0.3.0" -halo2 = { package = "halo2_proofs", version = "0.1.0" } +halo2 = { package = "halo2_proofs", version = "0.2.0" } jubjub = "0.9.0" tokio = { version = "1.19.2", features = ["full", "tracing", "test-util"] } From cbd703b3fc3e3b593fb28d21d7cbaeade12da0b4 Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 28 Jun 2022 10:36:18 +1000 Subject: [PATCH 35/91] refactor(ci): Split `docker run` into launch, `logs`, and `wait` (#4690) * Put arguments to "docker run" on different lines And update some comments. 
* Split docker run into launch, logs, and wait * Remove mistaken "needs state" condition on log and results job * Exit the ssh and the job with the container test's exit status --- .github/workflows/deploy-gcp-tests.yml | 164 +++++++++++++++++++++---- 1 file changed, 143 insertions(+), 21 deletions(-) diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index 7a57cd8796d..e6ae8ed59c7 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -81,7 +81,7 @@ env: MACHINE_TYPE: c2d-standard-16 jobs: - # set up the test without any cached state + # set up the test, if it doesn't use any cached state # each test runs one of the *-with/without-cached-state job series, and skips the other setup-without-cached-state: name: Setup ${{ inputs.test_id }} test @@ -146,11 +146,12 @@ jobs: ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ " - test-without-cached-state: - name: Run ${{ inputs.test_id }} test + # launch the test, if it doesn't use any cached state + launch-without-cached-state: + name: Launch ${{ inputs.test_id }} test needs: [ setup-without-cached-state ] - # if the previous step fails, we also want to run and fail this step, - # so that the branch protection rule fails in Mergify and GitHub + # If the previous job fails, we also want to run and fail this job, + # so that the branch protection rule fails in Mergify and GitHub. if: ${{ !cancelled() && !inputs.needs_zebra_state }} runs-on: ubuntu-latest permissions: @@ -180,7 +181,8 @@ jobs: service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' - - name: Run ${{ inputs.test_id }} test + # Launch the test without any cached state + - name: Launch ${{ inputs.test_id }} test run: | gcloud compute ssh \ ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ @@ -189,12 +191,17 @@ jobs: --ssh-flag="-o ServerAliveInterval=5" \ --command \ "\ - docker run ${{ inputs.test_variables }} -t --name ${{ inputs.test_id }} \ + docker run \ + --name ${{ inputs.test_id }} \ + --tty \ + --detach \ + ${{ inputs.test_variables }} \ --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ " - # set up the test using cached state + + # set up the test, if it uses cached state # each test runs one of the *-with/without-cached-state job series, and skips the other setup-with-cached-state: name: Setup ${{ inputs.test_id }} test @@ -313,11 +320,12 @@ jobs: ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ " - test-with-cached-state: - name: Run ${{ inputs.test_id }} test + # launch the test, if it uses cached state + launch-with-cached-state: + name: Launch ${{ inputs.test_id }} test needs: [ setup-with-cached-state ] - # if the previous step fails, we also want to run and fail this step, - # so that the branch protection rule fails in Mergify and GitHub + # If the previous job fails, we also want to run and fail this job, + # so that the branch protection rule fails in Mergify and GitHub. if: ${{ !cancelled() && inputs.needs_zebra_state }} runs-on: ubuntu-latest permissions: @@ -348,8 +356,8 @@ jobs: service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' - # Run the test with the previously created Zebra-only cached state. - # Each test runs one of the "Run test" steps, and skips the other. 
+ # Launch the test with the previously created Zebra-only cached state. + # Each test runs one of the "Launch test" steps, and skips the other. # # SSH into the just created VM, and create a Docker container to run the incoming test # from ${{ inputs.test_id }}, then mount the docker volume created in the previous job. @@ -364,7 +372,7 @@ jobs: # Although we're mounting the disk root, Zebra will only respect the values from # $ZEBRA_CACHED_STATE_DIR. The inputs like ${{ inputs.zebra_state_dir }} are only used # to match that variable paths. - - name: Run ${{ inputs.test_id }} test + - name: Launch ${{ inputs.test_id }} test # This step only runs for tests that just read or write a Zebra state. # # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. @@ -378,13 +386,17 @@ jobs: --ssh-flag="-o ServerAliveInterval=5" \ --command \ "\ - docker run ${{ inputs.test_variables }} -t --name ${{ inputs.test_id }} \ + docker run \ + --name ${{ inputs.test_id }} \ + --tty \ + --detach \ + ${{ inputs.test_variables }} \ --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ " - # Run the test with the previously created Lightwalletd and Zebra cached state. - # Each test runs one of the "Run test" steps, and skips the other. + # Launch the test with the previously created Lightwalletd and Zebra cached state. + # Each test runs one of the "Launch test" steps, and skips the other. # # SSH into the just created VM, and create a Docker container to run the incoming test # from ${{ inputs.test_id }}, then mount the docker volume created in the previous job. @@ -408,7 +420,7 @@ jobs: # Although we're mounting the disk root to both directories, Zebra and Lightwalletd # will only respect the values from $ZEBRA_CACHED_STATE_DIR and $LIGHTWALLETD_DATA_DIR, # the inputs like ${{ inputs.lwd_state_dir }} are only used to match those variables paths. - - name: Run ${{ inputs.test_id }} test + - name: Launch ${{ inputs.test_id }} test # This step only runs for tests that read or write Lightwalletd and Zebra states. # # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. @@ -422,20 +434,130 @@ jobs: --ssh-flag="-o ServerAliveInterval=5" \ --command \ "\ - docker run ${{ inputs.test_variables }} -t --name ${{ inputs.test_id }} \ + docker run \ + --name ${{ inputs.test_id }} \ + --tty \ + --detach \ + ${{ inputs.test_variables }} \ --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} \ ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ " + + # follow the logs of the test we just launched + follow-logs: + name: Show logs for ${{ inputs.test_id }} test + needs: [ launch-with-cached-state, launch-without-cached-state ] + # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. + # If the previous job fails, we also want to run and fail this job, + # so that the branch protection rule fails in Mergify and GitHub. 
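+      #
+      # Illustrative comparison (not part of this workflow): with the implicit
+      # default condition, an upstream failure would just skip this job, and
+      # the branch protection rule would never see a failed check:
+      #
+      #   if: ${{ success() }}      # skipped when a needed job fails
+      #   if: ${{ !cancelled() }}   # still runs, so the failure is surfaced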
+ if: ${{ !cancelled() }} + runs-on: ubuntu-latest + permissions: + contents: 'read' + id-token: 'write' + steps: + - uses: actions/checkout@v3.0.2 + with: + persist-credentials: false + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Downcase network name for disks + run: | + NETWORK_CAPS=${{ inputs.network }} + echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v0.8.0 + with: + workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' + service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' + token_format: 'access_token' + + # Show all the logs since the container launched + - name: Show logs for ${{ inputs.test_id }} test + run: | + gcloud compute ssh \ + ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ env.ZONE }} \ + --quiet \ + --ssh-flag="-o ServerAliveInterval=5" \ + --command \ + "\ + docker logs \ + --tail all \ + --follow \ + ${{ inputs.test_id }} \ + " + + # wait for the result of the test + test-result: + # TODO: update the job name here, and in the branch protection rules + name: Run ${{ inputs.test_id }} test + needs: [ follow-logs ] + # If the previous job fails, we also want to run and fail this job, + # so that the branch protection rule fails in Mergify and GitHub. + if: ${{ !cancelled() }} + runs-on: ubuntu-latest + permissions: + contents: 'read' + id-token: 'write' + steps: + - uses: actions/checkout@v3.0.2 + with: + persist-credentials: false + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Downcase network name for disks + run: | + NETWORK_CAPS=${{ inputs.network }} + echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v0.8.0 + with: + workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' + service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' + token_format: 'access_token' + + # Wait for the container to finish, then exit with the test's exit status. + # + # `docker wait` prints the container exit status as a string, but we need to exit `ssh` with that status. + # `docker wait` can also wait for multiple containers, but we only ever wait for a single container. + - name: Result of ${{ inputs.test_id }} test + run: | + gcloud compute ssh \ + ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ env.ZONE }} \ + --quiet \ + --ssh-flag="-o ServerAliveInterval=5" \ + --command \ + "\ + exit $(docker wait ${{ inputs.test_id }}) \ + " + + # create a state image from the instance's state disk, if requested by the caller create-state-image: name: Create ${{ inputs.test_id }} cached state image runs-on: ubuntu-latest + needs: [ test-result ] # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. # Normally, if a job is skipped, all the jobs that depend on it are also skipped. # So we need to override the default success() check to make this job run. 
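    # In other words (illustrative, not part of this change): without the
    # `!cancelled() && !failure()` override used below, this job would behave
    # as if it had the implicit `if: ${{ success() }}`, and a single skipped
    # dependency would silently skip creating the cached state image too.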
- needs: [ test-without-cached-state, test-with-cached-state ] if: ${{ !cancelled() && !failure() && inputs.saves_to_disk }} permissions: contents: 'read' From 3b839b7976f8ce286c0bd2fddff4dede39911aef Mon Sep 17 00:00:00 2001 From: Marek Date: Tue, 28 Jun 2022 02:36:36 +0200 Subject: [PATCH 36/91] Warn the user when Zebra doesn't recognize the format of `zebrad.toml` (#4689) * Warn the user when Zebra cannot parse `zebrad.toml` * Test that Zebra warns the user when it cannot parse `zebrad.toml` * Fix up a mistaken merge change * Suggest how to fix `zebrad.toml` when Zebra cannot parse it Co-authored-by: teor --- zebrad/src/application.rs | 16 +++++-- zebrad/tests/acceptance.rs | 94 ++++++++++++++++++++++++++++++++++---- 2 files changed, 95 insertions(+), 15 deletions(-) diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index 494367216f0..d2a1380693e 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -5,6 +5,7 @@ use std::{fmt::Write as _, io::Write as _, process}; use abscissa_core::{ application::{self, fatal_error, AppCell}, config::{self, Configurable}, + status_err, terminal::{component::Terminal, stderr, stdout, ColorChoice}, Application, Component, EntryPoint, FrameworkError, Shutdown, StandardPaths, Version, }; @@ -202,11 +203,16 @@ impl Application for ZebradApp { // Load config *after* framework components so that we can // report an error to the terminal if it occurs. - let config = command - .config_path() - .map(|path| self.load_config(&path)) - .transpose()? - .unwrap_or_default(); + let config = match command.config_path() { + Some(path) => match self.load_config(&path) { + Ok(config) => config, + Err(e) => { + status_err!("Zebra could not parse the provided config file. This might mean you are using a deprecated format of the file. You can generate a valid config by running \"zebrad generate\", and diff it against yours to examine any format inconsistencies."); + return Err(e); + } + }, + None => ZebradConfig::default(), + }; let config = command.process_config(config)?; diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index d9864707dad..d8a4509c92c 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -103,10 +103,10 @@ //! //! Please refer to the documentation of each test for more information. -use std::{collections::HashSet, env, path::PathBuf}; +use std::{collections::HashSet, env, fs, path::PathBuf, time::Duration}; use color_eyre::{ - eyre::{Result, WrapErr}, + eyre::{eyre, Result, WrapErr}, Help, }; @@ -355,7 +355,6 @@ fn misconfigured_ephemeral_missing_directory() -> Result<()> { } fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) -> Result<()> { - use std::fs; use std::io::ErrorKind; zebra_test::init(); @@ -502,19 +501,25 @@ fn version_args() -> Result<()> { Ok(()) } +/// Run config tests that use the default ports and paths. +/// +/// Unlike the other tests, these tests can not be run in parallel, because +/// they use the generated config. So parallel execution can cause port and +/// cache conflicts. #[test] -fn valid_generated_config_test() -> Result<()> { - // Unlike the other tests, these tests can not be run in parallel, because - // they use the generated config. So parallel execution can cause port and - // cache conflicts. 
+fn config_test() -> Result<()> { valid_generated_config("start", "Starting zebrad")?; - // Check that the stored configuration we have for Zebra works + // Check what happens when Zebra parses an invalid config + invalid_generated_config()?; + + // Check that an older stored configuration we have for Zebra works stored_config_works()?; Ok(()) } +/// Test that `zebrad start` can parse the output from `zebrad generate`. fn valid_generated_config(command: &str, expect_stdout_line_contains: &str) -> Result<()> { zebra_test::init(); @@ -564,6 +569,77 @@ fn valid_generated_config(command: &str, expect_stdout_line_contains: &str) -> R Ok(()) } +/// Checks that Zebra prints an informative message when it cannot parse the +/// config file. +fn invalid_generated_config() -> Result<()> { + zebra_test::init(); + + let testdir = &testdir()?; + + // Add a config file name to tempdir path. + let config_path = testdir.path().join("zebrad.toml"); + + // Generate a valid config file in the temp dir. + let child = testdir.spawn_child(args!["generate", "-o": config_path.to_str().unwrap()])?; + + let output = child.wait_with_output()?; + let output = output.assert_success()?; + + assert_with_context!( + config_path.exists(), + &output, + "generated config file not found" + ); + + // Load the valid config file that Zebra generated. + let mut config_file = fs::read_to_string(config_path.to_str().unwrap()).unwrap(); + + // Let's now alter the config file so that it contains a deprecated format + // of `mempool.eviction_memory_time`. + + config_file = config_file + .lines() + // Remove the valid `eviction_memory_time` key/value pair from the + // config. + .filter(|line| !line.contains("eviction_memory_time")) + .map(|line| line.to_owned() + "\n") + .collect(); + + // Append the `eviction_memory_time` key/value pair in a deprecated format. + config_file += r" + + [mempool.eviction_memory_time] + nanos = 0 + secs = 3600 + "; + + // Write the altered config file so that Zebra can pick it up. + fs::write(config_path.to_str().unwrap(), config_file.as_bytes()) + .expect("Could not write the altered config file."); + + // Run Zebra in a temp dir so that it loads the config. + let mut child = testdir.spawn_child(args!["start"])?; + + // Return an error if Zebra is running for more than two seconds. + // + // Since the config is invalid, Zebra should terminate instantly after its + // start. Two seconds should be sufficient for Zebra to read the config file + // and terminate. + std::thread::sleep(Duration::from_secs(2)); + if child.is_running() { + child.kill()?; + return Err(eyre!("Zebra should not be running anymore.")); + } + + let output = child.wait_with_output()?; + + // Check that Zebra produced an informative message. + output.stderr_contains("Zebra could not parse the provided config file. This might mean you are using a deprecated format of the file.")?; + + Ok(()) +} + +/// Test that an older `zebrad.toml` can still be parsed by the latest `zebrad`. fn stored_config_works() -> Result<()> { let stored_config_path = stored_config_path(); let run_dir = testdir()?; @@ -1596,8 +1672,6 @@ where // See #1781. 
#[cfg(target_os = "linux")] if node2.is_running() { - use color_eyre::eyre::eyre; - return node2 .kill_on_error::<(), _>(Err(eyre!( "conflicted node2 was still running, but the test expected a panic" From d4b9353d67fa8ad0d1a703cc976d981b04b09fef Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 28 Jun 2022 12:51:41 +1000 Subject: [PATCH 37/91] feat(log): Show the current network upgrade in progress logs (#4694) * Improve time logging using humantime * Only log full seconds, ignore the fractional part * Move humantime_seconds to tracing::fmt * Move the progress task to its own module * Add missing humantime dependency * Log the network upgrade in progress logs * Log when Zebra verifies the final checkpoint --- Cargo.lock | 1 + zebra-consensus/src/checkpoint.rs | 5 + zebrad/Cargo.toml | 1 + zebrad/src/commands/start.rs | 246 +---------------------- zebrad/src/components/sync.rs | 2 + zebrad/src/components/sync/progress.rs | 259 +++++++++++++++++++++++++ zebrad/src/components/tracing.rs | 2 + zebrad/src/components/tracing/fmt.rs | 15 ++ 8 files changed, 288 insertions(+), 243 deletions(-) create mode 100644 zebrad/src/components/sync/progress.rs create mode 100644 zebrad/src/components/tracing/fmt.rs diff --git a/Cargo.lock b/Cargo.lock index 71e9f55b68a..fe6b474da25 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6558,6 +6558,7 @@ dependencies = [ "futures", "gumdrop", "hex", + "humantime", "humantime-serde", "hyper", "indexmap", diff --git a/zebra-consensus/src/checkpoint.rs b/zebra-consensus/src/checkpoint.rs index 4de26b25053..30424bac6cf 100644 --- a/zebra-consensus/src/checkpoint.rs +++ b/zebra-consensus/src/checkpoint.rs @@ -464,6 +464,11 @@ where if verified_height == self.checkpoint_list.max_height() { metrics::gauge!("checkpoint.verified.height", verified_height.0 as f64); self.verifier_progress = FinalCheckpoint; + + tracing::info!( + final_checkpoint_height = ?verified_height, + "verified final checkpoint: starting full validation", + ); } else if self.checkpoint_list.contains(verified_height) { metrics::gauge!("checkpoint.verified.height", verified_height.0 as f64); self.verifier_progress = PreviousCheckpoint(verified_height); diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 1d1431d6d0b..473c3454d03 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -78,6 +78,7 @@ zebra-state = { path = "../zebra-state" } abscissa_core = "0.5" gumdrop = "0.7" chrono = "0.4.19" +humantime = "2.1.0" humantime-serde = "1.1.1" indexmap = "1.9.1" lazy_static = "1.4.0" diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index ff1d592e6a7..d4b48d4ee37 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -67,24 +67,15 @@ //! //! Some of the diagnostic features are optional, and need to be enabled at compile-time. 
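//
// Illustrative sketch (not part of this diff): the `humantime` crate that this
// change adds renders `Duration`s as human-readable text, roughly like this:
//
//     use std::time::Duration;
//
//     // 123 whole seconds render as "2m 3s"; the new `humantime_seconds`
//     // helper in `tracing::fmt` is assumed to drop fractional seconds first.
//     assert_eq!(humantime::format_duration(Duration::from_secs(123)).to_string(), "2m 3s");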
-use std::{cmp::max, ops::Add, time::Duration}; +use std::cmp::max; use abscissa_core::{config, Command, FrameworkError, Options, Runnable}; -use chrono::Utc; use color_eyre::eyre::{eyre, Report}; use futures::FutureExt; -use num_integer::div_ceil; use tokio::{pin, select, sync::oneshot}; use tower::{builder::ServiceBuilder, util::BoxService}; use tracing_futures::Instrument; -use zebra_chain::{ - block::Height, - chain_tip::ChainTip, - parameters::{Network, NetworkUpgrade, POST_BLOSSOM_POW_TARGET_SPACING}, -}; -use zebra_consensus::CheckpointList; - use zebra_rpc::server::RpcServer; use crate::{ @@ -92,7 +83,7 @@ use crate::{ components::{ inbound::{self, InboundSetupData}, mempool::{self, Mempool}, - sync::{self, SyncStatus}, + sync::{self, show_block_chain_progress}, tokio::{RuntimeRun, TokioComponent}, ChainSync, Inbound, }, @@ -216,7 +207,7 @@ impl StartCmd { ); let progress_task_handle = tokio::spawn( - Self::update_progress(config.network.network, latest_chain_tip, sync_status) + show_block_chain_progress(config.network.network, latest_chain_tip, sync_status) .in_current_span(), ); @@ -361,237 +352,6 @@ impl StartCmd { ), ) } - - /// Logs Zebra's estimated progress towards the chain tip. - async fn update_progress( - network: Network, - latest_chain_tip: impl ChainTip, - sync_status: SyncStatus, - ) { - // The amount of time between progress logs. - const LOG_INTERVAL: Duration = Duration::from_secs(60); - - // The number of blocks we consider to be close to the tip. - // - // Most chain forks are 1-7 blocks long. - const MAX_CLOSE_TO_TIP_BLOCKS: i32 = 1; - - // Skip slow sync warnings when we are this close to the tip. - // - // In testing, we've seen warnings around 30 blocks. - // - // TODO: replace with `MAX_CLOSE_TO_TIP_BLOCKS` after fixing slow syncing near tip (#3375) - const MIN_SYNC_WARNING_BLOCKS: i32 = 60; - - // The number of fractional digits in sync percentages. - const SYNC_PERCENT_FRAC_DIGITS: usize = 3; - - // The minimum number of extra blocks mined between updating a checkpoint list, - // and running an automated test that depends on that list. - // - // Makes sure that the block finalization code always runs in sync tests, - // even if the miner or test node clock is wrong by a few minutes. - // - // This is an estimate based on the time it takes to: - // - get the tip height from `zcashd`, - // - run `zebra-checkpoints` to update the checkpoint list, - // - submit a pull request, and - // - run a CI test that logs progress based on the new checkpoint height. - // - // We might add tests that sync from a cached tip state, - // so we only allow a few extra blocks here. - const MIN_BLOCKS_MINED_AFTER_CHECKPOINT_UPDATE: i32 = 10; - - // The minimum number of extra blocks after the highest checkpoint, based on: - // - the non-finalized state limit, and - // - the minimum number of extra blocks mined between a checkpoint update, - // and the automated tests for that update. - let min_after_checkpoint_blocks = i32::try_from(zebra_state::MAX_BLOCK_REORG_HEIGHT) - .expect("constant fits in i32") - + MIN_BLOCKS_MINED_AFTER_CHECKPOINT_UPDATE; - - // The minimum height of the valid best chain, based on: - // - the hard-coded checkpoint height, - // - the minimum number of blocks after the highest checkpoint. 
- let after_checkpoint_height = CheckpointList::new(network) - .max_height() - .add(min_after_checkpoint_blocks) - .expect("hard-coded checkpoint height is far below Height::MAX"); - - let target_block_spacing = NetworkUpgrade::target_spacing_for_height(network, Height::MAX); - let max_block_spacing = - NetworkUpgrade::minimum_difficulty_spacing_for_height(network, Height::MAX); - - // We expect the state height to increase at least once in this interval. - // - // Most chain forks are 1-7 blocks long. - // - // TODO: remove the target_block_spacing multiplier, - // after fixing slow syncing near tip (#3375) - let min_state_block_interval = max_block_spacing.unwrap_or(target_block_spacing * 4) * 2; - - // Formatted string for logging. - let max_block_spacing = max_block_spacing - .map(|duration| duration.to_string()) - .unwrap_or_else(|| "None".to_string()); - - // The last time we downloaded and verified at least one block. - // - // Initialized to the start time to simplify the code. - let mut last_state_change_time = Utc::now(); - - // The state tip height, when we last downloaded and verified at least one block. - // - // Initialized to the genesis height to simplify the code. - let mut last_state_change_height = Height(0); - - loop { - let now = Utc::now(); - let is_syncer_stopped = sync_status.is_close_to_tip(); - - if let Some(estimated_height) = - latest_chain_tip.estimate_network_chain_tip_height(network, now) - { - // The estimate/actual race doesn't matter here, - // because we're only using it for metrics and logging. - let current_height = latest_chain_tip - .best_tip_height() - .expect("unexpected empty state: estimate requires a block height"); - - // Work out the sync progress towards the estimated tip. - let sync_progress = f64::from(current_height.0) / f64::from(estimated_height.0); - let sync_percent = format!( - "{:.frac$} %", - sync_progress * 100.0, - frac = SYNC_PERCENT_FRAC_DIGITS, - ); - - let remaining_sync_blocks = estimated_height - current_height; - - // Work out how long it has been since the state height has increased. - // - // Non-finalized forks can decrease the height, we only want to track increases. - if current_height > last_state_change_height { - last_state_change_height = current_height; - last_state_change_time = now; - } - - let time_since_last_state_block = last_state_change_time.signed_duration_since(now); - - // TODO: - // - log progress, remaining blocks, and remaining time to next network upgrade - // - add some of this info to the metrics - - if time_since_last_state_block > min_state_block_interval { - // The state tip height hasn't increased for a long time. - // - // Block verification can fail if the local node's clock is wrong. - warn!( - %sync_percent, - ?current_height, - %time_since_last_state_block, - %target_block_spacing, - %max_block_spacing, - ?is_syncer_stopped, - "chain updates have stalled, \ - state height has not increased for {} minutes. \ - Hint: check your network connection, \ - and your computer clock and time zone", - time_since_last_state_block.num_minutes(), - ); - } else if is_syncer_stopped && remaining_sync_blocks > MIN_SYNC_WARNING_BLOCKS { - // We've stopped syncing blocks, but we estimate we're a long way from the tip. - // - // TODO: warn after fixing slow syncing near tip (#3375) - info!( - %sync_percent, - ?current_height, - ?remaining_sync_blocks, - ?after_checkpoint_height, - %time_since_last_state_block, - "initial sync is very slow, or estimated tip is wrong. 
\ - Hint: check your network connection, \ - and your computer clock and time zone", - ); - } else if is_syncer_stopped && current_height <= after_checkpoint_height { - // We've stopped syncing blocks, - // but we're below the minimum height estimated from our checkpoints. - let min_minutes_after_checkpoint_update: i64 = div_ceil( - i64::from(MIN_BLOCKS_MINED_AFTER_CHECKPOINT_UPDATE) - * POST_BLOSSOM_POW_TARGET_SPACING, - 60, - ); - - warn!( - %sync_percent, - ?current_height, - ?remaining_sync_blocks, - ?after_checkpoint_height, - %time_since_last_state_block, - "initial sync is very slow, and state is below the highest checkpoint. \ - Hint: check your network connection, \ - and your computer clock and time zone. \ - Dev Hint: were the checkpoints updated in the last {} minutes?", - min_minutes_after_checkpoint_update, - ); - } else if is_syncer_stopped { - // We've stayed near the tip for a while, and we've stopped syncing lots of blocks. - // So we're mostly using gossiped blocks now. - info!( - %sync_percent, - ?current_height, - ?remaining_sync_blocks, - %time_since_last_state_block, - "finished initial sync to chain tip, using gossiped blocks", - ); - } else if remaining_sync_blocks <= MAX_CLOSE_TO_TIP_BLOCKS { - // We estimate we're near the tip, but we have been syncing lots of blocks recently. - // We might also be using some gossiped blocks. - info!( - %sync_percent, - ?current_height, - ?remaining_sync_blocks, - %time_since_last_state_block, - "close to finishing initial sync, \ - confirming using syncer and gossiped blocks", - ); - } else { - // We estimate we're far from the tip, and we've been syncing lots of blocks. - info!( - %sync_percent, - ?current_height, - ?remaining_sync_blocks, - %time_since_last_state_block, - "estimated progress to chain tip", - ); - } - } else { - let sync_percent = format!("{:.frac$} %", 0.0f64, frac = SYNC_PERCENT_FRAC_DIGITS,); - - if is_syncer_stopped { - // We've stopped syncing blocks, - // but we haven't downloaded and verified the genesis block. - warn!( - %sync_percent, - current_height = %"None", - "initial sync can't download and verify the genesis block. \ - Hint: check your network connection, \ - and your computer clock and time zone", - ); - } else { - // We're waiting for the genesis block to be committed to the state, - // before we can estimate the best chain tip. - info!( - %sync_percent, - current_height = %"None", - "initial sync is waiting to download the genesis block", - ); - } - } - - tokio::time::sleep(LOG_INTERVAL).await; - } - } } impl Runnable for StartCmd { diff --git a/zebrad/src/components/sync.rs b/zebrad/src/components/sync.rs index 7ab85cf6812..0d96276d626 100644 --- a/zebrad/src/components/sync.rs +++ b/zebrad/src/components/sync.rs @@ -30,6 +30,7 @@ use crate::{ mod downloads; mod gossip; +mod progress; mod recent_sync_lengths; mod status; @@ -39,6 +40,7 @@ mod tests; use downloads::{AlwaysHedge, Downloads}; pub use gossip::{gossip_best_tip_block_hashes, BlockGossipError}; +pub use progress::show_block_chain_progress; pub use recent_sync_lengths::RecentSyncLengths; pub use status::SyncStatus; diff --git a/zebrad/src/components/sync/progress.rs b/zebrad/src/components/sync/progress.rs new file mode 100644 index 00000000000..e8240dcc695 --- /dev/null +++ b/zebrad/src/components/sync/progress.rs @@ -0,0 +1,259 @@ +//! Progress tracking for blockchain syncing. 
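// A minimal usage sketch (illustrative, not part of this patch): the
// `start.rs` hunk at the top of this commit spawns this module's task next
// to the syncer. `config`, `latest_chain_tip`, and `sync_status` are assumed
// to be in scope, as they are in `StartCmd::start`:
//
//     let progress_task_handle = tokio::spawn(
//         show_block_chain_progress(config.network.network, latest_chain_tip, sync_status)
//             .in_current_span(),
//     );
//
// The task loops forever, so the handle mainly matters for panic and
// shutdown handling.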
+ +use std::{ops::Add, time::Duration}; + +use chrono::Utc; +use num_integer::div_ceil; + +use zebra_chain::{ + block::Height, + chain_tip::ChainTip, + parameters::{Network, NetworkUpgrade, POST_BLOSSOM_POW_TARGET_SPACING}, +}; +use zebra_consensus::CheckpointList; + +use crate::components::{sync::SyncStatus, tracing::humantime_seconds}; + +/// The amount of time between progress logs. +const LOG_INTERVAL: Duration = Duration::from_secs(60); + +/// The number of blocks we consider to be close to the tip. +/// +/// Most chain forks are 1-7 blocks long. +const MAX_CLOSE_TO_TIP_BLOCKS: i32 = 1; + +/// Skip slow sync warnings when we are this close to the tip. +/// +/// In testing, we've seen warnings around 30 blocks. +/// +/// TODO: replace with `MAX_CLOSE_TO_TIP_BLOCKS` after fixing slow syncing near tip (#3375) +const MIN_SYNC_WARNING_BLOCKS: i32 = 60; + +/// The number of fractional digits in sync percentages. +const SYNC_PERCENT_FRAC_DIGITS: usize = 3; + +/// The minimum number of extra blocks mined between updating a checkpoint list, +/// and running an automated test that depends on that list. +/// +/// Makes sure that the block finalization code always runs in sync tests, +/// even if the miner or test node clock is wrong by a few minutes. +/// +/// This is an estimate based on the time it takes to: +/// - get the tip height from `zcashd`, +/// - run `zebra-checkpoints` to update the checkpoint list, +/// - submit a pull request, and +/// - run a CI test that logs progress based on the new checkpoint height. +/// +/// We might add tests that sync from a cached tip state, +/// so we only allow a few extra blocks here. +const MIN_BLOCKS_MINED_AFTER_CHECKPOINT_UPDATE: i32 = 10; + +/// Logs Zebra's estimated progress towards the chain tip every minute or so. +/// +/// TODO: +/// - log progress towards, remaining blocks before, and remaining time to next network upgrade +/// - add some progress info to the metrics +pub async fn show_block_chain_progress( + network: Network, + latest_chain_tip: impl ChainTip, + sync_status: SyncStatus, +) { + // The minimum number of extra blocks after the highest checkpoint, based on: + // - the non-finalized state limit, and + // - the minimum number of extra blocks mined between a checkpoint update, + // and the automated tests for that update. + let min_after_checkpoint_blocks = i32::try_from(zebra_state::MAX_BLOCK_REORG_HEIGHT) + .expect("constant fits in i32") + + MIN_BLOCKS_MINED_AFTER_CHECKPOINT_UPDATE; + + // The minimum height of the valid best chain, based on: + // - the hard-coded checkpoint height, + // - the minimum number of blocks after the highest checkpoint. + let after_checkpoint_height = CheckpointList::new(network) + .max_height() + .add(min_after_checkpoint_blocks) + .expect("hard-coded checkpoint height is far below Height::MAX"); + + let target_block_spacing = NetworkUpgrade::target_spacing_for_height(network, Height::MAX); + let max_block_spacing = + NetworkUpgrade::minimum_difficulty_spacing_for_height(network, Height::MAX); + + // We expect the state height to increase at least once in this interval. + // + // Most chain forks are 1-7 blocks long. + // + // TODO: remove the target_block_spacing multiplier, + // after fixing slow syncing near tip (#3375) + let min_state_block_interval = max_block_spacing.unwrap_or(target_block_spacing * 4) * 2; + + // Formatted string for logging. 
+ let max_block_spacing = max_block_spacing + .map(|duration| duration.to_string()) + .unwrap_or_else(|| "None".to_string()); + + // The last time we downloaded and verified at least one block. + // + // Initialized to the start time to simplify the code. + let mut last_state_change_time = Utc::now(); + + // The state tip height, when we last downloaded and verified at least one block. + // + // Initialized to the genesis height to simplify the code. + let mut last_state_change_height = Height(0); + + loop { + let now = Utc::now(); + let is_syncer_stopped = sync_status.is_close_to_tip(); + + if let Some(estimated_height) = + latest_chain_tip.estimate_network_chain_tip_height(network, now) + { + // The estimate/actual race doesn't matter here, + // because we're only using it for metrics and logging. + let current_height = latest_chain_tip + .best_tip_height() + .expect("unexpected empty state: estimate requires a block height"); + let network_upgrade = NetworkUpgrade::current(network, current_height); + + // Work out the sync progress towards the estimated tip. + let sync_progress = f64::from(current_height.0) / f64::from(estimated_height.0); + let sync_percent = format!( + "{:.frac$}%", + sync_progress * 100.0, + frac = SYNC_PERCENT_FRAC_DIGITS, + ); + + let remaining_sync_blocks = estimated_height - current_height; + + // Work out how long it has been since the state height has increased. + // + // Non-finalized forks can decrease the height, we only want to track increases. + if current_height > last_state_change_height { + last_state_change_height = current_height; + last_state_change_time = now; + } + + let time_since_last_state_block_chrono = + now.signed_duration_since(last_state_change_time); + let time_since_last_state_block = humantime_seconds( + time_since_last_state_block_chrono + .to_std() + .unwrap_or_default(), + ); + + if time_since_last_state_block_chrono > min_state_block_interval { + // The state tip height hasn't increased for a long time. + // + // Block verification can fail if the local node's clock is wrong. + warn!( + %sync_percent, + ?current_height, + ?network_upgrade, + %time_since_last_state_block, + %target_block_spacing, + %max_block_spacing, + ?is_syncer_stopped, + "chain updates have stalled, \ + state height has not increased for {} minutes. \ + Hint: check your network connection, \ + and your computer clock and time zone", + time_since_last_state_block_chrono.num_minutes(), + ); + } else if is_syncer_stopped && remaining_sync_blocks > MIN_SYNC_WARNING_BLOCKS { + // We've stopped syncing blocks, but we estimate we're a long way from the tip. + // + // TODO: warn after fixing slow syncing near tip (#3375) + info!( + %sync_percent, + ?current_height, + ?network_upgrade, + ?remaining_sync_blocks, + ?after_checkpoint_height, + %time_since_last_state_block, + "initial sync is very slow, or estimated tip is wrong. \ + Hint: check your network connection, \ + and your computer clock and time zone", + ); + } else if is_syncer_stopped && current_height <= after_checkpoint_height { + // We've stopped syncing blocks, + // but we're below the minimum height estimated from our checkpoints. 
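// Worked examples for the thresholds above (sketches, using protocol
// constants rather than the diff): the post-Blossom target spacing is 75
// seconds (ZIP-208), and Mainnet has no minimum-difficulty rule, so
// `max_block_spacing` is `None` there and
// `min_state_block_interval = (75s * 4) * 2 = 600s`: the stall warning only
// fires after ten minutes without a state height increase. Likewise,
// `div_ceil(10 * 75, 60) = div_ceil(750, 60) = 13`, so the dev hint below
// asks about checkpoint updates within the last 13 minutes.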
+                let min_minutes_after_checkpoint_update: i64 = div_ceil(
+                    i64::from(MIN_BLOCKS_MINED_AFTER_CHECKPOINT_UPDATE)
+                        * POST_BLOSSOM_POW_TARGET_SPACING,
+                    60,
+                );
+
+                warn!(
+                    %sync_percent,
+                    ?current_height,
+                    ?network_upgrade,
+                    ?remaining_sync_blocks,
+                    ?after_checkpoint_height,
+                    %time_since_last_state_block,
+                    "initial sync is very slow, and state is below the highest checkpoint. \
+                     Hint: check your network connection, \
+                     and your computer clock and time zone. \
+                     Dev Hint: were the checkpoints updated in the last {} minutes?",
+                    min_minutes_after_checkpoint_update,
+                );
+            } else if is_syncer_stopped {
+                // We've stayed near the tip for a while, and we've stopped syncing lots of blocks.
+                // So we're mostly using gossiped blocks now.
+                info!(
+                    %sync_percent,
+                    ?current_height,
+                    ?network_upgrade,
+                    ?remaining_sync_blocks,
+                    %time_since_last_state_block,
+                    "finished initial sync to chain tip, using gossiped blocks",
+                );
+            } else if remaining_sync_blocks <= MAX_CLOSE_TO_TIP_BLOCKS {
+                // We estimate we're near the tip, but we have been syncing lots of blocks recently.
+                // We might also be using some gossiped blocks.
+                info!(
+                    %sync_percent,
+                    ?current_height,
+                    ?network_upgrade,
+                    ?remaining_sync_blocks,
+                    %time_since_last_state_block,
+                    "close to finishing initial sync, \
+                     confirming using syncer and gossiped blocks",
+                );
+            } else {
+                // We estimate we're far from the tip, and we've been syncing lots of blocks.
+                info!(
+                    %sync_percent,
+                    ?current_height,
+                    ?network_upgrade,
+                    ?remaining_sync_blocks,
+                    %time_since_last_state_block,
+                    "estimated progress to chain tip",
+                );
+            }
+        } else {
+            let sync_percent = format!("{:.frac$} %", 0.0f64, frac = SYNC_PERCENT_FRAC_DIGITS,);
+
+            if is_syncer_stopped {
+                // We've stopped syncing blocks,
+                // but we haven't downloaded and verified the genesis block.
+                warn!(
+                    %sync_percent,
+                    current_height = %"None",
+                    "initial sync can't download and verify the genesis block. \
+                     Hint: check your network connection, \
+                     and your computer clock and time zone",
+                );
+            } else {
+                // We're waiting for the genesis block to be committed to the state,
+                // before we can estimate the best chain tip.
+                info!(
+                    %sync_percent,
+                    current_height = %"None",
+                    "initial sync is waiting to download the genesis block",
+                );
+            }
+        }
+
+        tokio::time::sleep(LOG_INTERVAL).await;
+    }
+}
diff --git a/zebrad/src/components/tracing.rs b/zebrad/src/components/tracing.rs
index d78739cba7a..778cc1fc9e8 100644
--- a/zebrad/src/components/tracing.rs
+++ b/zebrad/src/components/tracing.rs
@@ -2,12 +2,14 @@
 
 mod component;
 mod endpoint;
+mod fmt;
 
 #[cfg(feature = "flamegraph")]
 mod flame;
 
 pub use component::Tracing;
 pub use endpoint::TracingEndpoint;
+pub use fmt::humantime_seconds;
 
 #[cfg(feature = "flamegraph")]
 pub use flame::{layer, Grapher};
diff --git a/zebrad/src/components/tracing/fmt.rs b/zebrad/src/components/tracing/fmt.rs
new file mode 100644
index 00000000000..e682561822e
--- /dev/null
+++ b/zebrad/src/components/tracing/fmt.rs
@@ -0,0 +1,15 @@
+//! Formatting for traced values.
+
+use std::time::Duration;
+
+/// Returns a human-friendly formatted string for `duration.as_secs()`.
+pub fn humantime_seconds(duration: impl Into<Duration>) -> String {
+    let duration = duration.into();
+
+    // Truncate fractional seconds.
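// Example (sketch): 61.9 seconds truncates to 61 seconds, which
// `humantime::format_duration` renders as "1m 1s".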
+ let duration = Duration::from_secs(duration.as_secs()); + + let duration = humantime::format_duration(duration); + + format!("{}", duration) +} From 37e83e44dffff898eb34706c4be142ac3bf2da93 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jun 2022 04:01:48 +0000 Subject: [PATCH 38/91] build(deps): bump reviewdog/action-actionlint from 1.25.1 to 1.26.0 (#4695) Bumps [reviewdog/action-actionlint](https://github.com/reviewdog/action-actionlint) from 1.25.1 to 1.26.0. - [Release notes](https://github.com/reviewdog/action-actionlint/releases) - [Commits](https://github.com/reviewdog/action-actionlint/compare/v1.25.1...v1.26.0) --- updated-dependencies: - dependency-name: reviewdog/action-actionlint dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 9c389b98136..42e51298993 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -138,7 +138,7 @@ jobs: if: ${{ needs.changed-files.outputs.workflows == 'true' }} steps: - uses: actions/checkout@v3.0.2 - - uses: reviewdog/action-actionlint@v1.25.1 + - uses: reviewdog/action-actionlint@v1.26.0 with: level: warning fail_on_error: false From 54efbe9d2d0003142e0e3a08772722e24eb34127 Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 28 Jun 2022 14:02:07 +1000 Subject: [PATCH 39/91] Improve ZcashSerialize docs (#4693) --- zebra-chain/src/serialization/error.rs | 2 +- .../src/serialization/zcash_deserialize.rs | 18 ++++++++---------- .../src/serialization/zcash_serialize.rs | 11 +++++++---- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/zebra-chain/src/serialization/error.rs b/zebra-chain/src/serialization/error.rs index 4b11b56a064..03002348cf1 100644 --- a/zebra-chain/src/serialization/error.rs +++ b/zebra-chain/src/serialization/error.rs @@ -1,4 +1,4 @@ -//! Errors for transaction serialization. +//! Errors for Zcash consensus-critical serialization. use std::{array::TryFromSliceError, io, num::TryFromIntError, str::Utf8Error}; diff --git a/zebra-chain/src/serialization/zcash_deserialize.rs b/zebra-chain/src/serialization/zcash_deserialize.rs index 833e7bcd693..45b4df061cc 100644 --- a/zebra-chain/src/serialization/zcash_deserialize.rs +++ b/zebra-chain/src/serialization/zcash_deserialize.rs @@ -1,18 +1,16 @@ -use std::{ - convert::{TryFrom, TryInto}, - io, - net::Ipv6Addr, - sync::Arc, -}; +//! Converting bytes into Zcash consensus-critical data structures. + +use std::{io, net::Ipv6Addr, sync::Arc}; use super::{AtLeastOne, CompactSizeMessage, SerializationError, MAX_PROTOCOL_MESSAGE_LEN}; -/// Consensus-critical serialization for Zcash. +/// Consensus-critical deserialization for Zcash. /// /// This trait provides a generic deserialization for consensus-critical -/// formats, such as network messages, transactions, blocks, etc. It is intended -/// for use only in consensus-critical contexts; in other contexts, such as -/// internal storage, it would be preferable to use Serde. +/// formats, such as network messages, transactions, blocks, etc. +/// +/// It is intended for use only for consensus-critical formats. +/// Internal deserialization can freely use `serde`, or any other format. 
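// (Illustrative sketch, not part of the patch.) A hypothetical fixed-width
// type implements the trait below by reading its fields in order; the `?`
// operator works because `SerializationError` converts from `std::io::Error`:
struct ExampleHash([u8; 32]); // hypothetical type, for the sketch only

impl ZcashDeserialize for ExampleHash {
    fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
        let mut bytes = [0u8; 32];
        reader.read_exact(&mut bytes)?;
        Ok(ExampleHash(bytes))
    }
}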
pub trait ZcashDeserialize: Sized { /// Try to read `self` from the given `reader`. /// diff --git a/zebra-chain/src/serialization/zcash_serialize.rs b/zebra-chain/src/serialization/zcash_serialize.rs index 301fe0fbe87..d281b8ab151 100644 --- a/zebra-chain/src/serialization/zcash_serialize.rs +++ b/zebra-chain/src/serialization/zcash_serialize.rs @@ -1,4 +1,6 @@ -use std::{convert::TryInto, io, net::Ipv6Addr}; +//! Converting Zcash consensus-critical data structures into bytes. + +use std::{io, net::Ipv6Addr}; use super::{AtLeastOne, CompactSizeMessage}; @@ -10,9 +12,10 @@ pub const MAX_PROTOCOL_MESSAGE_LEN: usize = 2 * 1024 * 1024; /// Consensus-critical serialization for Zcash. /// /// This trait provides a generic serialization for consensus-critical -/// formats, such as network messages, transactions, blocks, etc. It is intended -/// for use only in consensus-critical contexts; in other contexts, such as -/// internal storage, it would be preferable to use Serde. +/// formats, such as network messages, transactions, blocks, etc. +/// +/// It is intended for use only for consensus-critical formats. +/// Internal serialization can freely use `serde`, or any other format. pub trait ZcashSerialize: Sized { /// Write `self` to the given `writer` using the canonical format. /// From 97fb85dca9d00a717661ea0cbab7ce66b8d23977 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Tue, 28 Jun 2022 03:22:07 -0300 Subject: [PATCH 40/91] lint(clippy): add `unwrap_in_result` lint (#4667) * `unwrap_in_result` in zebra-chain crate * `unwrap_in_result` in zebra-script crate * `unwrap_in_result` in zebra-state crate * `unwrap_in_result` in zebra-consensus crate * `unwrap_in_result` in zebra-test crate * `unwrap_in_result` in zebra-network crate * `unwrap_in_result` in zebra-rpc crate * `unwrap_in_result` in zebrad crate * rustfmt * revert `?` and add exceptions * explain some panics better * move some lint positions * replace a panic with error * Fix rustfmt? 
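For reference, `clippy::unwrap_in_result` flags `unwrap()` and `expect()`
calls inside functions that themselves return `Result` or `Option`. The
repository enables it via `-Wclippy::unwrap_in_result` in `.cargo/config.toml`
(first hunk below); each commit above either propagates the error instead, or
keeps the panic, explains why it is impossible, and adds a targeted
`#[allow]`. A minimal sketch of both idioms (hypothetical functions, not taken
from the diff):

    fn parse_port(s: &str) -> Result<u16, std::num::ParseIntError> {
        // Preferred fix: propagate the error instead of panicking.
        s.parse::<u16>()
    }

    #[allow(clippy::unwrap_in_result)]
    fn default_port() -> Result<u16, std::num::ParseIntError> {
        // Documented exception: parsing a constant cannot fail, and the
        // `expect` message says why.
        Ok("8233".parse::<u16>().expect("hard-coded port is valid"))
    }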
Co-authored-by: teor --- .cargo/config.toml | 1 + zebra-chain/src/amount.rs | 1 + zebra-chain/src/block.rs | 1 + zebra-chain/src/block/header.rs | 1 + zebra-chain/src/block/serialize.rs | 1 + zebra-chain/src/history_tree.rs | 4 ++ zebra-chain/src/orchard/commitment.rs | 1 + zebra-chain/src/orchard/keys.rs | 4 +- zebra-chain/src/orchard/tree.rs | 1 + zebra-chain/src/primitives/zcash_history.rs | 3 ++ zebra-chain/src/sapling/address.rs | 11 +++-- zebra-chain/src/sapling/keys.rs | 25 ++++++++--- zebra-chain/src/sapling/tree.rs | 1 + zebra-chain/src/serialization/compact_size.rs | 2 + zebra-chain/src/serialization/date_time.rs | 1 + .../src/serialization/zcash_serialize.rs | 1 + zebra-chain/src/sprout/tree.rs | 1 + zebra-chain/src/transaction.rs | 2 + zebra-chain/src/transaction/lock_time.rs | 2 + zebra-chain/src/transaction/serialize.rs | 3 ++ zebra-chain/src/transaction/txid.rs | 1 + zebra-chain/src/transparent.rs | 1 + zebra-chain/src/value_balance.rs | 2 + zebra-chain/src/work/difficulty.rs | 2 + zebra-chain/src/work/equihash.rs | 1 + zebra-consensus/src/checkpoint.rs | 2 + zebra-network/src/address_book.rs | 1 + zebra-network/src/meta_addr.rs | 2 + zebra-network/src/peer/client.rs | 2 + zebra-network/src/peer/client/tests.rs | 1 + zebra-network/src/peer/error.rs | 2 + zebra-network/src/peer_set/set.rs | 1 + .../src/protocol/external/addr/v2.rs | 1 + zebra-network/src/protocol/external/codec.rs | 1 + .../src/protocol/internal/response.rs | 1 + zebra-rpc/src/methods.rs | 2 + zebra-script/src/lib.rs | 2 + zebra-state/src/service/arbitrary.rs | 1 + zebra-state/src/service/block_iter.rs | 1 + zebra-state/src/service/finalized_state.rs | 1 + .../src/service/finalized_state/disk_db.rs | 1 + .../service/finalized_state/zebra_db/block.rs | 10 +++++ .../service/finalized_state/zebra_db/chain.rs | 2 + .../finalized_state/zebra_db/shielded.rs | 5 +++ .../finalized_state/zebra_db/transparent.rs | 7 ++++ .../src/service/non_finalized_state.rs | 1 + .../src/service/non_finalized_state/chain.rs | 3 ++ .../non_finalized_state/chain/index.rs | 42 +++++++++++++++---- zebra-test/src/command.rs | 9 ++++ zebrad/src/application.rs | 9 +++- zebrad/src/components/mempool/storage.rs | 1 + .../mempool/storage/verified_set.rs | 4 +- zebrad/src/components/tokio.rs | 3 +- zebrad/src/components/tracing/endpoint.rs | 1 + zebrad/tests/common/launch.rs | 1 + zebrad/tests/common/lightwalletd.rs | 1 + 56 files changed, 173 insertions(+), 22 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index f6f39cbb9a1..0d373727220 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -54,6 +54,7 @@ rustflags = [ # Panics "-Wclippy::fallible_impl_from", + "-Wclippy::unwrap_in_result", # TODOs: # `cargo fix` might help do these fixes, diff --git a/zebra-chain/src/amount.rs b/zebra-chain/src/amount.rs index 7ca036accc2..8a130e4ad7e 100644 --- a/zebra-chain/src/amount.rs +++ b/zebra-chain/src/amount.rs @@ -536,6 +536,7 @@ impl ZcashDeserialize for Amount { } impl ZcashSerialize for Amount { + #[allow(clippy::unwrap_in_result)] fn zcash_serialize(&self, mut writer: W) -> Result<(), std::io::Error> { let amount = self .0 diff --git a/zebra-chain/src/block.rs b/zebra-chain/src/block.rs index ada83e3e2ce..078d80aaa51 100644 --- a/zebra-chain/src/block.rs +++ b/zebra-chain/src/block.rs @@ -113,6 +113,7 @@ impl Block { /// /// /// [ZIP-244]: https://zips.z.cash/zip-0244 + #[allow(clippy::unwrap_in_result)] pub fn check_transaction_network_upgrade_consistency( &self, network: Network, diff --git 
a/zebra-chain/src/block/header.rs b/zebra-chain/src/block/header.rs index c7a297b0285..2eefcca6a89 100644 --- a/zebra-chain/src/block/header.rs +++ b/zebra-chain/src/block/header.rs @@ -101,6 +101,7 @@ pub enum BlockTimeError { impl Header { /// TODO: Inline this function into zebra_consensus::block::check::time_is_valid_at. /// See for more details. + #[allow(clippy::unwrap_in_result)] pub fn time_is_valid_at( &self, now: DateTime, diff --git a/zebra-chain/src/block/serialize.rs b/zebra-chain/src/block/serialize.rs index 3cf19ae60cf..d51617f40ff 100644 --- a/zebra-chain/src/block/serialize.rs +++ b/zebra-chain/src/block/serialize.rs @@ -21,6 +21,7 @@ use super::{merkle, Block, CountedHeader, Hash, Header}; pub const MAX_BLOCK_BYTES: u64 = 2_000_000; impl ZcashSerialize for Header { + #[allow(clippy::unwrap_in_result)] fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { writer.write_u32::(self.version)?; self.previous_block_hash.zcash_serialize(&mut writer)?; diff --git a/zebra-chain/src/history_tree.rs b/zebra-chain/src/history_tree.rs index de0e305f423..fcd19fb478a 100644 --- a/zebra-chain/src/history_tree.rs +++ b/zebra-chain/src/history_tree.rs @@ -128,6 +128,7 @@ impl NonEmptyHistoryTree { /// `sapling_root` is the root of the Sapling note commitment tree of the block. /// `orchard_root` is the root of the Orchard note commitment tree of the block; /// (ignored for pre-Orchard blocks). + #[allow(clippy::unwrap_in_result)] pub fn from_block( network: Network, block: Arc, @@ -186,6 +187,7 @@ impl NonEmptyHistoryTree { /// # Panics /// /// If the block height is not one more than the previously pushed block. + #[allow(clippy::unwrap_in_result)] pub fn push( &mut self, block: Arc, @@ -419,6 +421,7 @@ impl HistoryTree { /// Create a HistoryTree from a block. /// /// If the block is pre-Heartwood, it returns an empty history tree. + #[allow(clippy::unwrap_in_result)] pub fn from_block( network: Network, block: Arc, @@ -444,6 +447,7 @@ impl HistoryTree { /// /// The tree is updated in-place. It is created when pushing the Heartwood /// activation block. 
+ #[allow(clippy::unwrap_in_result)] pub fn push( &mut self, network: Network, diff --git a/zebra-chain/src/orchard/commitment.rs b/zebra-chain/src/orchard/commitment.rs index 64c523061d1..cc146ffbec6 100644 --- a/zebra-chain/src/orchard/commitment.rs +++ b/zebra-chain/src/orchard/commitment.rs @@ -113,6 +113,7 @@ impl NoteCommitment { /// /// #[allow(non_snake_case)] + #[allow(clippy::unwrap_in_result)] pub fn new(note: Note) -> Option { // s as in the argument name for WindowedPedersenCommit_r(s) let mut s: BitVec = BitVec::new(); diff --git a/zebra-chain/src/orchard/keys.rs b/zebra-chain/src/orchard/keys.rs index 11ffba04f7b..759b1156075 100644 --- a/zebra-chain/src/orchard/keys.rs +++ b/zebra-chain/src/orchard/keys.rs @@ -161,13 +161,15 @@ impl ConstantTimeEq for SpendingKey { } impl fmt::Display for SpendingKey { + #[allow(clippy::unwrap_in_result)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let hrp = match self.network { Network::Mainnet => sk_hrp::MAINNET, Network::Testnet => sk_hrp::TESTNET, }; - bech32::encode_to_fmt(f, hrp, &self.bytes.to_base32(), Variant::Bech32).unwrap() + bech32::encode_to_fmt(f, hrp, &self.bytes.to_base32(), Variant::Bech32) + .expect("hrp is valid") } } diff --git a/zebra-chain/src/orchard/tree.rs b/zebra-chain/src/orchard/tree.rs index eb7c1dfd989..cb7f9e28035 100644 --- a/zebra-chain/src/orchard/tree.rs +++ b/zebra-chain/src/orchard/tree.rs @@ -301,6 +301,7 @@ impl NoteCommitmentTree { /// chain and input into the proof. /// /// Returns an error if the tree is full. + #[allow(clippy::unwrap_in_result)] pub fn append(&mut self, cm_x: pallas::Base) -> Result<(), NoteCommitmentTreeError> { if self.inner.append(&cm_x.into()) { // Invalidate cached root diff --git a/zebra-chain/src/primitives/zcash_history.rs b/zebra-chain/src/primitives/zcash_history.rs index 2f0967c8798..3d20a042678 100644 --- a/zebra-chain/src/primitives/zcash_history.rs +++ b/zebra-chain/src/primitives/zcash_history.rs @@ -105,6 +105,7 @@ impl Tree { /// # Panics /// /// Will panic if `peaks` is empty. + #[allow(clippy::unwrap_in_result)] pub fn new_from_cache( network: Network, network_upgrade: NetworkUpgrade, @@ -138,6 +139,7 @@ impl Tree { /// `sapling_root` is the root of the Sapling note commitment tree of the block. /// `orchard_root` is the root of the Orchard note commitment tree of the block; /// (ignored for V1 trees). + #[allow(clippy::unwrap_in_result)] pub fn new_from_block( network: Network, block: Arc, @@ -171,6 +173,7 @@ impl Tree { /// /// Panics if the network upgrade of the given block is different from /// the network upgrade of the other blocks in the tree. 
+ #[allow(clippy::unwrap_in_result)] pub fn append_leaf( &mut self, block: Arc, diff --git a/zebra-chain/src/sapling/address.rs b/zebra-chain/src/sapling/address.rs index 7cb27c77e6a..741e4ec2e9a 100644 --- a/zebra-chain/src/sapling/address.rs +++ b/zebra-chain/src/sapling/address.rs @@ -48,6 +48,7 @@ impl fmt::Debug for Address { } impl fmt::Display for Address { + #[allow(clippy::unwrap_in_result)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut bytes = io::Cursor::new(Vec::new()); @@ -59,7 +60,8 @@ impl fmt::Display for Address { _ => human_readable_parts::TESTNET, }; - bech32::encode_to_fmt(f, hrp, bytes.get_ref().to_base32(), Variant::Bech32).unwrap() + bech32::encode_to_fmt(f, hrp, bytes.get_ref().to_base32(), Variant::Bech32) + .expect("hrp is valid") } } @@ -69,7 +71,10 @@ impl std::str::FromStr for Address { fn from_str(s: &str) -> Result { match bech32::decode(s) { Ok((hrp, bytes, Variant::Bech32)) => { - let mut decoded_bytes = io::Cursor::new(Vec::::from_base32(&bytes).unwrap()); + let mut decoded_bytes = + io::Cursor::new(Vec::::from_base32(&bytes).map_err(|_| { + SerializationError::Parse("bech32::decode guarantees valid base32") + })?); let mut diversifier_bytes = [0; 11]; decoded_bytes.read_exact(&mut diversifier_bytes)?; @@ -83,7 +88,7 @@ impl std::str::FromStr for Address { }, diversifier: keys::Diversifier::from(diversifier_bytes), transmission_key: keys::TransmissionKey::try_from(transmission_key_bytes) - .unwrap(), + .map_err(|_| SerializationError::Parse("invalid transmission key bytes"))?, }) } _ => Err(SerializationError::Parse("bech32 decoding error")), diff --git a/zebra-chain/src/sapling/keys.rs b/zebra-chain/src/sapling/keys.rs index cd3a3f27871..c6027132cfd 100644 --- a/zebra-chain/src/sapling/keys.rs +++ b/zebra-chain/src/sapling/keys.rs @@ -254,23 +254,27 @@ impl From<[u8; 32]> for SpendingKey { } impl fmt::Display for SpendingKey { + #[allow(clippy::unwrap_in_result)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let hrp = match self.network { Network::Mainnet => sk_hrp::MAINNET, _ => sk_hrp::TESTNET, }; - bech32::encode_to_fmt(f, hrp, &self.bytes.to_base32(), Variant::Bech32).unwrap() + bech32::encode_to_fmt(f, hrp, &self.bytes.to_base32(), Variant::Bech32) + .expect("hrp is valid") } } impl FromStr for SpendingKey { type Err = SerializationError; + #[allow(clippy::unwrap_in_result)] fn from_str(s: &str) -> Result { match bech32::decode(s) { Ok((hrp, bytes, Variant::Bech32)) => { - let decoded = Vec::::from_base32(&bytes).unwrap(); + let decoded = + Vec::::from_base32(&bytes).expect("bech32::decode guarantees valid base32"); let mut decoded_bytes = [0u8; 32]; decoded_bytes[..].copy_from_slice(&decoded[0..32]); @@ -637,13 +641,15 @@ impl fmt::Debug for IncomingViewingKey { } impl fmt::Display for IncomingViewingKey { + #[allow(clippy::unwrap_in_result)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let hrp = match self.network { Network::Mainnet => ivk_hrp::MAINNET, _ => ivk_hrp::TESTNET, }; - bech32::encode_to_fmt(f, hrp, &self.scalar.to_bytes().to_base32(), Variant::Bech32).unwrap() + bech32::encode_to_fmt(f, hrp, &self.scalar.to_bytes().to_base32(), Variant::Bech32) + .expect("hrp is valid") } } @@ -690,10 +696,12 @@ impl From<(AuthorizingKey, NullifierDerivingKey)> for IncomingViewingKey { impl FromStr for IncomingViewingKey { type Err = SerializationError; + #[allow(clippy::unwrap_in_result)] fn from_str(s: &str) -> Result { match bech32::decode(s) { Ok((hrp, bytes, Variant::Bech32)) => { - let decoded = 
Vec::::from_base32(&bytes).unwrap(); + let decoded = + Vec::::from_base32(&bytes).expect("bech32::decode guarantees valid base32"); let mut scalar_bytes = [0u8; 32]; scalar_bytes[..].copy_from_slice(&decoded[0..32]); @@ -958,6 +966,7 @@ impl fmt::Debug for FullViewingKey { } impl fmt::Display for FullViewingKey { + #[allow(clippy::unwrap_in_result)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut bytes = io::Cursor::new(Vec::new()); @@ -970,17 +979,21 @@ impl fmt::Display for FullViewingKey { _ => fvk_hrp::TESTNET, }; - bech32::encode_to_fmt(f, hrp, bytes.get_ref().to_base32(), Variant::Bech32).unwrap() + bech32::encode_to_fmt(f, hrp, bytes.get_ref().to_base32(), Variant::Bech32) + .expect("hrp is valid") } } impl FromStr for FullViewingKey { type Err = SerializationError; + #[allow(clippy::unwrap_in_result)] fn from_str(s: &str) -> Result { match bech32::decode(s) { Ok((hrp, bytes, Variant::Bech32)) => { - let mut decoded_bytes = io::Cursor::new(Vec::::from_base32(&bytes).unwrap()); + let mut decoded_bytes = io::Cursor::new( + Vec::::from_base32(&bytes).expect("bech32::decode guarantees valid base32"), + ); let authorizing_key_bytes = decoded_bytes.read_32_bytes()?; let nullifier_deriving_key_bytes = decoded_bytes.read_32_bytes()?; diff --git a/zebra-chain/src/sapling/tree.rs b/zebra-chain/src/sapling/tree.rs index 628039a1ed8..90d8af1b5ba 100644 --- a/zebra-chain/src/sapling/tree.rs +++ b/zebra-chain/src/sapling/tree.rs @@ -306,6 +306,7 @@ impl NoteCommitmentTree { /// chain and input into the proof. /// /// Returns an error if the tree is full. + #[allow(clippy::unwrap_in_result)] pub fn append(&mut self, cm_u: jubjub::Fq) -> Result<(), NoteCommitmentTreeError> { if self.inner.append(&cm_u.into()) { // Invalidate cached root diff --git a/zebra-chain/src/serialization/compact_size.rs b/zebra-chain/src/serialization/compact_size.rs index b682565d2de..0cb073ef80d 100644 --- a/zebra-chain/src/serialization/compact_size.rs +++ b/zebra-chain/src/serialization/compact_size.rs @@ -236,6 +236,7 @@ impl TryFrom for CompactSizeMessage { type Error = SerializationError; #[inline] + #[allow(clippy::unwrap_in_result)] fn try_from(size: usize) -> Result { use SerializationError::Parse; @@ -284,6 +285,7 @@ impl ZcashSerialize for CompactSizeMessage { /// /// If the value exceeds `MAX_PROTOCOL_MESSAGE_LEN`. #[inline] + #[allow(clippy::unwrap_in_result)] fn zcash_serialize(&self, writer: W) -> Result<(), std::io::Error> { // # Security // Defence-in-depth for memory DoS via preallocation. diff --git a/zebra-chain/src/serialization/date_time.rs b/zebra-chain/src/serialization/date_time.rs index 3d903ab453b..bd5a3867e87 100644 --- a/zebra-chain/src/serialization/date_time.rs +++ b/zebra-chain/src/serialization/date_time.rs @@ -71,6 +71,7 @@ impl DateTime32 { /// Returns the duration elapsed since this time, /// or if this time is in the future, returns `None`. + #[allow(clippy::unwrap_in_result)] pub fn checked_elapsed(&self, now: chrono::DateTime) -> Option { DateTime32::try_from(now) .expect("unexpected out of range chrono::DateTime") diff --git a/zebra-chain/src/serialization/zcash_serialize.rs b/zebra-chain/src/serialization/zcash_serialize.rs index d281b8ab151..d00948bf11d 100644 --- a/zebra-chain/src/serialization/zcash_serialize.rs +++ b/zebra-chain/src/serialization/zcash_serialize.rs @@ -65,6 +65,7 @@ impl std::io::Write for FakeWriter { /// /// See `zcash_serialize_external_count` for more details, and usage information. 
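// Worked example (sketch): the CompactSize length prefix uses a single byte
// for counts below 0xFD, so a three-element vector of one-byte items
// serializes as `0x03` followed by the three item bytes; longer vectors
// switch to the multi-byte 0xFD/0xFE/0xFF encodings, and the count is capped
// by `MAX_PROTOCOL_MESSAGE_LEN` as a defence against memory DoS.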
impl ZcashSerialize for Vec { + #[allow(clippy::unwrap_in_result)] fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { let len: CompactSizeMessage = self .len() diff --git a/zebra-chain/src/sprout/tree.rs b/zebra-chain/src/sprout/tree.rs index da01af506f5..cb99cc0dbb8 100644 --- a/zebra-chain/src/sprout/tree.rs +++ b/zebra-chain/src/sprout/tree.rs @@ -240,6 +240,7 @@ impl NoteCommitmentTree { /// Appends a note commitment to the leafmost layer of the tree. /// /// Returns an error if the tree is full. + #[allow(clippy::unwrap_in_result)] pub fn append(&mut self, cm: NoteCommitment) -> Result<(), NoteCommitmentTreeError> { if self.inner.append(&cm.into()) { // Invalidate cached root diff --git a/zebra-chain/src/transaction.rs b/zebra-chain/src/transaction.rs index f2b6db03a41..0dc4b4b93bf 100644 --- a/zebra-chain/src/transaction.rs +++ b/zebra-chain/src/transaction.rs @@ -15,6 +15,7 @@ mod txid; mod unmined; #[cfg(any(test, feature = "proptest-impl"))] +#[allow(clippy::unwrap_in_result)] pub mod arbitrary; #[cfg(test)] mod tests; @@ -946,6 +947,7 @@ impl Transaction { /// using the outputs spent by this transaction. /// /// See `transparent_value_balance` for details. + #[allow(clippy::unwrap_in_result)] fn transparent_value_balance_from_outputs( &self, outputs: &HashMap, diff --git a/zebra-chain/src/transaction/lock_time.rs b/zebra-chain/src/transaction/lock_time.rs index 5d5f2f4102f..32b48875f39 100644 --- a/zebra-chain/src/transaction/lock_time.rs +++ b/zebra-chain/src/transaction/lock_time.rs @@ -86,6 +86,7 @@ impl LockTime { } impl ZcashSerialize for LockTime { + #[allow(clippy::unwrap_in_result)] fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { // This implementation does not check the invariants on `LockTime` so that the // serialization is fallible only if the underlying writer is. This ensures that @@ -100,6 +101,7 @@ impl ZcashSerialize for LockTime { } impl ZcashDeserialize for LockTime { + #[allow(clippy::unwrap_in_result)] fn zcash_deserialize(mut reader: R) -> Result { let n = reader.read_u32::()?; if n < Self::MIN_TIMESTAMP.try_into().expect("fits in u32") { diff --git a/zebra-chain/src/transaction/serialize.rs b/zebra-chain/src/transaction/serialize.rs index 579380b9a16..47b8e5bc99c 100644 --- a/zebra-chain/src/transaction/serialize.rs +++ b/zebra-chain/src/transaction/serialize.rs @@ -178,6 +178,7 @@ impl ZcashSerialize for sapling::ShieldedData { // we can't split ShieldedData out of Option deserialization, // because the counts are read along with the arrays. impl ZcashDeserialize for Option> { + #[allow(clippy::unwrap_in_result)] fn zcash_deserialize(mut reader: R) -> Result { // Denoted as `nSpendsSapling` and `vSpendsSapling` in the spec. let spend_prefixes: Vec<_> = (&mut reader).zcash_deserialize_into()?; @@ -447,6 +448,7 @@ impl ZcashDeserialize for Option { } impl ZcashSerialize for Transaction { + #[allow(clippy::unwrap_in_result)] fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { // Post-Sapling, transaction size is limited to MAX_BLOCK_BYTES. 
// (Strictly, the maximum transaction size is about 1.5 kB less, @@ -661,6 +663,7 @@ impl ZcashSerialize for Transaction { } impl ZcashDeserialize for Transaction { + #[allow(clippy::unwrap_in_result)] fn zcash_deserialize(reader: R) -> Result { // # Consensus // diff --git a/zebra-chain/src/transaction/txid.rs b/zebra-chain/src/transaction/txid.rs index 3e15121eb70..f8b71aec74e 100644 --- a/zebra-chain/src/transaction/txid.rs +++ b/zebra-chain/src/transaction/txid.rs @@ -34,6 +34,7 @@ impl<'a> TxIdBuilder<'a> { /// Compute the Transaction ID for transactions V1 to V4. /// In these cases it's simply the hash of the serialized transaction. + #[allow(clippy::unwrap_in_result)] fn txid_v1_to_v4(self) -> Result { let mut hash_writer = sha256d::Writer::default(); self.trans diff --git a/zebra-chain/src/transparent.rs b/zebra-chain/src/transparent.rs index 0ac6054b1da..4d8d53fe413 100644 --- a/zebra-chain/src/transparent.rs +++ b/zebra-chain/src/transparent.rs @@ -77,6 +77,7 @@ impl AsRef<[u8]> for CoinbaseData { } impl std::fmt::Debug for CoinbaseData { + #[allow(clippy::unwrap_in_result)] fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let escaped = String::from_utf8( self.0 diff --git a/zebra-chain/src/value_balance.rs b/zebra-chain/src/value_balance.rs index 9d37874c1ba..e34f132befe 100644 --- a/zebra-chain/src/value_balance.rs +++ b/zebra-chain/src/value_balance.rs @@ -294,6 +294,7 @@ impl ValueBalance { /// value pool. /// /// See `add_block` for details. + #[allow(clippy::unwrap_in_result)] pub fn add_chain_value_pool_change( self, chain_value_pool_change: ValueBalance, @@ -345,6 +346,7 @@ impl ValueBalance { } /// From byte array + #[allow(clippy::unwrap_in_result)] pub fn from_bytes(bytes: [u8; 32]) -> Result, ValueBalanceError> { let transparent = Amount::from_bytes( bytes[0..8] diff --git a/zebra-chain/src/work/difficulty.rs b/zebra-chain/src/work/difficulty.rs index 7a2242e0dcf..e4a714c3786 100644 --- a/zebra-chain/src/work/difficulty.rs +++ b/zebra-chain/src/work/difficulty.rs @@ -178,6 +178,7 @@ impl CompactDifficulty { /// /// Returns None for negative, zero, and overflow values. (zcashd rejects /// these values, before comparing the hash.) + #[allow(clippy::unwrap_in_result)] pub fn to_expanded(self) -> Option { // The constants for this floating-point representation. // Alias the struct constants here, so the code is easier to read. @@ -464,6 +465,7 @@ impl PartialOrd for block::Hash { /// /// See `::partial_cmp` /// for details. + #[allow(clippy::unwrap_in_result)] fn partial_cmp(&self, other: &ExpandedDifficulty) -> Option { Some( // Use the canonical implementation, but reverse the order diff --git a/zebra-chain/src/work/equihash.rs b/zebra-chain/src/work/equihash.rs index 38f34abf106..230ba6d5a94 100644 --- a/zebra-chain/src/work/equihash.rs +++ b/zebra-chain/src/work/equihash.rs @@ -41,6 +41,7 @@ impl Solution { pub const INPUT_LENGTH: usize = 4 + 32 * 3 + 4 * 2; /// Returns `Ok(())` if `EquihashSolution` is valid for `header` + #[allow(clippy::unwrap_in_result)] pub fn check(&self, header: &Header) -> Result<(), Error> { let n = 200; let k = 9; diff --git a/zebra-consensus/src/checkpoint.rs b/zebra-consensus/src/checkpoint.rs index 30424bac6cf..8d8c8f4c8c2 100644 --- a/zebra-consensus/src/checkpoint.rs +++ b/zebra-consensus/src/checkpoint.rs @@ -521,6 +521,7 @@ where /// /// If the block does not pass basic validity checks, /// returns an error immediately. 
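// (Illustrative sketch, not part of the patch.) `queue_block` below uses the
// standard tokio oneshot pattern: the sender is stored with the queued block,
// the receiver becomes the caller's future, and the verifier sends exactly
// one result. The payload types here are stand-ins for the real ones:
//
//     use tokio::sync::oneshot;
//
//     let (tx, rx) = oneshot::channel::<Result<block::Hash, &'static str>>();
//     // `tx` is stored with the queued block, then the verifier later calls:
//     let _ = tx.send(Ok(verified_hash));
//     // while the caller awaits `rx` to learn the verification result.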
+ #[allow(clippy::unwrap_in_result)] fn queue_block(&mut self, block: Arc) -> Result { // Set up a oneshot channel to send results let (tx, rx) = oneshot::channel(); @@ -595,6 +596,7 @@ where /// During checkpoint range processing, process all the blocks at `height`. /// /// Returns the first valid block. If there is no valid block, returns None. + #[allow(clippy::unwrap_in_result)] fn process_height( &mut self, height: block::Height, diff --git a/zebra-network/src/address_book.rs b/zebra-network/src/address_book.rs index 40f691d1f25..9838950fe19 100644 --- a/zebra-network/src/address_book.rs +++ b/zebra-network/src/address_book.rs @@ -272,6 +272,7 @@ impl AddressBook { /// As an exception, this function can ignore all changes for specific /// [`SocketAddr`]s. Ignored addresses will never be used to connect to /// peers. + #[allow(clippy::unwrap_in_result)] pub fn update(&mut self, change: MetaAddrChange) -> Option { let previous = self.get(&change.addr()); diff --git a/zebra-network/src/meta_addr.rs b/zebra-network/src/meta_addr.rs index be3b1ef4e60..63e68bf09fd 100644 --- a/zebra-network/src/meta_addr.rs +++ b/zebra-network/src/meta_addr.rs @@ -294,6 +294,7 @@ impl MetaAddr { /// [`MetaAddr`]. /// /// Returns [`None`] if the gossiped peer is missing the untrusted services field. + #[allow(clippy::unwrap_in_result)] pub fn new_gossiped_change(self) -> Option { let untrusted_services = self.services?; @@ -593,6 +594,7 @@ impl MetaAddr { /// Return a sanitized version of this `MetaAddr`, for sending to a remote peer. /// /// Returns `None` if this `MetaAddr` should not be sent to remote peers. + #[allow(clippy::unwrap_in_result)] pub fn sanitize(&self, network: Network) -> Option { if !self.last_known_info_is_valid_for_outbound(network) { return None; diff --git a/zebra-network/src/peer/client.rs b/zebra-network/src/peer/client.rs index 5f56e6c1b3a..16101bd47b4 100644 --- a/zebra-network/src/peer/client.rs +++ b/zebra-network/src/peer/client.rs @@ -203,6 +203,7 @@ impl ClientRequestReceiver { /// Closing the channel ensures that: /// - the request stream terminates, and /// - task notifications are not required. + #[allow(clippy::unwrap_in_result)] pub fn close_and_flush_next(&mut self) -> Option { self.inner.close(); @@ -419,6 +420,7 @@ impl MissingInventoryCollector { impl Client { /// Check if this connection's heartbeat task has exited. + #[allow(clippy::unwrap_in_result)] fn check_heartbeat(&mut self, cx: &mut Context<'_>) -> Result<(), SharedPeerError> { let is_canceled = self .shutdown_tx diff --git a/zebra-network/src/peer/client/tests.rs b/zebra-network/src/peer/client/tests.rs index 09dbf69ebd3..809386ed854 100644 --- a/zebra-network/src/peer/client/tests.rs +++ b/zebra-network/src/peer/client/tests.rs @@ -136,6 +136,7 @@ impl ClientTestHarness { /// /// TODO: make ReceiveRequestAttempt generic, and use it here. #[allow(dead_code)] + #[allow(clippy::unwrap_in_result)] pub(crate) fn try_to_receive_inventory_change(&mut self) -> Option { let receive_result = self .inv_receiver diff --git a/zebra-network/src/peer/error.rs b/zebra-network/src/peer/error.rs index b96a7f2db39..4e53cb9ac0f 100644 --- a/zebra-network/src/peer/error.rs +++ b/zebra-network/src/peer/error.rs @@ -181,6 +181,7 @@ impl ErrorSlot { /// /// Briefly locks the error slot's threaded `std::sync::Mutex`, to get a /// reference to the error in the slot. 
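// (Illustrative note, not part of the patch.) The slot is assumed to be an
// `Arc<std::sync::Mutex<Option<SharedPeerError>>>`, so a read is a brief
// lock-and-clone, which is why the `expect` below is sound apart from mutex
// poisoning:
//
//     let error = slot.lock().expect("error mutex should be unpoisoned").clone();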
+ #[allow(clippy::unwrap_in_result)] pub fn try_get_error(&self) -> Option { self.0 .lock() @@ -197,6 +198,7 @@ impl ErrorSlot { /// /// Briefly locks the error slot's threaded `std::sync::Mutex`, to check for /// a previous error, then update the error in the slot. + #[allow(clippy::unwrap_in_result)] pub fn try_update_error(&self, e: SharedPeerError) -> Result<(), AlreadyErrored> { let mut guard = self.0.lock().expect("error mutex should be unpoisoned"); diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index 62ba42173b7..37989daafc4 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -587,6 +587,7 @@ where } /// Performs P2C on `ready_service_list` to randomly select a less-loaded ready service. + #[allow(clippy::unwrap_in_result)] fn select_p2c_peer_from_list(&self, ready_service_list: &HashSet) -> Option { match ready_service_list.len() { 0 => None, diff --git a/zebra-network/src/protocol/external/addr/v2.rs b/zebra-network/src/protocol/external/addr/v2.rs index d11c6560ab7..ec712359692 100644 --- a/zebra-network/src/protocol/external/addr/v2.rs +++ b/zebra-network/src/protocol/external/addr/v2.rs @@ -181,6 +181,7 @@ impl AddrV2 { /// /// The returned IP version is chosen based on `IP_ADDR_SIZE`, /// which should be [`ADDR_V2_IPV4_ADDR_SIZE`] or [`ADDR_V2_IPV6_ADDR_SIZE`]. + #[allow(clippy::unwrap_in_result)] fn ip_addr_from_bytes( addr_bytes: Vec, ) -> Result diff --git a/zebra-network/src/protocol/external/codec.rs b/zebra-network/src/protocol/external/codec.rs index c06724f58ca..d249eca09d3 100644 --- a/zebra-network/src/protocol/external/codec.rs +++ b/zebra-network/src/protocol/external/codec.rs @@ -326,6 +326,7 @@ impl Decoder for Codec { type Item = Message; type Error = Error; + #[allow(clippy::unwrap_in_result)] fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { use Error::Parse; match self.state { diff --git a/zebra-network/src/protocol/internal/response.rs b/zebra-network/src/protocol/internal/response.rs index eb54e037b3c..44a8ba68f6a 100644 --- a/zebra-network/src/protocol/internal/response.rs +++ b/zebra-network/src/protocol/internal/response.rs @@ -78,6 +78,7 @@ pub enum Response { } impl fmt::Display for Response { + #[allow(clippy::unwrap_in_result)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str(&match self { Response::Nil => "Nil".to_string(), diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index bbafb127c94..e9ce2c889ae 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -343,6 +343,7 @@ where Ok(response) } + #[allow(clippy::unwrap_in_result)] fn get_blockchain_info(&self) -> Result { let network = self.network; @@ -1227,6 +1228,7 @@ pub struct GetAddressTxIdsRequest { impl GetRawTransaction { /// Converts `tx` and `height` into a new `GetRawTransaction` in the `verbose` format. + #[allow(clippy::unwrap_in_result)] fn from_transaction( tx: Arc, height: Option, diff --git a/zebra-script/src/lib.rs b/zebra-script/src/lib.rs index 79404f7a460..3ea61134c12 100644 --- a/zebra-script/src/lib.rs +++ b/zebra-script/src/lib.rs @@ -152,6 +152,7 @@ impl CachedFfiTransaction { /// Verify if the script in the input at `input_index` of a transaction correctly /// spends the matching [`transparent::Output`] it refers to, with the [`ConsensusBranchId`] /// of the block containing the transaction. 
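// (Illustrative sketch, not part of the patch.) Callers check every
// transparent input of a transaction against the output it spends;
// `inputs()` is an assumed accessor for the sketch:
//
//     for input_index in 0..transaction.inputs().len() {
//         cached_tx.is_valid(branch_id, input_index)?;
//     }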
+ #[allow(clippy::unwrap_in_result)] pub fn is_valid(&self, branch_id: ConsensusBranchId, input_index: usize) -> Result<(), Error> { let previous_output = self .all_previous_outputs @@ -210,6 +211,7 @@ impl CachedFfiTransaction { /// Returns the number of transparent signature operations in the /// transparent inputs and outputs of this transaction. + #[allow(clippy::unwrap_in_result)] pub fn legacy_sigop_count(&self) -> Result { let mut err = 0; diff --git a/zebra-state/src/service/arbitrary.rs b/zebra-state/src/service/arbitrary.rs index eeb2b092058..acb08070e87 100644 --- a/zebra-state/src/service/arbitrary.rs +++ b/zebra-state/src/service/arbitrary.rs @@ -112,6 +112,7 @@ impl Strategy for PreparedChain { type Tree = PreparedChainTree; type Value = ::Value; + #[allow(clippy::unwrap_in_result)] fn new_tree(&self, runner: &mut TestRunner) -> NewTree { let mut chain = self.chain.lock().unwrap(); if chain.is_none() { diff --git a/zebra-state/src/service/block_iter.rs b/zebra-state/src/service/block_iter.rs index 8e430566d95..73da14bf93b 100644 --- a/zebra-state/src/service/block_iter.rs +++ b/zebra-state/src/service/block_iter.rs @@ -39,6 +39,7 @@ impl Iter<'_> { } } + #[allow(clippy::unwrap_in_result)] fn next_finalized_block(&mut self) -> Option> { let Iter { service, state } = self; diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index ea8cbe325fe..958910a260c 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -235,6 +235,7 @@ impl FinalizedState { /// - Propagates any errors from updating history and note commitment trees /// - If `hashFinalSaplingRoot` / `hashLightClientRoot` / `hashBlockCommitments` /// does not match the expected value + #[allow(clippy::unwrap_in_result)] pub fn commit_finalized_direct( &mut self, finalized: FinalizedBlock, diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 5bee586fa90..8361c20a54d 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -216,6 +216,7 @@ impl ReadDisk for DiskDb { .valid() } + #[allow(clippy::unwrap_in_result)] fn zs_get(&self, cf: &C, key: &K) -> Option where C: rocksdb::AsColumnFamilyRef, diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index 3d1d9858337..66ec3c4eec9 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -59,18 +59,21 @@ impl ZebraDb { /// Returns the tip height and hash, if there is one. // // TODO: move this method to the tip section + #[allow(clippy::unwrap_in_result)] pub fn tip(&self) -> Option<(block::Height, block::Hash)> { let hash_by_height = self.db.cf_handle("hash_by_height").unwrap(); self.db.zs_last_key_value(&hash_by_height) } /// Returns the finalized hash for a given `block::Height` if it is present. + #[allow(clippy::unwrap_in_result)] pub fn hash(&self, height: block::Height) -> Option { let hash_by_height = self.db.cf_handle("hash_by_height").unwrap(); self.db.zs_get(&hash_by_height, &height) } /// Returns the height of the given block if it exists. 
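// (Illustrative note, not part of the patch.) `hash` and `height` read the
// two sides of the same height-to-hash index, so for any block in the
// finalized chain they round-trip:
//
//     assert_eq!(db.height(db.hash(h).expect("h is finalized")), Some(h));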
+ #[allow(clippy::unwrap_in_result)] pub fn height(&self, hash: block::Hash) -> Option { let height_by_hash = self.db.cf_handle("height_by_hash").unwrap(); self.db.zs_get(&height_by_hash, &hash) @@ -80,6 +83,7 @@ impl ZebraDb { /// [`Height`](zebra_chain::block::Height), if it exists in the finalized chain. // // TODO: move this method to the start of the section + #[allow(clippy::unwrap_in_result)] pub fn block(&self, hash_or_height: HashOrHeight) -> Option> { // Blocks let block_header_by_height = self.db.cf_handle("block_by_height").unwrap(); @@ -116,6 +120,7 @@ impl ZebraDb { /// Returns the Sapling /// [`NoteCommitmentTree`](sapling::tree::NoteCommitmentTree) specified by a /// hash or height, if it exists in the finalized `db`. + #[allow(clippy::unwrap_in_result)] pub fn sapling_tree( &self, hash_or_height: HashOrHeight, @@ -130,6 +135,7 @@ impl ZebraDb { /// Returns the Orchard /// [`NoteCommitmentTree`](orchard::tree::NoteCommitmentTree) specified by a /// hash or height, if it exists in the finalized `db`. + #[allow(clippy::unwrap_in_result)] pub fn orchard_tree( &self, hash_or_height: HashOrHeight, @@ -166,6 +172,7 @@ impl ZebraDb { /// Returns the [`TransactionLocation`] for [`transaction::Hash`], /// if it exists in the finalized chain. + #[allow(clippy::unwrap_in_result)] pub fn transaction_location(&self, hash: transaction::Hash) -> Option { let tx_loc_by_hash = self.db.cf_handle("tx_by_hash").unwrap(); self.db.zs_get(&tx_loc_by_hash, &hash) @@ -173,6 +180,7 @@ impl ZebraDb { /// Returns the [`transaction::Hash`] for [`TransactionLocation`], /// if it exists in the finalized chain. + #[allow(clippy::unwrap_in_result)] #[allow(dead_code)] pub fn transaction_hash(&self, location: TransactionLocation) -> Option { let hash_by_tx_loc = self.db.cf_handle("hash_by_tx_loc").unwrap(); @@ -183,6 +191,7 @@ impl ZebraDb { /// if a transaction with that hash exists in the finalized chain. // // TODO: move this method to the start of the section + #[allow(clippy::unwrap_in_result)] pub fn transaction(&self, hash: transaction::Hash) -> Option<(Arc, Height)> { let tx_by_loc = self.db.cf_handle("tx_by_loc").unwrap(); @@ -406,6 +415,7 @@ impl DiskWriteBatch { /// # Errors /// /// - This method does not currently return any errors. + #[allow(clippy::unwrap_in_result)] pub fn prepare_block_header_transactions_batch( &mut self, db: &DiskDb, diff --git a/zebra-state/src/service/finalized_state/zebra_db/chain.rs b/zebra-state/src/service/finalized_state/zebra_db/chain.rs index 76e5aceac27..6684fff13b0 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/chain.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/chain.rs @@ -67,6 +67,7 @@ impl DiskWriteBatch { /// # Errors /// /// - Returns any errors from updating the history tree + #[allow(clippy::unwrap_in_result)] pub fn prepare_history_batch( &mut self, db: &DiskDb, @@ -108,6 +109,7 @@ impl DiskWriteBatch { /// # Errors /// /// - Propagates any errors from updating value pools + #[allow(clippy::unwrap_in_result)] pub fn prepare_chain_value_pools_batch( &mut self, db: &DiskDb, diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs index 06a7143b7b1..c69a481b577 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs @@ -91,6 +91,7 @@ impl ZebraDb { /// Returns the Sprout note commitment tree matching the given anchor. 
/// /// This is used for interstitial tree building, which is unique to Sprout. + #[allow(clippy::unwrap_in_result)] pub fn sprout_note_commitment_tree_by_anchor( &self, sprout_anchor: &sprout::tree::Root, @@ -118,6 +119,7 @@ impl ZebraDb { /// Returns the Sapling note commitment tree matching the given block height. #[allow(dead_code)] + #[allow(clippy::unwrap_in_result)] pub fn sapling_note_commitment_tree_by_height( &self, height: &Height, @@ -145,6 +147,7 @@ impl ZebraDb { /// Returns the Orchard note commitment tree matching the given block height. #[allow(dead_code)] + #[allow(clippy::unwrap_in_result)] pub fn orchard_note_commitment_tree_by_height( &self, height: &Height, @@ -199,6 +202,7 @@ impl DiskWriteBatch { /// # Errors /// /// - This method doesn't currently return any errors, but it might in future + #[allow(clippy::unwrap_in_result)] pub fn prepare_nullifier_batch( &mut self, db: &DiskDb, @@ -263,6 +267,7 @@ impl DiskWriteBatch { /// # Errors /// /// - Propagates any errors from updating the history tree + #[allow(clippy::unwrap_in_result)] pub fn prepare_note_commitment_batch( &mut self, db: &DiskDb, diff --git a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs index 756189e2aae..6453d65b779 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs @@ -43,6 +43,7 @@ impl ZebraDb { /// Returns the [`AddressBalanceLocation`] for a [`transparent::Address`], /// if it is in the finalized state. + #[allow(clippy::unwrap_in_result)] pub fn address_balance_location( &self, address: &transparent::Address, @@ -89,6 +90,7 @@ impl ZebraDb { /// Returns the transparent output for an [`OutputLocation`], /// if it is unspent in the finalized state. + #[allow(clippy::unwrap_in_result)] pub fn utxo_by_location( &self, output_location: OutputLocation, @@ -176,6 +178,7 @@ impl ZebraDb { } /// Returns the transaction hash for an [`TransactionLocation`]. 
+ #[allow(clippy::unwrap_in_result)] pub fn tx_id_by_location(&self, tx_location: TransactionLocation) -> Option { let hash_by_tx_loc = self.db.cf_handle("hash_by_tx_loc").unwrap(); @@ -415,6 +418,7 @@ impl DiskWriteBatch { /// # Errors /// /// - This method doesn't currently return any errors, but it might in future + #[allow(clippy::unwrap_in_result)] pub fn prepare_new_transparent_outputs_batch( &mut self, db: &DiskDb, @@ -490,6 +494,7 @@ impl DiskWriteBatch { /// # Errors /// /// - This method doesn't currently return any errors, but it might in future + #[allow(clippy::unwrap_in_result)] pub fn prepare_spent_transparent_outputs_batch( &mut self, db: &DiskDb, @@ -543,6 +548,7 @@ impl DiskWriteBatch { /// # Errors /// /// - This method doesn't currently return any errors, but it might in future + #[allow(clippy::unwrap_in_result)] pub fn prepare_spending_transparent_tx_ids_batch( &mut self, db: &DiskDb, @@ -591,6 +597,7 @@ impl DiskWriteBatch { /// # Errors /// /// - This method doesn't currently return any errors, but it might in future + #[allow(clippy::unwrap_in_result)] pub fn prepare_transparent_balances_batch( &mut self, db: &DiskDb, diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index f17c605d55a..875a26f4396 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -381,6 +381,7 @@ impl NonFinalizedState { /// /// The trees must be the trees of the finalized tip. /// They are used to recreate the trees if a fork is needed. + #[allow(clippy::unwrap_in_result)] fn parent_chain( &mut self, parent_hash: block::Hash, diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index eef54f7990e..01bdc7c9f78 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -266,6 +266,7 @@ impl Chain { /// /// The trees must match the trees of the finalized tip and are used /// to rebuild them after the fork. 
+ #[allow(clippy::unwrap_in_result)] pub fn fork( &self, fork_tip: block::Hash, @@ -704,6 +705,7 @@ trait UpdateWith<T> { impl UpdateWith<ContextuallyValidBlock> for Chain { #[instrument(skip(self, contextually_valid), fields(block = %contextually_valid.block))] + #[allow(clippy::unwrap_in_result)] fn update_chain_tip_with( &mut self, contextually_valid: &ContextuallyValidBlock, @@ -992,6 +994,7 @@ impl &HashMap<transparent::OutPoint, transparent::OrderedUtxo>, )> for Chain { + #[allow(clippy::unwrap_in_result)] fn update_chain_tip_with( &mut self, &(created_outputs, creating_tx_hash, block_created_outputs): &( diff --git a/zebra-state/src/service/non_finalized_state/chain/index.rs b/zebra-state/src/service/non_finalized_state/chain/index.rs index d1f5c2d543b..e272500ae51 100644 --- a/zebra-state/src/service/non_finalized_state/chain/index.rs +++ b/zebra-state/src/service/non_finalized_state/chain/index.rs @@ -66,12 +66,19 @@ impl &transparent::OrderedUtxo, )> for TransparentTransfers { + #[allow(clippy::unwrap_in_result)] fn update_chain_tip_with( &mut self, &(outpoint, created_utxo): &(&transparent::OutPoint, &transparent::OrderedUtxo), ) -> Result<(), ValidateContextError> { - self.balance = - (self.balance + created_utxo.utxo.output.value().constrain().unwrap()).unwrap(); + self.balance = (self.balance + + created_utxo + .utxo + .output + .value() + .constrain() + .expect("NonNegative values are always valid NegativeAllowed values")) + .expect("total UTXO value has already been checked"); let transaction_location = transaction_location(created_utxo); let output_location = OutputLocation::from_outpoint(transaction_location, outpoint); @@ -94,8 +101,14 @@ impl &(outpoint, created_utxo): &(&transparent::OutPoint, &transparent::OrderedUtxo), _position: RevertPosition, ) { - self.balance = - (self.balance - created_utxo.utxo.output.value().constrain().unwrap()).unwrap(); + self.balance = (self.balance + - created_utxo + .utxo + .output + .value() + .constrain() + .expect("NonNegative values are always valid NegativeAllowed values")) + .expect("reversing previous balance changes is always valid"); let transaction_location = transaction_location(created_utxo); let output_location = OutputLocation::from_outpoint(transaction_location, outpoint); @@ -130,6 +143,7 @@ impl &transparent::OrderedUtxo, )> for TransparentTransfers { + #[allow(clippy::unwrap_in_result)] fn update_chain_tip_with( &mut self, &(spending_input, spending_tx_hash, spent_output): &( @@ -139,8 +153,14 @@ impl ), ) -> Result<(), ValidateContextError> { // Spending a UTXO subtracts value from the balance - self.balance = - (self.balance - spent_output.utxo.output.value().constrain().unwrap()).unwrap(); + self.balance = (self.balance + - spent_output + .utxo + .output + .value() + .constrain() + .expect("NonNegative values are always valid NegativeAllowed values")) + .expect("total UTXO value has already been checked"); let spent_outpoint = spending_input.outpoint().expect("checked by caller"); @@ -166,8 +186,14 @@ impl ), _position: RevertPosition, ) { - self.balance = - (self.balance + spent_output.utxo.output.value().constrain().unwrap()).unwrap(); + self.balance = (self.balance + + spent_output + .utxo + .output + .value() + .constrain() + .expect("NonNegative values are always valid NegativeAllowed values")) + .expect("reversing previous balance changes is always valid"); let spent_outpoint = spending_input.outpoint().expect("checked by caller"); diff --git a/zebra-test/src/command.rs b/zebra-test/src/command.rs index 93ff57daa43..fde29cc7303 100644 --- a/zebra-test/src/command.rs +++
b/zebra-test/src/command.rs @@ -131,6 +131,7 @@ impl<T> TestDirExt for T where Self: AsRef<Path> + Sized, { + #[allow(clippy::unwrap_in_result)] fn spawn_child_with_command(self, cmd: &str, args: Arguments) -> Result<TestChild<Self>> { let mut cmd = test_cmd(cmd, self.as_ref())?; @@ -639,6 +640,7 @@ impl<T> TestChild<T> { /// Kills the child on error, or after the configured timeout has elapsed. /// See [`Self::expect_line_matching_regex_set`] for details. #[instrument(skip(self))] + #[allow(clippy::unwrap_in_result)] pub fn expect_stdout_line_matches<R>(&mut self, success_regex: R) -> Result<&mut Self> where R: ToRegex + Debug, @@ -665,6 +667,7 @@ impl<T> TestChild<T> { /// Kills the child on error, or after the configured timeout has elapsed. /// See [`Self::expect_line_matching_regex_set`] for details. #[instrument(skip(self))] + #[allow(clippy::unwrap_in_result)] pub fn expect_stderr_line_matches<R>(&mut self, success_regex: R) -> Result<&mut Self> where R: ToRegex + Debug, @@ -689,6 +692,7 @@ impl<T> TestChild<T> { /// /// [`Self::expect_line_matching_regexes`] wrapper for strings, /// [`Regex`](regex::Regex)es, and [`RegexSet`]s. + #[allow(clippy::unwrap_in_result)] pub fn expect_line_matching_regex_set( &mut self, lines: &mut L, @@ -709,6 +713,7 @@ impl<T> TestChild<T> { /// Checks each line in `lines` against a regex set, and returns Ok if a line matches. /// /// [`Self::expect_line_matching_regexes`] wrapper for regular expression iterators. + #[allow(clippy::unwrap_in_result)] pub fn expect_line_matching_regex_iter( &mut self, lines: &mut L, @@ -1007,6 +1012,7 @@ impl<T> TestOutput<T> { /// Tests if standard output matches `regex`. #[instrument(skip(self))] + #[allow(clippy::unwrap_in_result)] pub fn stdout_matches<R>(&self, regex: R) -> Result<&Self> where R: ToRegex + Debug, @@ -1030,6 +1036,7 @@ impl<T> TestOutput<T> { /// Tests if any lines in standard output match `regex`. #[instrument(skip(self))] + #[allow(clippy::unwrap_in_result)] pub fn stdout_line_matches<R>(&self, regex: R) -> Result<&Self> where R: ToRegex + Debug, @@ -1059,6 +1066,7 @@ impl<T> TestOutput<T> { /// Tests if standard error matches `regex`. #[instrument(skip(self))] + #[allow(clippy::unwrap_in_result)] pub fn stderr_matches<R>(&self, regex: R) -> Result<&Self> where R: ToRegex + Debug, @@ -1082,6 +1090,7 @@ impl<T> TestOutput<T> { /// Tests if any lines in standard error match `regex`. #[instrument(skip(self))] + #[allow(clippy::unwrap_in_result)] pub fn stderr_line_matches<R>(&self, regex: R) -> Result<&Self> where R: ToRegex + Debug, diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index d2a1380693e..46891e09e73 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -194,6 +194,7 @@ impl Application for ZebradApp { /// beyond the default ones provided by the framework, this is the place /// to do so. #[allow(clippy::print_stderr)] + #[allow(clippy::unwrap_in_result)] fn register_components(&mut self, command: &Self::Cmd) -> Result<(), FrameworkError> { use crate::components::{ metrics::MetricsEndpoint, tokio::TokioComponent, tracing::TracingEndpoint, @@ -316,7 +317,7 @@ impl Application for ZebradApp { // This MUST happen after `Terminal::new` to ensure our preferred panic // handler is the last one installed let (panic_hook, eyre_hook) = builder.into_hooks(); - eyre_hook.install().unwrap(); + eyre_hook.install().expect("eyre_hook.install() error"); // The Sentry default config pulls in the DSN from the `SENTRY_DSN` // environment variable.
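The change pattern above, as a minimal self-contained sketch: `clippy::unwrap_in_result` warns when a function that returns `Result` or `Option` can panic internally via `unwrap()` or `expect()`. This series keeps the intentional panics, documents each invariant in an `expect()` message, and allows the lint per function. The function below is a hypothetical example, not Zebra code:

```rust
/// Returns the value at `index`, if the slice is long enough.
///
/// Hypothetical example (not Zebra code) of the lint pattern used above.
#[allow(clippy::unwrap_in_result)]
fn checked_lookup(values: &[u64], index: usize) -> Option<u64> {
    // Without the `allow`, `clippy::unwrap_in_result` warns here: this
    // function returns `Option`, so panicking instead of returning `None`
    // must be an explicit, documented choice.
    let value = values
        .get(index)
        .expect("callers only pass indexes they have already bounds-checked");

    Some(*value)
}
```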
@@ -399,6 +400,7 @@ impl Application for ZebradApp { } /// Load this application's configuration and initialize its components. + #[allow(clippy::unwrap_in_result)] fn init(&mut self, command: &Self::Cmd) -> Result<(), FrameworkError> { // Create and register components with the application. // We do this first to calculate a proper dependency ordering before @@ -406,7 +408,10 @@ impl Application for ZebradApp { self.register_components(command)?; // Fire callback to signal state in the application lifecycle - let config = self.config.take().unwrap(); + let config = self + .config + .take() + .expect("register_components always populates the config"); self.after_config(config)?; Ok(()) diff --git a/zebrad/src/components/mempool/storage.rs b/zebrad/src/components/mempool/storage.rs index 2af476e57ef..0f3ac4b4f3f 100644 --- a/zebrad/src/components/mempool/storage.rs +++ b/zebrad/src/components/mempool/storage.rs @@ -170,6 +170,7 @@ impl Storage { /// /// If inserting this transaction evicts other transactions, they will be tracked /// as [`SameEffectsChainRejectionError::RandomlyEvicted`]. + #[allow(clippy::unwrap_in_result)] pub fn insert(&mut self, tx: VerifiedUnminedTx) -> Result<UnminedTxId, MempoolError> { // # Security // diff --git a/zebrad/src/components/mempool/storage/verified_set.rs b/zebrad/src/components/mempool/storage/verified_set.rs index ef2cd26c18f..4f48445498a 100644 --- a/zebrad/src/components/mempool/storage/verified_set.rs +++ b/zebrad/src/components/mempool/storage/verified_set.rs @@ -135,6 +135,7 @@ impl VerifiedSet { /// be too bad. /// /// [ZIP-401]: https://zips.z.cash/zip-0401 + #[allow(clippy::unwrap_in_result)] pub fn evict_one(&mut self) -> Option<VerifiedUnminedTx> { if self.transactions.is_empty() { None @@ -148,7 +149,8 @@ impl VerifiedSet { .map(|tx| tx.clone().eviction_weight()) .collect(); - let dist = WeightedIndex::new(weights).unwrap(); + let dist = WeightedIndex::new(weights) + .expect("there is at least one weight and all weights are valid"); Some(self.remove(dist.sample(&mut thread_rng()))) } diff --git a/zebrad/src/components/tokio.rs b/zebrad/src/components/tokio.rs index dd16d76662d..d942e28816e 100644 --- a/zebrad/src/components/tokio.rs +++ b/zebrad/src/components/tokio.rs @@ -22,13 +22,14 @@ pub struct TokioComponent { } impl TokioComponent { + #[allow(clippy::unwrap_in_result)] pub fn new() -> Result<Self, FrameworkError> { Ok(Self { rt: Some( tokio::runtime::Builder::new_multi_thread() .enable_all() .build() - .unwrap(), + .expect("runtime building should not fail"), ), }) } diff --git a/zebrad/src/components/tracing/endpoint.rs b/zebrad/src/components/tracing/endpoint.rs index 56b5ef8288d..831cf26307c 100644 --- a/zebrad/src/components/tracing/endpoint.rs +++ b/zebrad/src/components/tracing/endpoint.rs @@ -50,6 +50,7 @@ impl TracingEndpoint { } #[cfg(feature = "filter-reload")] + #[allow(clippy::unwrap_in_result)] pub fn init_tokio(&mut self, tokio_component: &TokioComponent) -> Result<(), FrameworkError> { use hyper::{ service::{make_service_fn, service_fn}, diff --git a/zebrad/tests/common/launch.rs b/zebrad/tests/common/launch.rs index c6972614bda..75e378b4479 100644 --- a/zebrad/tests/common/launch.rs +++ b/zebrad/tests/common/launch.rs @@ -111,6 +111,7 @@ impl<T> ZebradTestDirExt for T where Self: TestDirExt + AsRef<Path> + Sized, { + #[allow(clippy::unwrap_in_result)] fn spawn_child(self, extra_args: Arguments) -> Result<TestChild<Self>> { let dir = self.as_ref(); let default_config_path = dir.join("zebrad.toml"); diff --git a/zebrad/tests/common/lightwalletd.rs b/zebrad/tests/common/lightwalletd.rs index
fb75e53b23c..a50de8934fa 100644 --- a/zebrad/tests/common/lightwalletd.rs +++ b/zebrad/tests/common/lightwalletd.rs @@ -130,6 +130,7 @@ impl<T> LightWalletdTestDirExt for T where Self: TestDirExt + AsRef<Path> + Sized, { + #[allow(clippy::unwrap_in_result)] fn spawn_lightwalletd_child( self, lightwalletd_state_path: impl Into<Option<PathBuf>>, From 2103f36dea43538cb683726735f501863d607444 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 29 Jun 2022 02:08:12 +1000 Subject: [PATCH 41/91] Update checkpoints - June 2022 (#4708) --- .../src/checkpoint/main-checkpoints.txt | 301 ++++++++++++++++++ .../src/checkpoint/test-checkpoints.txt | 215 +++++++++++++ 2 files changed, 516 insertions(+) diff --git a/zebra-consensus/src/checkpoint/main-checkpoints.txt b/zebra-consensus/src/checkpoint/main-checkpoints.txt index 60e18433f0a..c6524dc45bf 100644 --- a/zebra-consensus/src/checkpoint/main-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/main-checkpoints.txt @@ -4185,3 +4185,304 @@ 1644039 00000000007a3500bc32d646ed52b16b5e53dda4b14fb86012772f0e23f6b947 1644439 000000000030815167f32bb8081c9dc3a900061fb0a8be823b12db9736dd25b0 1644839 0000000000fffa3453f48b0cc81ffa39e9a5df8819bfe5ab5c71dd6d83c1ee1b
+1658039 00000000014423f649d0f6b613d2a3a93f83c8a36196d77fb0520890cb28234b +1658439 0000000000ecd300a16b36e788936f1849775c22183d38dfc0dcf65549455bab +1658839 000000000164bbb3e8583ba97a41f419eeb1bd8e987f6b4c5069b47496f7fa33 +1659239 000000000045c288232c6f9b21d63a5936c2ce2d6cdb4650a8ae9b20eeae7bd4 +1659639 0000000000ad267c7db1d62472b1681756930158d175ad3226eddcf80da67791 +1660039 0000000000000cd619866b901d0eb26f0d7f36c2ae9953d78bdd451b4dacf24c +1660439 00000000001b3042238c7b45dd6aa0ceee102f2d85042ac935ea5d7e6a896af7 +1660839 0000000000f5b17e9739ea7f1941bb56e3b7aeed960c0d23fde0c2183edadced +1661239 00000000007f12960dd70f02dad821d2a192efd992f7237faf7f9780029686ec +1661639 000000000056c58ecb2b2d6af313c98b134142d43f331ac6c5c6230685e83142 +1662039 0000000000805d8d96f847aac8c2eb6dc7e57a7195c81cbd8f62ae65e76ad69c +1662439 0000000000cd669914a842ac3ab1d4e2fb96fde0375e472590e9d1d9e481edee +1662839 0000000000b7812fd7edda497f11afdfd8d96015bca90ea099b62f065c28584d +1663239 00000000000adf0876a77341d7125ef8424461621415fc5d0c57a49d013bc211 +1663639 00000000008af18a04076a7da44bf309ea2a48ce8c840f4b7642864c8839fae0 +1664039 0000000000f5072faae8bb5dfff2ae259444661a164d26b584424416870946fb +1664439 00000000014d4f000b43750fe7c97fa20a7db8fb81f6a33b596d1b241e8fab45 +1664839 000000000089fddcbea90bf426eb162ab5051617e63456522ff5da06cfc90083 +1665239 0000000001b02524fc454b68b9484384d818495b3a52a5d452f292408736646d +1665639 000000000148f7716932564ec94879c9545bf0eeeea472e276f720fd324f7781 +1666039 0000000001889c751514d57a2501e5238a064509b4fe8c9b1f97bcb22b01f169 +1666439 0000000000e850980e72dd179d49741211645e3230d70782ab5ee323e0beddfd +1666839 0000000000ebb0d18d3a32a70353d2334e0f9fa74f92a1ad62c2177782e2c3f7 +1667239 000000000121da21f5dc9e864961b4e92984ef80a6c14ff77200df394cc488d9 +1667639 0000000000e923b96cf3f758759ae6909a28791dae19e7b01ce548b70c1ccf9c +1668039 000000000071730d243493ceea24dc7cf036c53d9c5ced74c4273c55ca46aff7 +1668439 000000000034d328aa1de2f89b907c3b193cb585e4e3c70d5690b602aa91a26b +1668839 0000000000a5d5efa9f65a6b3aca3499cf6c21959e99b005ca6c61d43f6f0ef3 +1669239 00000000005b9f008c2246ef5e701c149aa63a4e61e09cd88d272d2a52fe99d8 +1669639 0000000000bb733ae6e11fedd686cfb1a7cda1434aa788d4948c5a776d2751d9 +1670039 00000000010a99dbb5e16a425c3c9a0a271d49dcbd8ecd05fd9430763c40a094 +1670439 00000000005a5a3ea6f76f4a88d928dd438e130281c019d257d41576e2e87d5f +1670839 00000000013727f3993ff7b4450ddc5c43059c9c436fdeaa38da3480d33f7846 +1671239 00000000004982a4a8d24009261f57adf2b1a20d693383b30d3bb0e66c8698cb +1671639 000000000175452718a3bcf73535b6e92f5f85e244e45bb81c7b898c416c5ae5 +1672039 00000000009d1f067270264e1593f14e0a6c8ead9e7791745c01e588d757bb70 +1672439 00000000005a36e54c582677a7f0bb1a20ac78a1e6a18a7f310b1e22cfc76040 +1672839 000000000122ea96836fc1ad479bdf4347f7382ee52975c143959621fb44ebd7 +1673239 000000000108e97eb3e9c9b5c40ed278373149861e628c01cf4013f0a10d11c3 +1673639 0000000000a878c2dcc597edfda147227023b8a705d77df2c111a22c926cbca0 +1674039 0000000000fc5ff4729fb3195dbed5710feb57e7ce685fe5c0fd234b2f359a49 +1674439 00000000003d8d15932fc33dc737a6abab02caa3862c24b13aea60eca1609cbf +1674839 00000000010c7d9d6d7c3948559a6ff55c43585521097dfbbf07a8a91b4ef6df +1675239 00000000014edf4e9e1658539b104d641e5e970e5ef46664d586d2d0329a83be +1675639 00000000003a9ca361deeea3daf6ac07af54326341aafd6679d574329059163e +1676039 0000000001309c8ceab50a664ebd04258b7ff368f764b26a642c584b9371e22b +1676439 0000000000992694d2a3b32d6a1ca950ad90358b70592137a4395738febf4714 +1676839 000000000118f2fd2b91ba6a8750921fceea81f5778f42906813a971d4b219a0 
+1677239 00000000008715d5a4a22e414c630bacb5722bd8b3d3559929fc79ba71f4d3b6 +1677639 0000000000c497228138879ab53aa8f9ee421c294927e6450c19082ff25f45d7 +1678039 0000000000c6c644133c0ef093f37d1e962a3d57e025a9eab91e733446e93595 +1678439 000000000118deca0701070061add29ae5e161a3141be18e49fc2137e8a3408e +1678839 0000000000d95a9d1b7b9b58baabcff71635acbdd82f9f4836dd43232aff3fab +1679239 000000000092b45ff233dfe7ec773f49bd710f6160919c0910b8a6fd79ff3112 +1679639 000000000055716ae528050f1292bce59d883353e6786bd86378f599f384bbb0 +1680039 000000000032965970029e45a09df089214019f96f680fcf28fb2d30b3e72202 +1680439 0000000000fa3063dd31b55b6f255feb7b4215ba2cd81c57994332e5446601de +1680839 00000000012bbc319873c2896166826b041e0ea25267742459df5847708bf135 +1681239 00000000008e108b41b17eb5394063fc29933cd613405736d88b3d40130c4023 +1681639 000000000125e9214ecb44d9f1a31008dd4f2ed51fd01cdf0ca824fe75b76a08 +1682039 0000000000851ce34ac72a91d88dd55d6ca997e6dc40fffc989f22332a3a7133 +1682439 00000000016baf0e623d943334a616ea4f5a3140004b2ec4c448d2881ed9938d +1682839 000000000101b0ef32fd0748001e7ae2bf76af9befba095135579384910c2576 +1683239 00000000018965dc7b5292dbca6c7a0ee94029a147736b530569cd315a5974f8 +1683639 000000000109078298aa0ea4fb368261a3830c6fca4f6cefc9148f511448b802 +1684039 0000000000995ac9181754bad9cb46fd71fc686fb98e92cff8a9b651001c09a8 +1684439 000000000121c2aa576230e394ea57332076e2c22f02396e622681b72d0e09b6 +1684839 0000000000c4b734bb85d3b9930c4de3620819e2c32c9892e9a380442f6d8e1f +1685239 00000000002ac08fa6abc3b42a0e5157b66a5407a75005ba4df52c99f291740b +1685639 0000000000bc3d980ecfb2d439f6df5b6abdc94672410a80b9e168f233fa8c34 +1686039 00000000014afb0c39fc3c957b5c6fa446e7be8e77972a613be79b76047ac208 +1686439 0000000000bda2e7830bc2ffbca6b00b75ade03cad3ac339ac52f1ef7741b56d +1686839 0000000000227db8079bdb579cbeca560dce032fb6e2e3abbaae1ffddad1843b +1687239 00000000000e71c9d5256ad3a848213b9a53852e2fd834ac38a3d0a1d1f876da +1687639 000000000072a990cf8bba344c8351fa66c33d70c4f1eab117841182a6533fc6 +1688039 00000000001884eeb21fb69f86af6bae3c2494e5c9918ab1bae63135c7c3886a +1688439 0000000000db22e96ab50a85d91ed37cd170aa503289f2c7104202b4ed0abdbf +1688839 00000000000c8461420e3699b8396a4ce85e7b022dd60d0f7f0ba4a0576632e8 +1689239 0000000000744d3dd3f49eb68de75a26c68d1aba9272110238932af95185c8fb +1689639 00000000004fde36f9a3580d4b1ef9b75e329064d1942dd6e2e02416cefbc078 +1690039 00000000016f9e6884db9d8e9f3876d17ee753046bf4df4dec9cc1821f9f3856 +1690439 00000000010668bba566ac9e8e8363a0b89545147d24ba387dff30fb9cc2dd4e +1690839 0000000001666dd8bf0e74b1b2310f1bb640c39d427e74c8ae352ec303019e6c +1691239 0000000000a2a6d033006240f4ba9a9e382b1dea3bcb748cade580600857944f +1691639 00000000009aa75881096a60b8f31221aaccb78e63cedd9c29f58b40e4bcd031 +1692039 00000000010e1a8271e67a11ee3a483454486cb42b5f6edbc3f0eede9a6994ab +1692439 0000000000705eb8577ca69d5bdd9ae1656bcab015e0893c93f55ddf1efee7a4 +1692839 00000000012d6ab8ad8454dff2ca7c7c017110386e19c72c0be951b4a766d946 +1693239 00000000009f411bafe692966ce7efecf30bfc237d412b97570009207251d5a4 +1693639 0000000000399f36d2d793e38fa1e0c0d9cfabcf2d6fd283c90bafc83879f971 +1694039 0000000000330978e787e050e7febde18d96ad90224a16ca7b55c9328d9a518d +1694439 00000000011d76d377fc4bb308353913f056af8a06662a9c32d73edb34fd6c8b +1694839 0000000000a11eae06b0a2a7418ce099625d6a05acde6d7b6635f6b85b6d5c5e +1695239 0000000000bb6152ca065b854179130b29324deaa2f783d9dbfa32b5c1bf96f8 +1695639 00000000012ecf2179899b78843e17a00b30ef66438eaedbf42b44f26f022f4a +1696039 000000000137c9122304ee5e7be6bf7a9cca23043e59d1d16ed09f707db27126 
+1696439 0000000000d735623c4122382e13caa7c71b885bc9ffaec4628ecd2b054f197e +1696839 0000000000fe23138b5ab0a1ef2e7afc14677c0e3addbf91677770bfc27fd56d +1697239 00000000000b172010fe177555c46b099ceb99d765ca2f9dfe1b21154366b956 +1697639 0000000000b2ec9108787c0ba02559324bfef4947a552eb93b4d6a120a5272f4 +1698039 00000000011d3e2e8a93f2f3cf1580934810bcc9feb609159e815154f721338e +1698439 0000000000dcef9580f5c262242d1464d53db40cf3467051631479edbb2f042e +1698839 000000000042a71271e849b84bf820b701598bf33dad925f91b5b6c6a5e7cf46 +1699239 000000000063ff7926a2973d37fb8aa6618631139da67ff12f4dd310ad8c1657 +1699639 0000000000007b82552c07d4821cd71e44efe187aa760bf191a20fdee7edcf8c +1700039 0000000000ed88fd9ff710af458cb33fb6648f2b3a7cb4e56d17de7c0be5c112 +1700439 00000000010c3602d00cda0febd8912145e29807ed29b8ebab8ac0b2f3e3ddba +1700839 00000000007d4463d80fbc625b32f7e01add2def168c3be8aa14e674a503542d +1701239 00000000013118da7c47ec2bde42d6fb0b4a425116fc8bdbd2b6473b4ab191f5 +1701639 0000000000a5399655d4b2dd9042f36ade2cdf6d8e95308b4d1cdb43a6ac62d2 +1702039 00000000008f26a8a89bcf5cd93ba87b361c9734088c0683939e5809b19da488 +1702439 0000000001553da2bd0958ff5296d54e50222ab115f629f2aa90d825959db94a +1702839 000000000102ba7ee3140ce719bf42ed0dcc7aa9548bf43f1de89b1805d76e8b +1703239 00000000004ac126a67df2dc8a78fb0cf0da70761b746e1ca7cb150c3ff50723 +1703639 000000000025f2b9c70ac00964083dc3bb4104b15343a2441e48279488e24c39 +1704039 00000000004e1b2356330837bb25bad9e513ab5678b4675bb274e3e421813322 +1704439 00000000006438db877fd2a2ab09acdcff50554bbb2b90b9fda072049eeaedd3 +1704839 0000000000699f22493c1730333555d27e98e9e2ff086675a971bf49a77308ed +1705239 0000000000d272fc9ad7222fe6d0ab235dd05116518b2332dfe01b7ec6ea73d8 +1705639 0000000000615165bacd48be057a6c7d257114d59af3d345c543bd47499c71f1 +1706039 0000000000e93817f2aa4a96cca47f30e3c2122dc1c957751ba841d007501991 +1706439 00000000006ceab949ba81fc8519229e4a15167aeb7abbf8e27e042e88a9c408 +1706743 0000000000546b71c58ad2c63622ad2420a8322274fd643a6b85aaafc98d08f8 +1706915 00000000001a5aea8f9a40271c081c4ea3e5d2bb972f4a0b6b700a03196e41b7 +1707105 0000000001c12496c67efa5e0f972f7f3c63107905ebbbc2b7b1c8a0bcaa6356 +1707276 0000000000557bec8df2ced92046e526a94cd56102695a54a85adf5cd956c48e +1707432 00000000000ab083539ffe638afb3bbaf93507d0e8adae3b9ff1d1b25ff9f804 +1707595 000000000157c2b6c0d7984a94ebf6db0a5a4353d8d5d2911d9abd4a6de5b8c2 +1707704 0000000000f40f9e3471e12ed0f9c9242b8833c01027c4f3c7bf13d29767f9ae +1707819 000000000009ff1ce10a23b7ee87681ea38302d3a72d451ca372fe18c06b0147 +1707904 0000000001258c83d57b1101a5dd16dc5731825eebab01cf96fe7f5ac593e829 +1707981 0000000000ff06d55dc8af3918d59e1535d0cbc2294b12825dc87051e39fc561 +1708054 0000000001806c30f8682775012b190a553e9385c8d123734dad534723a082b6 +1708134 00000000016e28f73e84dc962d8cff5131d72fa511536f4d4fce970ca6e01a48 +1708201 0000000000e3b36c8096c34df45da223066db12e7a10f52cbd1b3b7ab2d5e9ab +1708270 000000000028a24bebd15b46a7817146e6d396322faff8646d88ee084918cd4d +1708340 00000000024a8be4b27003287dd2f33ba237efd32c6c81ef2c2230475025de65 +1708423 000000000173668809464d253cdbc6fe284458401122d49fc3c5a39d7ed32e7d +1708489 00000000009c8d88ae6ce6e5fbae9243b51a61d19cc0e5c075a487524b134a20 +1708579 000000000141b5ad1be69ba0517ee4eff9016fda4fd5eaaa81062408afb3c5fa +1708649 0000000000d2e5bbf3b7b3c3a82031a7c5114ea4457c74a949ce3af23ef9c270 +1708720 00000000007e66e8ccdf815f79c523023d033a2244e627806e3b5c554d3a5b2c +1708805 00000000017177a6f940e30f20cdc7e25cffc4b86ed8f6510786bd1924687917 +1708881 0000000000bda680466220dd5f56dcf430fa030441d62fe76885959e742a1c15 
+1708967 00000000006a7831d6d1799d3a956ab41f22f5d97720cef8f2d1a05712368063 +1709047 00000000009a83a5df6236fb868d439ad8f804c2f62846946c06af4da30a907a +1709134 00000000000948cbe5fa19d74f7d5628c8abc6c1d3ed33980fda2dbee90b2cdd +1709217 00000000007cd47c4f96e650787f695b3e0b76870d70f9dc91efb84a130413f2 +1709302 0000000000decc78eea55980fadea5a4a506968d9cd500f3d6924d2666fdf840 +1709403 0000000000f7391bf470d03154fddad4b4ae2efb7aaee973d5d6161edbabc819 +1709482 0000000000af3d6025b1d0fbba433acc35baceb1c57b06fa47737734d7fa1a0d +1709581 00000000006aa113b1371348815a8dc9ef4d8b5c2bb77e65c67d53687f18377a +1709681 0000000000899748512490ff08a4fcac0a1b275a2de488db54f45870b17c1224 +1709779 00000000011f525d35a658fb36a289cc4e9db411761a62792ed1fb0c41232c44 +1709872 0000000000283d1ff8c3b18ac7f2860c206ee213b23705735e0accbee9113348 +1709978 0000000000b190c1c1fa723ed09e84708bc2f21945eff9910003ef48936586eb +1710085 000000000019eb940bc26e0a10233843cbe4c6003fe864ae77db3f41caad25ea +1710184 0000000001a346e7394e80d1ed40de4735f9677c931680cb49ecc152fa0a1730 +1710283 00000000015560e67befc17257f5aad24ac66b9d8b079452b9c8a05853994de9 +1710401 00000000002013f50c9cb3abee4155f172beccce582281f39dee33b3704c25b9 +1710571 0000000000dab4de39b8e9688ef98c0bfa020b9d1e91691b3406c3612fcedf23 +1710716 000000000169528faab044b82b346686913649f0598189e60e4192a7b7ad9c81 +1710851 00000000014903ae2ac650d9edf236c851e72753ef6c9d6175b6055c8dceb921 +1711011 00000000006c8629c1b2171fddc2435bbc43952e904353e94d52a55e2391e3af +1711155 00000000013b1fdef82a65558b9d1212a59831f1d01b2a9ccf1ffc1ce584a7b4 +1711270 000000000144ae3c66104adf171b7ab69f8938acbdecb2caa58534329aafcb19 +1711384 00000000013bcaeb3bb6e57d99ca23234185d1b017d5d8d1bcfd3ed9bc0b168c +1711535 000000000187be870af8101667916ec70361352c3dd9b0de73224ad3f5a69163 +1711630 000000000034b4a89dee754d6be65e884b9b8c6c847b47b0dc3c664c3f940d96 +1711726 0000000000121b53c781e659837d50f5c50c60eec548867e62fbb50a7f8f0fcc +1711806 0000000000ff3b24ff6488d9f3ab60cb33e2a5e2a5d43a7d672171147d27aaa4 +1711899 00000000007ecfeb299d5bea93795d7f31e36d508a7f1a7cc5b251446d917142 +1711978 00000000017c3998ec8a69fdaf8f7872ee41ccda915e95b1db6bda3f95e18c6c +1712044 00000000009aecca9b6bf02ba0110b83819725004e5f5c12824f2cc628785e31 +1712120 0000000000c41ce860764ef998400bcc1c0bc80400a7a4115d9444c08b6a986f +1712173 000000000177ad65f320fb193a67bff254283ef845db239ff00782d04e650a7d +1712249 0000000000c4dd55ad05e0eec311dc0cb6ceb2271805183c69040eca7997e65c +1712317 0000000001365ff441d6e5c9e349f670ed4cd52674676e285864f35e02f8e3c6 +1712381 000000000172f46af467ab25338086a3206a8d2a6f5a005f0b20b26f530b2467 +1712441 0000000000c5134474567f623b85acb9820757094b2a904002c900e7db283323 +1712520 0000000000f01c22744ab923a62670b50de778e4ce1f4f94481f4a061bdd153d +1712590 00000000002384adb3e9fd0e7ec4e03bb38db489a4547e3edcdb1894981e3084 +1712658 0000000000099b7df7c2b9dd8d7acddad54537a397e06376150373a831705a36 +1712726 00000000008891355864f81416ddd5c9c170918a9707bf6e4a335bbdccd0aebf +1712811 00000000011e00d11599e4af1dd673c531c53267d8aa4ad5ac1068c73b5c3f4a +1712889 0000000001495d74bee2030f816db28b2be431474490dbfebbc421ede90045f1 +1712964 00000000012d01ed17d96151dcda11ba8089205cb528a5efb13226c7dc8336d9 +1713037 00000000007e9c90636b4eb6455e3bda023a305acdb2f6799901a7d3b899e8db +1713111 0000000001919e9ba3b42278860cd5a8c49ac4e4e517224e61a968d2356ef9a7 +1713185 0000000000bb643ac027dcda2074c6d32270b3e5c3a49837d9b71a4f6a3adc18 +1713269 00000000011bbad3c846c2a80923ed00c4ce906a7429dabe7d70907b63161300 +1713349 0000000000ab6d1b3d98f66d294360d435db249197954b137fe84681015a9e67 
+1713433 00000000004ebbb846e1295ba5059f5d6928875ed534dde499e6c2e634765e6f +1713515 000000000092ef319b982b43d921e046b80274da9897c27c98449ace09d2d0f4 +1713593 0000000001937319f7f3b414ab98d19abdf2704b22c0d500508b3d3423516404 +1713682 0000000000e9baec1fc2c9c9061da03391b497dd0e9160b1061cb4beea95caa5 +1713768 0000000000d11b728fbad41d48c236f1ef7605235a3adba32666d08734f53fe8 +1713864 00000000003f68bf26c210d73fc4d0157c44f8cdbd7fd59922071b31fa089f5e +1713962 0000000000b80609c5d9fdc19c882292bb505627f48bfcd80a36d7d927fcce5f +1714103 0000000000cdf4c231ef8aa7d5c7408b882791317cd2d87b3d210e4bf20c47b8 +1714223 0000000001f614cf1bd213d0f0c1b710caaa4d2ecb380895ed00fc55946f72a1 +1714292 00000000018c8725d965112ea7c62794ffef9cc82c82658dde6d3bc4cf72b7a0 +1714364 00000000003dad80f3e02900b82cc75998784ef3120b846498a839103eba9e04 +1714422 000000000054931fa677babd7be8c6b0363586eeb6763872d6822d46639e059c +1714484 0000000000c9f98c290b3f99491817a213153bfad2cda15d9a368b6e880f9ab3 +1714552 0000000000347e276e0f53799662d6a2298a80c34f32b62dde1bd9b491e2c40a +1714619 00000000008790444ca55b1a383c41443e3adfb1e6b3be7d424a48fd78c0d2b2 +1714670 0000000001feb396274ad40fed0d3cbb4f7ba983db5c23311de6f49f4fa5ac62 +1714848 0000000000a13bfe4c098e381abd4804daa46811c7bc1e982f462c66edfb2904 +1714911 00000000006f7deb66552bb27d267a146f301591ba7e36631adecbde012c6595 +1714970 0000000000b882912cbe27381d16ea80a447aa12b735abc2d003f81a187b4888 +1715029 0000000000522d5b91050710b77ce778099c1f45638561b41cef0e7029ad7d14 +1715101 0000000000582ba70ef3cca3c2de3d87352d64d792226fde2958efad171c4b30 +1715165 00000000007a70d69331d2fa6cc28a4aaff408c7d5a2bae268182d6d548e6c24 +1715220 000000000150f7d3888c1908e91d7448d2e37130a555163287f63863ec6120a3 +1715304 00000000008666999e618d8c562c0c56bdd9f0889291a3f73f192aebc8706604 +1715376 000000000006f5dde49aa107e2f357c338169ed637c10117930427a63ea8fef5 +1715429 0000000001adb86784e4e4826324f132ffd0e009dd44cadc6c054da1a236e114 +1715482 0000000001542832b94cd58368cedfd8ab24888254f60face563bad71bc2158e +1715532 00000000013d8a49debc82fd915ea933487d41fae8ba7b6d4d042ee40c7f100e +1715569 00000000002a7e7bfef84f82e104d9eaf4cbab0479add1f153a42e084521e949 +1715617 0000000000ebefe57eb7a166d448a9d12b792f118de3eeacb2901d9b5dce9e5e +1715679 0000000000dc5b4c1a5302852cac43ced1997fe8fa769db22305f58d52ea30fe +1715721 0000000001dcf93fddd782ac0698e2f876c372dfc1980fdb2276b82bb8c33c32 +1715777 000000000079f9a5029498ee025bff19c1d3ac89178c9ef298eb21489b12a606 +1715860 000000000030199bd2710e5279972ea9e92c770f0d8528c75e852a7bf283fd03 +1715890 000000000075588c6f02f9c54fbe8f2ceb9114950737e3b7db95bae64ce1fb5d +1715948 00000000014dd6b193c95f12951684b8802e816f83637778f679f920177ab32f +1715997 0000000000a8f5308a5a5e0cd56552dac1cbdf009afc21a5d755eb540e569819 +1716051 0000000000407197f7b1aa9bfd81b3e7d2e0d9e6e85d1cf26d98463358062d51 +1716110 0000000000adb46186b46463392a58d75ec3e7c35b41ea0f1fb253fc21911d81 +1716154 000000000193862e96d7a2b730941d1297f90d6fe7b9279973959ac6c67f47b1 +1716209 0000000000b7ceabb93bbe50ed761093ce6e666ad21ff982c30323dcda9ecbab +1716257 000000000108e6fa67d5f196e48e045dd3413c25245b0ebb3036e06def4bbd15 +1716319 00000000010907b7c009d01b18878455a56dd706d3090b890ed9d7ac71bca183 +1716360 0000000000e64f90699bb273ff3f3149d254ea16ab979ce4eef05e0a399bcb1c +1716416 00000000007238a4b988f6cf03703bc968605263e01c2f8cc5e758a907eba65b +1716460 00000000017020b68f170a995371fb022e0d3c77925d056ea3b287c84fbd9cc1 +1716510 0000000000a0a038425d90ed9967b1f018274c98151c2b7d1b5d3e2c96c48b33 +1716574 0000000001573f470dd663ec1e46eca945bcd5deae6364f8ed78b9ef0716e70a 
+1716620 0000000001b0cfe39a80ed047b970fc5ba903570ea8df6a4266249707b59d3ea +1716675 0000000000da66d649242c730e6f3e4a4c7b2e7f4fe05af7d9596eacf0853e6f +1716728 0000000000fc185fec80d7524cb56a7cbfc07db562aee014a955b61343e4c889 +1716787 0000000001bdead3b9e76200857711fab6ee1e20969e668e5e81302ccb2badb3 +1716833 000000000091409f62439552977bc66cab6999b6000a1dd8bf86a77fbbe93fa3 +1716900 00000000010aa2eea5ca40b8242705b1fc40605606dd4b364445298e9833ec99 +1716962 00000000000fc643473d7576f7a799fe49d7eb7bcbe49e4f64701bb4f1fa8c5f +1717035 0000000000a4ec616fc1839f4a532bde350f66cd334604a0f553164474faa476 +1717085 00000000006e3d1e53cf95d5e6dd329cb0dd87645b45a4dd02f9b51d187f45d2 +1717150 00000000008650cb408e7b7495b061dcf4d1a3e0563da88479294566b1ae105b +1717209 00000000011f3fe6a9b49078f47d27dded6e3870bdf3d5ac2193da73ed1f2631 +1717279 0000000000b425db7aaaac22f6a25501a62e5e298ff3d9a2a0cb66667dae44c3 +1717352 0000000000d14429d206e4e8613a5b8f3306e9d9cd3651b11481a2229970a528 +1717515 0000000001b754990b1abe387226ed6d4b885fd05850f37b9945a16c5a7a0579 +1717557 000000000255761abfc8309a253e2f56d432f66730e38963377fb5046641a04c +1717618 00000000002870c7ea45cef099641b075f0bad0a9f4453f105c96bfff1487ca8 +1717666 0000000000759f1002d64bb6e2592964d09dd31a9368596b399c109d22f9c955 +1717719 00000000018fa63001e266316ccb86a758357426dd66a351b2f6700886667fbe +1717771 000000000016b8dba221a96334e643636a7949d84feb36354fb701ed4d9aaf3b +1717828 00000000011dde6c144ddef429803ac6e2c7cf181ee8e7e837ca8ea93b5454c2 +1717868 000000000233d3e9c496f634b6823f2e3bfb6e62688ec6181d3d920f2abd4fe8 +1717930 0000000000c275fd36371d92b6c5e4a0214b5a86d577bc609c209a76bd4bd50d +1717986 0000000001d3f725af09616fae0b0e32d0ced7c8d2600816ccc773759b61e3a8 +1718046 0000000001a77abdbb80d22b7d5bef301da5ec61f8e6c3221a73a67eab6768e0 +1718100 0000000000421bc6df0a70280738d98983518983d493dd9e1e01627650e49fc9 +1718167 0000000000fb1f13113514f1f73380eabe91f8b2c018247535724081b958e896 +1718224 0000000000c3982b65bb41ec17d4223dcef6b3241fd17eebe6a1f0d7601d4017 +1718296 0000000000fb561dd0d5cde8575d4f9baeda63659d61ba1ec4405b29c960b763 +1718379 000000000160e5dd3dca80aa2046faf3d04014182c5bd97600790d5c9b2579b6 diff --git a/zebra-consensus/src/checkpoint/test-checkpoints.txt b/zebra-consensus/src/checkpoint/test-checkpoints.txt index fc143c3235f..17b913798b2 100644 --- a/zebra-consensus/src/checkpoint/test-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/test-checkpoints.txt @@ -4623,3 +4623,218 @@ 1848800 001df0cce464f07e0b4a8d5c1384ac6b9ef74c82c3bd23f001f33aab8222b1c9 1849200 002dadaff4858c28d2ec4699e2102bfba761923bf3a3089dc2b0bfcc85976a63 1849600 003be271aefb7ffd082a28992fdd0abab2f2c8a2cf4f1fa0f8abb2b2fc94208c +1850000 0032443d59adac19584a3a0fd352c03456b786eec1356aa4f311c77e2251babe +1850400 0004ab896e8b8e10b2fe3d6599700978fea6cf7b0d2fb3f6c50186bb33e19a87 +1850800 0020f6371ca7e6694c896cb2e0835f4b60a2f82a9671611a6e39fd04d04448ce +1851200 004d5c3a89e00a154fcc7a0cfb071ea2f885b227cb5656914cfe5c52075d5ab6 +1851600 002b923d03f667df85671e4d062fc0de9fbf63ffc242bfeab65050e55078d2ba +1852000 002fa19f33cff7c051d22c6853bb1d326a396bcca4ac8e71de4aa4f070c62419 +1852400 003c151a29c7c24fe836f7059c4f7130064eb67eea0dc94a75e74fb50af0ddb8 +1852800 00a4bcb4dd3e636bbb4597e30b5211f58c4631a65e3de711f8f2aaf2c18cd41d +1853200 00080e85945e098906eae6eb44942d7d99fee591490e65744efbb464b2445146 +1853600 001c468b3639625d7a71d60de970ff32cfa2f2f17d3ed2b35c597eb3f6e7c73f +1854000 0010e433145876cfefe7eca3780fe7c93ec3d15c81448822b8401b9e9526e758 +1854400 0030c83c3e0b38e402e0ce49f6a8365d79f4e6b5aca8308771902be4f8e49047 
+1854800 002bf8c0caea9f64b2ec98a32bb0d16b3abc4275b67d68b35200367bbf530ece +1855200 0008191edb3bc5aaa0dbb06d614e2d745f2c816811f188a682b39b4f0672fb88 +1855600 00137162ecb911b99b85b0f8c960ea47baab610e58992a51b4c29c89d12513ad +1856000 00df8fd8ad476d59380759ea739976be476b77f51a7e88f0352d9ac2a1858ea0 +1856400 0003ee4164cf6f7c3b064e52fff2488386b0208d84c8580144826e1b610ca1d4 +1856800 0003f5bc98a9f51c54ec1b36225d981e7b9ad31c183e28777ba3076d73736dc6 +1857200 0023b2f68f62032a57978d39a8ffa7ab0f5dc54cf8bd63f875732af3ce5cac3f +1857600 005993de51ac873183e2a84b6eb777b1cf3c73a4eac681cb20c123e6a505790d +1858000 00080f8149d712e1db36766421b1962c7bc1c111e8e169c64202f7e3346df016 +1858400 001b4702d3434fdff4a7eb6f7ecd48e49fe25ea81d2567a08ceca21bd6a7b6dc +1858800 000de2c452a373bcab1c21d994b2516604df161b56f7f85f81c6c6c87da1296e +1859200 0000659a2993ec0bb269bbbd7f07c34f54680efe6d7d6df13ce4648696d1c32f +1859600 00154c12a0e5e427fe79f8a83137df302afbfac7f606e37dae6c3e87d91170b4 +1860000 001f0e80b12b45edcd6ab7f6a476c590cfd829840fafe973a0a63a2a357807c0 +1860400 0059e584b87577c36534f4ef16e86b85b00de9d9063d821032ad39663abdb6a8 +1860800 000abaefead3d48ee02365fab886c638b5cf6924cc1ececeb5a34bba4ee769c5 +1861200 00000880a0ca7c0d89c81b8e7109aeab33032146a1503e60552b572913ad0965 +1861600 0062572c0a601154e1129b246e76369566f18406e10b5d81072f02c891b1a13e +1862000 000c9033b2038b8b31aff20db00b1c1d7abf624e238953f7f7299c13d188286a +1862400 0060873c75a7defa9d6b3c4d1eb8215ce22079a9d9d2232377dfb817b19b03bd +1862800 0023aee0caa8b2ffdced262a1232c7358e71bc422f8a428c3a1477285c30f7e7 +1863200 0007d4970c2687c92b8580448fbad9432d6b9731557689d15561a0864fa42da0 +1863600 0018543fe27e30d6679a417ec269a79ee4648d798a36109392ea4169c382e94a +1864000 003245bbea97cd60ccbcfae812481ee29be6975d2d9dcf0c3f0dad5d843c6978 +1864400 003906eedacbb912f3436d9be80083e29016d065bd7afc162acfaad4860d7c84 +1864800 009189eeb3887261871d98f9bcb87d3729eb09a0bc0dced384bb7472f716bd4d +1865200 002b2e28e6eed9d949653530ee51c73be55db31840e031ce5be1a598452c6dd8 +1865600 0004f01daa2f4951bd44996552db644b6d0533bf6b6b2c88b9aac9de2f76e686 +1866000 006a77352c5ad9cf0de3e0795db61918f38f6ec7a2c9b46b267c9f15434899be +1866400 0000985169bd2e64bef5480c1391df3cd682eff898e9da9e1d62b050ca1add8f +1866800 002bd92535807d2af1e4537d38466df26a31e1c2b3a5ec9874d2eac72d6e9c38 +1867200 0032907f498e44d5a7618c50820e8e649cf1af41bc52bffbc8f4887b6a39c877 +1867600 001d720ac69124d30af1758dc6dc12875ecbbad01e57390d4dfb88391a9f761e +1868000 000e01eb3101474072a16f78da72d08f44cebf86a21651474b97bbca6f87ac4d +1868400 000a668cf11b95c571a5e9e641652263ef0f1bd90906b0d780885444f5d3bb59 +1868800 01129b0a26009eda0bcc21546054d7e3e84bb9a1f8e9b964840f3adfab6a39e0 +1869200 00786f7ed401bc7dd07eb5590ea5fad214f5454a9bac6d80c4a27158a0d646ba +1869600 0023a25a8139df1c9b41027e7a72a6da5085176601f3c39c5f058b91fb9c846f +1870000 00070cedab1862456d9dbf916d92fd307fc085442979f5790ad1addf7e9012e4 +1870400 00000a200ed73e215cce8b4a23941602e16a5cdc58c6c7a02e5cae56b449b1ad +1870800 0039b00272bea8583b1d97f7706570cc7c1c0812ae8f100430e2849d11d07958 +1871200 001cd5bd35d1852e5eb99baf862ffa7697faf730b06e6c050494b7710855d6e8 +1871600 0006e50f822a45354287fe30c65acef6eb0abcb37dadac0de195b7e3dde054e6 +1872000 002d18eda13dafd9453a7987a1d6f3a5d790d672456022ea33fadd2f6d3e1d37 +1872400 000f637bd6e65f3f23fe7a9945ed28920d7e31a8f9fd3c8437dd70e720c9732f +1872800 001f05cc6001ca80754778b3f85574393718ac91e0d11dd5088c677fa5d6c64d +1873200 006f8cda09cf7ff007a419909206f8209abfeefe50871534c827e402fe51aebb +1873600 0154e88733d494804fdcfcbfb8d7f41b7a1756da311fdc8dedda912d4c3d3be9 
+1874000 0046adcb21d2b670f71e3648ad92a42f612b2ae8e5b230a0cde3abdae0c6cf18 +1874400 0000667a871a48b07d6299c11db14158746937fb3cadb0cba51967c0fa6876dc +1874800 003427d8c34b89281515061395e7d7df3c66572f534770ad6c8ae8fd22824420 +1875200 001fb4ab2c1a0ed2761ae02ffe7d1e87639d63bd6c88cfb9b1b3dbf3b8f8e9a7 +1875600 004b7f1d805bf5ce8d3918812c7de827617e6413141e444d30d9fd3a67869eeb +1876000 00931efc86d75f54025034f76779ed3a8525d6f8d9d3090c75346184f73cd06e +1876400 0033edc6a210dbdb6b0a378c89403dc3243e4f774aea18950678701db3aa42b1 +1876800 0032e9702ffed8363dc463fab6d98b7df47ada9d6c66c320664e65fdca5c77e9 +1877200 001ffad803b4d8f61e02a6a4f8c5cdc20db0930d9fc0efbfdb511a5f824a2186 +1877600 007d97176aa304031ce50c844de8a3cbd2330d29fef25a8c49994fb3acb23b29 +1878000 003d2902157d3d0c5202de457542b40ea42ebdcc6f31b32b26f5fcf132dfe94e +1878400 00293727d92c79c00228cf4c5d03b987831e88e04ab0470bfafbf60afc16f212 +1878800 0000b870aeec0f7b8e383e1e0f41118e30b6960ef2a05218ccf71a236b2bf5ff +1879200 001006d842177be5319acce857b45a4e059229e1ec8341c176bb4e58c1fe2e59 +1879600 00294f23d7a65cba63da072623088d241a7738305f3d730501202cfe69cff37f +1880000 05c0a0996bb351bb742865a8698075deea9e9bee3e0210de19a5b5c8636a2cc7 +1880400 00c1c324e85b272d3c17f75ed633836b42c52a6372ea96ae3c19377c077c5d34 +1880800 000d916b8db4b9305c63b9e4371254fd4431ad605597a1d6d5f6d9b5d7a188f9 +1881200 002a535caa9418cd2744b65b4dbd767d5760e1b4b6e952f67652ba47550d832a +1881600 00379551d03ea056341d6477e43e82c0d218ac30d8509755e86d4da668b4c2fb +1882000 0006d87ef28b8dd8bec98c2e9fb23b269195e81bf8c71e1c12722ab8fd191af1 +1882400 005a37296dbd260e6ac0836a36701e6b05a4c8653dad9bec6dcf85aafe6f1c8f +1882800 0005a52782b3f14a86c2d8fc04003e9f1d7fe67ac8075a06fcd01ceda6b2e593 +1883200 0014763ab4235b61aa2783cfb5e0aeb4e515eefc7712b7e1cd1a56f5b8304b07 +1883600 00250966c62357bcbad8517b3575fc8d12d48b8af32306b636cdb8e809ba71d3 +1884000 0028f8357f7a462d9d1b757afd95465629fae7f3b9fb54fbd66d076142ccf802 +1884400 000c2c5356dffa2816f89bd7c399d8d38b450ddf72a1e4a3db0a5251ca93e226 +1884800 000ff77e955816b05ec38c003f90b8252da0ef164c42a5eb9f9260d5d8b2a46d +1885200 00398c14e8b5c3b78309a7aaaec2dc60afd05f75111c2db6596d3d61f1a6fecd +1885600 002e627f558a660243dbea544b8641d4b1c1091c4f7a092be569bcad1c057889 +1886000 0036f7f215f41f52d655aa95251f4981b4686f67753afbf5454563062e80e4b1 +1886400 001661f21e095b7bb31f43d6ee9019b94e5d0f04a0e0491a29010521a92b114d +1886800 001311cca3688b728ce90f5119056a3e15769594a081e0d12c5ab7a024ea3f29 +1887200 0018a5d0c8c6ef108ffc8ede7d153da252061fd5e072bcb4f1d33631e00ee34a +1887600 00d8b8f03c620b88fe6b50a97f8ec62e9cdc9fc1239b2db2821304b20b623792 +1888000 00352fe118df3aa58dcd09aeab5c6617b82f7b6887feb56ada2e932d12f78aee +1888400 003796a74def32858a6cfc9096e5c5a2388f951255be4905e08c6e452fee67a5 +1888800 003201f5176cd4cf154963c44fb052d774db4abc975abf4a4b96eec5b15ae78a +1889200 0006d4da83b906db04a57fd9e5a4ccd2350671239a120e5eb23bee63bb270ecf +1889600 0006f50e9ed479f02b323001dd3d14101438bdadfdcf4c490e9e35a5380a6c8e +1890000 0008fde8fcb0f923662c9c47e7510e6761080bbb42279c17494449381c656c78 +1890400 00351ab3aa7d5505c3441f08fc326a1e80ce4917019d1d2864202c37f49e4edb +1890800 0008482f9b06a3ab2e5e850f794a8b1bb28d919c95f1c6a9e316635e500c2f29 +1891200 00274598cdba63cc307451feb87510754fb8fa5208142f9d65a94d1721a96114 +1891600 003195bdf054bb721e9fdba7a291ae64cc3dbb8c2841ff7145b8d15074c17993 +1892000 002c73da75ba6f4f346bb7b93a5b3ef97d82dafff72a8542484b47bc7b04fad9 +1892400 06e745aa593c4d0a43e46c1ae44a45ec03143e7db68b387b7645ac69ea5c7eb5 +1892800 00ec4309f16dae8005ffb6cdffc5efc22dfc5bafc4761e57b3dc1e22fefdc805 
+1893200 002ea994e812d19e21ebf5bb7110e3e1b2c8376f277e4ff1fd98992ef52c936b +1893600 00190c4878516b50264202d2a554c20cb7d9335ac665506081272ce47575351f +1894000 001087535f5034b3dcd76c47df7c1c2cdd9bd736f543882f226dc7116f02c089 +1894400 0000f0cde7b1a92c4f3a5ead94ba4898b742f699a69920fe582097ab6627acdc +1894800 001cc0a2afb4f56a4364901e0a0dda448c3631fa6883e18be62bc95bbde0c1c1 +1895200 0005076c835b7e33a80d725c365e8ebf59ca98d8602bce81beb2d74f26bbda29 +1895600 0033ce85e9f052ef25d08dc1edaf3e8c808c5014962fa7602b677e55a8ba3f55 +1896000 002e1e30194c1eb9f5c8131536e9b3a18735d0dcc5c162fe19a598f59f661841 +1896400 00025ac9db8da78d296e71a6833e69ad15d7d634bcf0e9250e3a0ebdccded4b4 +1896800 0015513a35e85362a5ff16d926bc673f27e63e0ba3181817ed48d1ee9b371c3e +1897200 00193914c0f1c6b37d5b73aa8c1cb64da43cba45e21968700feceef56ce079fc +1897600 0017a1135e03f5e9f166f726e794adfa70b29c9104d3978682551eef386ae552 +1898000 009bda6b7454a8623d7210154ce3e01cc1772c5056f3a0076bf9e8c744b2190d +1898400 0002e2343646dd1057c0b2bc0f0b0f7b16ebebb616bf93ed0281be859b882318 +1898800 003e832d9d2c326d30c90e7ad1a5d9f4a54478b41be39f8042a8d2364d8fe0c1 +1899200 003554cfade07d87e6813baed4d1b55fa108f0b174fcb915f1ae413b8c6dd47e +1899600 001852d8a552ee4c43ef737a5d371732e15d84c7932986ba237a7fdbe809b8bc +1900000 0064d43b7ad3e2814f2359606cf045fab6284ad8d8458564dd15400e2709cf41 +1900400 000d1df2d62f4929844ff431cfd92548733402a6273fe665d665a0b5927b4027 +1900800 00037eb7b8804787062c3a1579ba1c24d9a09f90946427d45c14c70d57345197 +1901200 0029f0f5e93b111f178f33598f90de661d8ef8c234514e21f30bff7c1d354bfb +1901600 000008cf467cdfbdc57371df103477e64bd93560401aa95d9e9f26654fbb0e0a +1902000 003abb747d5bb709b6385b8683fdcb5f770bcc3ecda19c06a03381400e0fcd74 +1902400 00875b649dbdd53a61dc62914d2b311b4697ac7cd52f521f795fe56f7ac0c91d +1902800 0012c6ad2d4d394d0ed422b38bcb44e501460d1c4fea0c489eb86b3937611f48 +1903200 000d50eced194b99f880db767ed421faad2dc50a2d14680f025d5eaa14271066 +1903600 003ec29d1c3ee9a44788c08d29ae6c6f073a189756b5d15c9a7fd1df546a3d81 +1904000 002c232e3256e6cdbe1d6cc934f6221969347f93808acf4603c490dc97c45bfb +1904400 07ede42f862373ed8753f396a1f207119cd1bc14449bca586086c037fd59c951 +1904800 00546668ea48969ea7e4d690b7a5f3a83cad37da63efe2d9b2fa27d7779dd9e8 +1905200 003887dcbf2f92b9db61ab2dcf8afde0e72feecbef0a79d77dc97c5eb6167b88 +1905600 0018114f567220f1514a09dd84dcee5667a1a15844c2c45917532e977c0c3f0a +1906000 002ec2075a325c538d8cdd127c0ac52449089bead28e1c39a9d0c9e4e72ff65e +1906400 000f8deda01cd7b62ea2afcc15315489d2f7b5e7be1058c36b7d27355cbf54d6 +1906800 0000d940a47538cf886622505160a5cc84ec42e43753ed76174b1a2ecae3f7fe +1907200 005db38001738ef8d6c0090eaddae2453237ef6b7b51866baca5e9c475243edb +1907600 0019cde5c723a4090fb26719b88249946de8788797588f8245580b180ad835e5 +1908000 003dec85df6151d8e3160f8ff42328deeb096a647613fbc1609c7c18d3007129 +1908400 00357e71088141b76eadc9033808ba23db61b43bd3c86f0e68e7646d03f139f5 +1908800 009cd84b38f23c53cd60754f753546bf86014e103bd6238716e8adb88ea7f2be +1909200 00280db7a41d07615dd007bcee01b0f9cc63405474620352f36271825d4d70ae +1909600 0029ce23a89bab77bc6c8854a360f77189ddf886e17e2ddd31fd97327e1ddbc7 +1910000 000df7ae91cdbc4df9a738ea04df4c8efe479faee8e3b488884f9eecba4c837e +1910400 000f2260e33cfc75304be0416537cc909383db26f57feb6b87c9a1720eec1f02 +1910800 000147612dc9b9831d8767ddf4e99813114f4d0f82f76b8adcb4fe593bbef5d1 +1911200 00065dc47cc3f7601bfb344c35b5b812338af6184790be6b5bf3b7206a9744a6 +1911600 0019a3ca728b264789cab7dd60f5d946463d2726216f10372e44b8bb692946f9 +1912000 005614aea8265ccc74995590aeba8fa7c110fd02d50dea570a9038f87fbb8202 
+1912400 0028658ee0fe063fcf08af5aa52716cd90e21649d1690fb09f8a178dd8d756bc +1912800 0010786cb72743b6406d4b78b8496af016b4c7e35a495e69734b5d97ade35293 +1913200 003b1b45bed42c97cde2453fcd471c1c87bf016a50d09b193660a38a9b7e866c +1913600 0064bb73dc148381748ff29f36661327a1eca6cf12361dd76e41cfa259f1d3d7 +1914000 0022af9bf4c5f7a8bee70b06e00ce1a7c1caea03ab5bd1bded51eab9ea9deea6 +1914400 0005611c412bfe520951558ed48a2764fac66d798f8522738033132ea7ef42bc +1914800 001ac3aa0a8a398a5bccef47624ccd36210e0dcc8a3d0c5d5bc25e1586e310b5 +1915200 0009f23de7662c34c69e0b5efa66f7cd169f942ba83cb94cfda57d386fa60d65 +1915600 002d827beaad04ff4b9baafefc42464c56b88252031a1eeec610116e0527b863 +1916000 006019b009bbc8da9b2131bbeef59ded7c2f240ebb28bc9fae300624c61471a8 +1916400 0002debcb9d9c513ce22dd8d26a528cddd0cd5a196aceaf3733ad8286bbf3a91 +1916800 0095f28b3d8a471d81260611e368675ad8436ff9d8affbc2f2ea4a56f2789de8 +1917200 005b92c3ca584eb530fc7a35ca12656d140d9fe5fa01977a48477ef0ebc51049 +1917600 003e1fd4ff6e584eb3fb5479914470d4c478d6b3e5a2e2f922c5efe45aad8f35 +1918000 020eb6873f65d20b3b8e4ef15a314f8537bc2d79f4c3b27fb1929a2a883fe170 +1918400 005d8efe80af173610126eede821f6e9a1a98d530c7aca4a235efa2dba76c793 +1918800 000c8278aa085e6b689955e2543a2da2faa1afc0b31b17b03f2a5f906a94d145 +1919200 00709f94b1fb38fc563f30c6646018a31afd65f8ee135008f9e26ec57de00c2b +1919600 000ad44ba4018c8959810c5c1ad95aff64e6b04638a6a3ffa5b33dcf2bd70aef +1920000 0077dbbb8a05aa1defd058ef156e2dabb92d3779daa074e4f618765f091f69ab +1920400 00414b2e06f78d21721a668c0ac9fafa02cd3807c9c6eadf83b20f1cebc04c06 +1920800 007eca7c956bfbcc001728a000a0fc7b22c5d4f7e8d8c58cb60def9de5d883a9 +1921200 006fa75149f97f723ca1fb92b007635173c1c51f675fc701d527dbf724e55e09 +1921600 0042d8e29d43358de4ad66941f6a5a0ef42f5ee58bdfbc9ba862aadd0ccea338 +1922000 000d878e1bd915218f4e135b5448d473af87a204d71478de43cf62e1d660cee0 +1922400 00b131dcdd7060b81eb7a8b59684c741a083177a30bf263bda57607bf69a5a82 +1922800 00300abf1cc56e04bcbf346dac2f64ac5f9bb91f20f1f9a73e2d140114c67a61 +1923200 0040961ef1c41dc9c916e379bdb31f19283b83e86f0ee79de9e37a9c50389b29 +1923600 009af3b9fa9a59c31408dcc1ab629b06878b84ed06452578f9c6c355554f105a +1924000 009fd0589c1faa1cfde42c825d122c090f359a49388f495fd084e9cf1e37e182 +1924400 00a44dc3aab56abe678db62a7fa409ab8c0fd44a25783671f05a4707854de0c7 +1924800 003af3af0725383195c3bf96231a559b73a5f0714b945b3653e170e4cec15c35 +1925200 002298a573502f316e81d5e48d465cc5ad365f7e7ee63e4be661ffb88bba9839 +1925600 005be41ba0efd4e5092525f11355be923f095524dbe51be893a817bb12d1de1a +1926000 003d0762bfba4106431b139818d1e64a5f6f7830b09bca138ff2f10bf4f2b27a +1926400 001397c27d799bb117fda9c9e0aa8c820ad7ca30b07a789d00dc79451778fa8a +1926800 0067eb09b93c90f53b4eddbc3139e56aa18c4fd3e30b5929f3f2204766f3aefd +1927200 00ebd130fc9677b2f7e3bf5f87f9c3bfc92d0b4f14ddf4f4636529755b743ec3 +1927600 003fe4cd33416517f4e104c76b3806b8d1624cf0da3951eacdad782cbfc26526 +1928000 01b731da3383ebb77321f0d869e3dc264ea0b42005d7b6fe270e6bb82417f374 +1928400 000000f46f9b7000b576a4f8c3cfeebeb981d0cf8c1415284b869ba4c8d40536 +1928800 00000127c52ced03972ec382740e471edfc4c249dddf94a2ca3a0ae628eaec76 +1929200 000a1c30c750042234ea4dd91c9c623dad17881b00afc67c42699a503896d9ef +1929600 0055d1f688aac5d11dd7334ec12c5c08b8354add66b50066d123c3d443408db5 +1930000 00d65a0c3a61b6fb77d74c5a8b59cfd24efb72d38b31b44d9c0fde373b756ca5 +1930400 0026a4b3b0e08e02358e59958e695c317c5b476e4ec4ecf03bd6bbddfa494a61 +1930800 0059189701136e8fda33282e20682b4c6b203073123e0a7c86e921dea76ef7e9 +1931200 003b9ddaa670d606e64d5ba89e7b9c637e68dd037cc03cffc41612d1f69f5ab6 
+1931600 0060efec98f2f5a5e312327faa6863aeb3ec0dc66a3c921ce8da11e65f193c47 +1932000 00e9cdb9e54ec59355c246c81d9ffd413376bfe57b698ce267ed16a4e145114d +1932400 000d1b1f8affd5ba12ff7a4f70f2cecde6660dcca2356d0130e0941de1070bf4 +1932800 002cc7f1e553b594227327f970f4455a2279f00fad2bb7f81d673bb3f6de5f57 +1933200 0000179623886be29e966ce56cbdf9db9d70eb1428b6df18b78d39efa28610ac +1933600 00000a1ff7e7e23b0fee767a1b5553b8b6af0e00a43fc874c1ccc242c49b4266 +1934000 0000002f51ce89518fde804b449687211d3e25d4032d01c2ff84fd461a0bb961 +1934400 00004e9088cdc6fcf6b5118d8fea80dee50aacb5b12d85a62b8838fef40f8b60 +1934800 000002e0aa4094d2d92c406e5846a25f07b49e4be8d55e485ebad065ea286d70 +1935200 0000167f6463dbfe310f44e995b78ab8431a715c67f23a92a1681ce6a8909d61 +1935600 00001a1d523472b74b320de5f16af61e70b7fa19a54b1a8c16c97653186d1198 From ad524f2589c115c88a268872d28c2227fd97f448 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 29 Jun 2022 08:15:01 +1000 Subject: [PATCH 42/91] doc(features): Document optional `zebrad` features and clean up README (#4680) * Document configs that need compile-time features * Document features in zebrad/src/lib.rs * Link to the feature list from the README * Remove some outdated README info * Move some detailed README info to the `zebrad` crate docs --- README.md | 68 +++++++++++---------------- zebrad/src/config.rs | 8 ++++ zebrad/src/lib.rs | 90 +++++++++++++++++++++++++++++++++++++++----- 3 files changed, 111 insertions(+), 55 deletions(-) diff --git a/README.md b/README.md index 25b4d08017a..2fb665d94a1 100644 --- a/README.md +++ b/README.md @@ -25,51 +25,25 @@ consensus-compatible implementation of a Zcash node, currently under development. It can be used to join the Zcash peer-to-peer network, which helps keep Zcash working by validating and broadcasting transactions, and maintaining the Zcash blockchain state in a distributed manner. + +[Zcash](https://doc.zebra.zfnd.org/zebrad/index.html#about-zcash) +is a cryptocurrency designed to preserve the user's privacy. +If you just want to send and receive Zcash then you don't need to use Zebra +directly. You can download a Zcash wallet application which will handle that +for you. + Please [join us on Discord](https://discord.gg/na6QZNd) if you'd like to find out more or get involved! -Zcash is a cryptocurrency designed to preserve the user's privacy. Like most -cryptocurrencies, it works by a collection of software nodes run by members of -the Zcash community or any other interested parties. The nodes talk to each -other in peer-to-peer fashion in order to maintain the state of the Zcash -blockchain. They also communicate with miners who create new blocks. When a -Zcash user sends Zcash, their wallet broadcasts transactions to these nodes -which will eventually reach miners, and the mined transaction will then go -through Zcash nodes until they reach the recipient's wallet which will report -the received Zcash to the recipient. - -The original Zcash node is named `zcashd` and is developed by the Electric Coin -Company as a fork of the original Bitcoin node. Zebra, on the other hand, is -an independent Zcash node implementation developed from scratch. Since they -implement the same protocol, `zcashd` and Zebra nodes can communicate with each -other and maintain the Zcash network together. +### Using Zebra -If you just want to send and receive Zcash then you don't need to use Zebra -directly. You can download a Zcash wallet application which will handle that -for you.
(Eventually, Zebra can be used by wallets to implement their -functionality.) You would want to run Zebra if you want to contribute to the +You would want to run Zebra if you want to contribute to the Zcash network: the more nodes are run, the more reliable the network will be in terms of speed and resistance to denial of service attacks, for example. -These are some of the advantages or benefits of Zebra: - -- Better performance: since it was implemented from scratch in an async, parallelized way, Zebra - is currently faster than `zcashd`. -- Better security: since it is developed in a memory-safe language (Rust), Zebra - is less likely to be affected by memory-safety and correctness security bugs that - could compromise the environment where it is run. -- Better governance: with a new node deployment, there will be more developers - who can implement different features for the Zcash network. -- Dev accessibility: supports more developers, which gives new developers - options for contributing to Zcash protocol development. -- Runtime safety: with an independent implementation, the detection of consensus bugs - can happen quicker, reducing the risk of consensus splits. -- Spec safety: with several node implementations, it is much easier to notice - bugs and ambiguity in protocol specification. -- User options: different nodes present different features and tradeoffs for - users to decide on their preferred options. -- Additional contexts: wider target deployments for people to use a consensus - node in more contexts e.g. mobile, wasm, etc. +Zebra aims to be +[faster, more secure, and more easily extensible](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-advantages) +than other Zcash implementations. ## Beta Releases @@ -78,11 +52,7 @@ Every few weeks, we release a new Zebra beta [release](https://github.com/ZcashF Zebra's network stack is interoperable with `zcashd`, and Zebra implements all the features required to reach Zcash network consensus. -The goals of the beta release series are for Zebra to act as a fully validating Zcash node, -for all active consensus rules as of NU5 activation. - Currently, Zebra validates all of the Zcash consensus rules for the NU5 network upgrade. -(As of the second NU5 activation on testnet.) But it may not validate any: - Undocumented rules derived from Bitcoin @@ -106,7 +76,7 @@ for your platform: 2. Install Zebra's build dependencies: - **libclang:** the `libclang`, `libclang-dev`, `llvm`, or `llvm-dev` packages, depending on your package manager - **clang** or another C++ compiler: `g++`, `Xcode`, or `MSVC` - 3. Run `cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-beta.11 zebrad` +3. Run `cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-beta.11 zebrad` 4. Run `zebrad start` (see [Running Zebra](https://zebra.zfnd.org/user/run.html) for more information) If you're interested in testing out `zebrad` please feel free, but keep in mind that there is a lot of key functionality still missing. For more detailed instructions, refer to the [documentation](https://zebra.zfnd.org/user/install.html). +### Optional Features + +For performance reasons, some debugging and monitoring features are disabled in release builds. + +You can [enable these features](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-feature-flags) using: +```sh +cargo install --features=<features> ...
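# For example, a hypothetical invocation that enables the Prometheus metrics
# endpoint and dynamic tracing filter reloading (adjust the feature list):
#
#     cargo install --locked --features=prometheus,filter-reload \
#         --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-beta.11 zebrad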
+``` + ### System Requirements The recommended requirements for compiling and running `zebrad` are: @@ -206,8 +185,6 @@ So Zebra's state should always be valid, unless your OS or disk hardware is corr ## Known Issues There are a few bugs in Zebra that we're still working on fixing: -- [Old state versions are not deleted #1213](https://github.com/ZcashFoundation/zebra/issues/1213) - - When Zebra changes its state format, it does not delete the old state directory. You can delete old state directories if you need the space. - [No Windows support #3801](https://github.com/ZcashFoundation/zebra/issues/3801) - We used to test with Windows Server 2019, but not anymore; see issue for details @@ -217,7 +194,6 @@ Features: - Wallet functionality Performance and Reliability: -Reliable syncing on Testnet - Reliable syncing under poor network conditions - Additional batch verification - Performance tuning diff --git a/zebrad/src/config.rs b/zebrad/src/config.rs index 3e2cad9c395..4dfb7759992 100644 --- a/zebrad/src/config.rs +++ b/zebrad/src/config.rs @@ -95,11 +95,15 @@ pub struct TracingSection { /// The address used for an ad-hoc RPC endpoint allowing dynamic control of the tracing filter. /// + /// Install Zebra using `cargo install --features=filter-reload` to enable this config. + /// /// If this is set to None, the endpoint is disabled. pub endpoint_addr: Option<SocketAddr>, /// Controls whether to write a flamegraph of tracing spans. /// + /// Install Zebra using `cargo install --features=flamegraph` to enable this config. + /// /// If this is set to None, flamegraphs are disabled. Otherwise, it specifies /// an output file path, as described below. /// @@ -125,6 +129,8 @@ pub struct TracingSection { /// The use_journald flag sends tracing events to systemd-journald, on Linux /// distributions that use systemd. + /// + /// Install Zebra using `cargo install --features=journald` to enable this config. pub use_journald: bool, } @@ -147,6 +153,8 @@ impl Default for TracingSection { pub struct MetricsSection { /// The address used for the Prometheus metrics endpoint. /// + /// Install Zebra using `cargo install --features=prometheus` to enable this config. + /// /// The endpoint is disabled if this is set to `None`. pub endpoint_addr: Option<SocketAddr>, } diff --git a/zebrad/src/lib.rs b/zebrad/src/lib.rs index 2ce4ed1e6db..c3fab383ba6 100644 --- a/zebrad/src/lib.rs +++ b/zebrad/src/lib.rs @@ -1,18 +1,90 @@ //! ![Zebra logotype](https://zfnd.org/wp-content/uploads/2022/03/zebra-logotype.png) //! -//! Hello! I am Zebra, an ongoing Rust implementation of a Zcash node. //! -//! Zebra is a work in progress. It is developed as a collection of `zebra-*` -//! libraries implementing the different components of a Zcash node (networking, -//! chain structures, consensus rules, etc), and a `zebrad` binary which uses them. -//! -//! Most of our work so far has gone into `zebra-network`, building a new -//! networking stack for Zcash, and `zebra-chain`, building foundational data -//! structures. +//! Zebra is a Zcash node written in Rust. +//! +//! The `zebrad` binary uses a collection of `zebra-*` crates, +//! which implement the different components of a Zcash node +//! (networking, chain structures, validation, rpc, etc). //! //! [Rendered docs from the `main` branch](https://doc.zebra.zfnd.org). -//! //! [Join us on the Zcash Foundation Engineering Discord](https://discord.gg/na6QZNd). +//! +//! ## About Zcash +//! +//! Zcash is a cryptocurrency designed to preserve the user's privacy. Like most +//!
cryptocurrencies, it works through a collection of software nodes run by members of
+//! the Zcash community or any other interested parties. The nodes talk to each
+//! other in a peer-to-peer fashion in order to maintain the state of the Zcash
+//! blockchain. They also communicate with miners who create new blocks. When a
+//! Zcash user sends Zcash, their wallet broadcasts a transaction to these nodes,
+//! which eventually forward it to miners. Once mined, the transaction travels
+//! back through Zcash nodes until it reaches the recipient's wallet, which
+//! reports the received Zcash to the recipient.
+//!
+//! ## Alternative Implementations
+//!
+//! The original Zcash node is named `zcashd` and is developed by the Electric Coin
+//! Company as a fork of the original Bitcoin node. Zebra, on the other hand, is
+//! an independent Zcash node implementation developed from scratch. Since they
+//! implement the same protocol, `zcashd` and Zebra nodes can communicate with each
+//! other and maintain the Zcash network together.
+//!
+//! ## Zebra Advantages
+//!
+//! These are some of the advantages or benefits of Zebra:
+//!
+//! - Better performance: since it was implemented from scratch in an async, parallelized way, Zebra
+//!   is currently faster than `zcashd`.
+//! - Better security: since it is developed in a memory-safe language (Rust), Zebra
+//!   is less likely to be affected by memory-safety and correctness security bugs that
+//!   could compromise the environment where it is run.
+//! - Better governance: with a new node deployment, there will be more developers
+//!   who can implement different features for the Zcash network.
+//! - Dev accessibility: supports more developers, which gives new developers
+//!   options for contributing to Zcash protocol development.
+//! - Runtime safety: with an independent implementation, the detection of consensus bugs
+//!   can happen more quickly, reducing the risk of consensus splits.
+//! - Spec safety: with several node implementations, it is much easier to notice
+//!   bugs and ambiguities in the protocol specification.
+//! - User options: different nodes present different features and tradeoffs for
+//!   users to decide on their preferred options.
+//! - Additional contexts: wider target deployments for people to use a consensus
+//!   node in more contexts e.g. mobile, wasm, etc.
+//!
+//! ## Zebra Feature Flags
+//!
+//! The following `zebrad` feature flags are available at compile time:
+//!
+//! ### Metrics
+//!
+//! * `prometheus`: export metrics to prometheus.
+//!
+//! Read the [metrics](https://zebra.zfnd.org/user/metrics.html) section of the book
+//! for more details.
+//!
+//! ### Tracing
+//!
+//! Sending traces to different subscribers:
+//! * `journald`: send tracing spans and events to `systemd-journald`.
+//! * `sentry`: send crash and panic events to sentry.io.
+//! * `flamegraph`: generate a flamegraph of tracing spans.
+//!
+//! Changing the traces that are collected:
+//! * `filter-reload`: dynamically reload tracing filters at runtime.
+//! * `error-debug`: enable extra debugging in release builds.
+//! * `tokio-console`: enable tokio's `console-subscriber`.
+//! * A set of features that [ignore verbose tracing].
+//!   The default features ignore `debug` and `trace` logs in release builds.
+//!
+//! Read the [tracing](https://zebra.zfnd.org/user/tracing.html) section of the book
+//! for more details.
+//!
+//! [ignore verbose tracing]: https://docs.rs/tracing/0.1.35/tracing/level_filters/index.html#compile-time-filters
+//!
+//! 
### Testing +//! +//! * `proptest-impl`: enable randomised test data generation. +//! * `lightwalletd-grpc-tests`: enable Zebra JSON-RPC tests that query `lightwalletd` using gRPC. #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] From 9e277ed91a4b95cefae4da8aa93cedb7a980e8d9 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 30 Jun 2022 03:14:16 +1000 Subject: [PATCH 43/91] Release Zebra 1.0.0-beta.12 (#4714) * Update Zebra to 1.0.0-beta.12 * Simplify tower-* versioning by updating both to 0.2.27 * Simplify version search and replace by removing an old version from the docs * Update Cargo.lock * Add draft changelog as of PR #4693 * Update CHANGELOG to PR #4680 --- CHANGELOG.md | 114 +++++++++++++++++++++++++++++++-- Cargo.lock | 24 +++---- README.md | 5 +- book/src/user/install.md | 2 +- book/src/user/lightwalletd.md | 2 +- tower-batch/Cargo.toml | 2 +- tower-fallback/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-network/src/constants.rs | 2 +- zebra-node-services/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 2 +- zebra-script/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 18 files changed, 137 insertions(+), 36 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a50b3fc6c2..2e0c855bd38 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,22 +4,126 @@ All notable changes to Zebra are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). -## Next Release: [Zebra 1.0.0-beta.12](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-beta.12) - 2022-06-?? -This release contains some breaking changes which improve config usability, and increase compilation speed. +## Next Release (Draft) + +(Draft notes for the next release can be added here.) + + +## [Zebra 1.0.0-beta.12](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-beta.12) - 2022-06-29 + +This release improves Zebra's Orchard proof verification performance and sync performance. +Zebra prefers to connect to peers on the canonical Zcash ports. + +This release also contains some breaking changes which: +- improve usability, and +- make Zebra compile faster. ### Breaking Changes +#### Cache Deletion + +- Zebra deletes unused cached state directories in `/zebra` (#4586) + These caches only contain public chain data, so it is safe to delete them. + #### Compile-Time Features -- Most of Zebra's [tracing](https://github.com/ZcashFoundation/zebra/blob/main/book/src/user/tracing.md) and [metrics](https://github.com/ZcashFoundation/zebra/blob/main/book/src/user/metrics.md) features are off by default (#4539) +- Most of Zebra's [tracing](https://github.com/ZcashFoundation/zebra/blob/main/book/src/user/tracing.md) + and [metrics](https://github.com/ZcashFoundation/zebra/blob/main/book/src/user/metrics.md) features + are off by default at compile time (#4539, #4680) - The `enable-sentry` feature has been renamed to `sentry` (#4623) #### Config -- Times in `zebrad.config` change from seconds/nanoseconds to a [human-readable format](https://docs.rs/humantime/latest/humantime/) (#4587) +- Times in `zebrad.config` change from seconds/nanoseconds to a + [human-readable format](https://docs.rs/humantime/latest/humantime/). 
+ Remove times in the old format, or use `zebrad generate` to create a new config. (#4587) + +### Added + +#### Diagnostics + +- Show the current network upgrade in progress logs (#4694) +- Add some missing tracing spans (#4660) +- Add tokio-console support to zebrad (#4519, #4641) +- Add `fallible_impl_from` clippy lint (#4609) +- Add `unwrap_in_result` clippy lint (#4667) + +#### Testing + +- Check that old `zebrad.toml` configs can be parsed by the latest version (#4676) +- Test `cargo doc` warnings and errors (#4635, #4654) +- Document how to run full sync and lightwalletd tests (#4523) + +#### Continuous Integration + +- Add `beta` rust to CI (#4637, #4668) +- Build each Zebra crate individually (#4640) + +### Changed + +#### Chain Sync + +- Update mainnet and testnet checkpoint hashes (#4708) + +#### Diagnostics + +- Update transaction verification dashboard to show all shielded pool sigs, proofs, nullifiers (#4585) + +#### Testing + +- Add an identifiable suffix to zcash-rpc-diff temp directories (#4577) + +#### Dependencies + +- Manage`cargo-mdbook` as a GitHub action (#4636) + +#### Continuous Integration + +- Automatically delete old GCP resources (#4598) + +#### Documentation + +- Improve the release checklist (#4568, #4595) + +### Removed + +#### Continuous Integration + +- Remove redundant build-chain-no-features job (#4656) + +### Fixed + +#### Performance + +- Upgrade `halo2` and related dependencies to improve proof verification speed (#4699) +- Change default sync config to improve reliability (#4662, #4670, #4679) +- Fix a lookahead config panic (#4662) + +#### Continuous Integration + +- Actually create a cached state image after running a sync (#4669) +- Split `docker run` into launch, `logs`, and `wait`, to avoid GitHub job timeouts (#4675, #4690) +- Ignore lightwalletd test hangs for now (#4663) +- Disable `zcash_rpc_conflict` test on macOS (#4614) +- Use `latest` lightwalletd image for Zebra's Dockerfile (#4599) +- Increase lightwalletd timeout, remove testnet tests (#4584) + +#### Documentation + +- Fix various `cargo doc` warnings (#4561, #4611, #4627) +- Clarify how Zebra and `zcashd` interact in `README.md` (#4570) +- Improve `lightwalletd` tutorial (#4566) +- Simplify README and link to detailed documentation (#4680) + +#### Diagnostics + +- Resolve some lifetime and reference lints (#4578) + +### Security + +- When connecting to peers, ignore invalid ports, and prefer canonical ports (#4564) -TODO: insert changelog here ## [Zebra 1.0.0-beta.11](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-beta.11) - 2022-06-03 diff --git a/Cargo.lock b/Cargo.lock index fe6b474da25..a645677a406 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5439,7 +5439,7 @@ dependencies = [ [[package]] name = "tower-batch" -version = "0.2.26" +version = "0.2.27" dependencies = [ "color-eyre", "ed25519-zebra", @@ -5459,7 +5459,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.22" +version = "0.2.27" dependencies = [ "futures-core", "pin-project 0.4.29", @@ -6283,7 +6283,7 @@ dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" dependencies = [ "aes", "bech32", @@ -6344,7 +6344,7 @@ version = "1.0.0-beta.0" [[package]] name = "zebra-consensus" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" dependencies = [ "bellman", "blake2b_simd 1.0.0", @@ -6386,7 +6386,7 @@ dependencies = [ [[package]] name = "zebra-network" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" dependencies = [ "arti-client", 
"bitflags", @@ -6423,14 +6423,14 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" dependencies = [ "zebra-chain", ] [[package]] name = "zebra-rpc" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" dependencies = [ "chrono", "futures", @@ -6459,7 +6459,7 @@ dependencies = [ [[package]] name = "zebra-script" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" dependencies = [ "displaydoc", "hex", @@ -6472,7 +6472,7 @@ dependencies = [ [[package]] name = "zebra-state" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" dependencies = [ "bincode", "chrono", @@ -6507,7 +6507,7 @@ dependencies = [ [[package]] name = "zebra-test" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" dependencies = [ "color-eyre", "futures", @@ -6532,7 +6532,7 @@ dependencies = [ [[package]] name = "zebra-utils" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" dependencies = [ "color-eyre", "hex", @@ -6547,7 +6547,7 @@ dependencies = [ [[package]] name = "zebrad" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" dependencies = [ "abscissa_core", "atty", diff --git a/README.md b/README.md index 2fb665d94a1..ed7106dca83 100644 --- a/README.md +++ b/README.md @@ -76,12 +76,9 @@ for your platform: 2. Install Zebra's build dependencies: - **libclang:** the `libclang`, `libclang-dev`, `llvm`, or `llvm-dev` packages, depending on your package manager - **clang** or another C++ compiler: `g++`, `Xcode`, or `MSVC` -3. Run `cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-beta.11 zebrad` +3. Run `cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-beta.12 zebrad` 4. Run `zebrad start` (see [Running Zebra](https://zebra.zfnd.org/user/run.html) for more information) -If you're interested in testing out `zebrad` please feel free, but keep in mind -that there is a lot of key functionality still missing. - For more detailed instructions, refer to the [documentation](https://zebra.zfnd.org/user/install.html). ### Optional Features diff --git a/book/src/user/install.md b/book/src/user/install.md index 4a48d9261ff..88fd68cdc94 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -9,7 +9,7 @@ for your platform: 2. Install Zebra's build dependencies: - **libclang:** the `libclang`, `libclang-dev`, `llvm`, or `llvm-dev` packages, depending on your package manager - **clang** or another C++ compiler: `g++`, `Xcode`, or `MSVC` -3. Run `cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-beta.11 zebrad` +3. Run `cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-beta.12 zebrad` 4. Run `zebrad start` (see [Running Zebra](run.md) for more information) If you're interested in testing out `zebrad` please feel free, but keep in mind diff --git a/book/src/user/lightwalletd.md b/book/src/user/lightwalletd.md index 8c0fdd9586f..797d06b3131 100644 --- a/book/src/user/lightwalletd.md +++ b/book/src/user/lightwalletd.md @@ -1,6 +1,6 @@ # Running lightwalletd with zebra -Starting on [v1.0.0-beta.11](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-beta.11), the Zebra RPC methods are fully featured to run a lightwalletd service backed by zebrad. +Zebra's RPC methods can support a lightwalletd service backed by zebrad. 
Contents: diff --git a/tower-batch/Cargo.toml b/tower-batch/Cargo.toml index d8fbee8c1cc..504e8d87dcd 100644 --- a/tower-batch/Cargo.toml +++ b/tower-batch/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-batch" -version = "0.2.26" +version = "0.2.27" authors = ["Zcash Foundation "] license = "MIT" edition = "2021" diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 85f45351459..bed6cfca5df 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-fallback" -version = "0.2.22" +version = "0.2.27" authors = ["Zcash Foundation "] license = "MIT" edition = "2021" diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 3c0aa427efe..55bbf814c09 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-chain" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" edition = "2021" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 80c8c1a56d7..99ab5d3e467 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-consensus" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" edition = "2021" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 8a5160b449d..e189c55b557 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-network" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" edition = "2021" diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index 3167dc5d3cb..18c4d331b43 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -242,7 +242,7 @@ pub const TIMESTAMP_TRUNCATION_SECONDS: u32 = 30 * 60; /// [BIP 14]: https://github.com/bitcoin/bips/blob/master/bip-0014.mediawiki // // TODO: generate this from crate metadata (#2375) -pub const USER_AGENT: &str = "/Zebra:1.0.0-beta.11/"; +pub const USER_AGENT: &str = "/Zebra:1.0.0-beta.12/"; /// The Zcash network protocol version implemented by this crate, and advertised /// during connection setup. 
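The TODO above (#2375) wants `USER_AGENT` generated from crate metadata, so
release PRs like this one stop hand-editing the version string. A minimal
sketch of that approach, using the standard `env!` and `concat!` macros (not
the actual fix, which is tracked in the issue):

```rust
/// The user agent, built from the crate version at compile time.
///
/// `CARGO_PKG_VERSION` is set by Cargo for every build, and `concat!` and
/// `env!` expand at compile time, so this stays a `&'static str`.
pub const USER_AGENT: &str = concat!("/Zebra:", env!("CARGO_PKG_VERSION"), "/");
```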
diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index f6d25c06991..11574066f10 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -2,7 +2,7 @@ name = "zebra-node-services" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" edition = "2021" repository = "https://github.com/ZcashFoundation/zebra" diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 165e4a3a1b0..0a210b5b846 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-rpc" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" edition = "2021" diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index d9f2798fff3..74009de967b 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-script" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" edition = "2021" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 56bde71341c..526c8dbd015 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-state" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" edition = "2021" diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 735b9f4305b..9e15ddb0c82 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-test" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" edition = "2021" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index ff86ee896b0..aa924e215a7 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -2,7 +2,7 @@ name = "zebra-utils" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" edition = "2021" # Prevent accidental publication of this utility crate. publish = false diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 473c3454d03..62dcf6a59c9 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -2,7 +2,7 @@ name = "zebrad" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" -version = "1.0.0-beta.11" +version = "1.0.0-beta.12" edition = "2021" # Zebra is only supported on the latest stable Rust version. Some earlier versions might work. # Zebra uses features introduced in Rust 1.58. From 4543a256393bcf721e7b08c632eb46d5b13cb014 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Wed, 29 Jun 2022 18:58:00 -0300 Subject: [PATCH 44/91] fix(network messages): add limits to rejection message and reason (#4687) * add limits to rejection message and reason * truncate bytes instead of characters * change `MAX_REJECT_REASON_LENGTH` to match zcashd * clippy * avoid panix in the slice * Escape and truncate error messages Co-authored-by: teor --- .../src/protocol/external/message.rs | 36 +++++++++++++++---- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/zebra-network/src/protocol/external/message.rs b/zebra-network/src/protocol/external/message.rs index 743212b4194..a4681cfddfa 100644 --- a/zebra-network/src/protocol/external/message.rs +++ b/zebra-network/src/protocol/external/message.rs @@ -340,25 +340,47 @@ pub enum Message { FilterClear, } +/// The maximum size of the rejection message. 
+///
+/// This is equivalent to `COMMAND_SIZE` in zcashd.
+const MAX_REJECT_MESSAGE_LENGTH: usize = 12;
+
+/// The maximum size of the rejection reason.
+///
+/// This is equivalent to `MAX_REJECT_MESSAGE_LENGTH` in zcashd.
+const MAX_REJECT_REASON_LENGTH: usize = 111;
+
+// TODO: add tests for Error conversion and Reject message serialization (#4633)
+// (Zebra does not currently send reject messages, and it ignores received reject messages.)
 impl<E> From<E> for Message
 where
     E: Error,
 {
     fn from(e: E) -> Self {
+        let message = e
+            .to_string()
+            .escape_default()
+            .take(MAX_REJECT_MESSAGE_LENGTH)
+            .collect();
+        let reason = e
+            .source()
+            .map(ToString::to_string)
+            .unwrap_or_default()
+            .escape_default()
+            .take(MAX_REJECT_REASON_LENGTH)
+            .collect();
+
         Message::Reject {
-            message: e.to_string(),
+            message,

             // The generic case, impls for specific error types should
             // use specific varieties of `RejectReason`.
             ccode: RejectReason::Other,

-            reason: if let Some(reason) = e.source() {
-                reason.to_string()
-            } else {
-                String::from("")
-            },
+            reason,

-            // Allow this to be overridden but not populated by default, methinks.
+            // The hash of the rejected block or transaction.
+            // We don't have that data here, so the caller needs to fill it in later.
             data: None,
         }
     }
 }

From b29b4dbf6c441abae69d55fcd8b02e4d02538d9b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 29 Jun 2022 23:56:01 +0000
Subject: [PATCH 45/91] build(deps): bump reviewdog/action-actionlint from 1.26.0 to 1.27.0 (#4722)

Bumps [reviewdog/action-actionlint](https://github.com/reviewdog/action-actionlint) from 1.26.0 to 1.27.0.
- [Release notes](https://github.com/reviewdog/action-actionlint/releases)
- [Commits](https://github.com/reviewdog/action-actionlint/compare/v1.26.0...v1.27.0)

---
updated-dependencies:
- dependency-name: reviewdog/action-actionlint
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 .github/workflows/lint.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 42e51298993..8fdb055366f 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -138,7 +138,7 @@ jobs:
     if: ${{ needs.changed-files.outputs.workflows == 'true' }}
     steps:
       - uses: actions/checkout@v3.0.2
-      - uses: reviewdog/action-actionlint@v1.26.0
+      - uses: reviewdog/action-actionlint@v1.27.0
         with:
           level: warning
           fail_on_error: false

From c8cdf0617c2343e38362bf2a2f3cb3e9be22c2cb Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 29 Jun 2022 23:56:19 +0000
Subject: [PATCH 46/91] build(deps): bump semver from 1.0.10 to 1.0.11 (#4723)

Bumps [semver](https://github.com/dtolnay/semver) from 1.0.10 to 1.0.11.
- [Release notes](https://github.com/dtolnay/semver/releases)
- [Commits](https://github.com/dtolnay/semver/compare/1.0.10...1.0.11)

---
updated-dependencies:
- dependency-name: semver
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- zebrad/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a645677a406..6bcd865cb5d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4079,7 +4079,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.10", + "semver 1.0.11", ] [[package]] @@ -4236,9 +4236,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41d061efea015927ac527063765e73601444cdc344ba855bc7bd44578b25e1c" +checksum = "3d92beeab217753479be2f74e54187a6aed4c125ff0703a866c3147a02f0c6dd" [[package]] name = "semver-parser" @@ -6576,7 +6576,7 @@ dependencies = [ "rand 0.8.5", "regex", "reqwest", - "semver 1.0.10", + "semver 1.0.11", "sentry", "sentry-tracing", "serde", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 62dcf6a59c9..cb1741433c5 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -147,7 +147,7 @@ abscissa_core = { version = "0.5", features = ["testing"] } hex = "0.4.3" once_cell = "1.12.0" regex = "1.5.6" -semver = "1.0.10" +semver = "1.0.11" # zebra-rpc needs the preserve_order feature, it also makes test results more stable serde_json = { version = "1.0.81", features = ["preserve_order"] } From 67dc26fbb51837703af727e8593a2980795cb261 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 30 Jun 2022 20:33:01 +1000 Subject: [PATCH 47/91] fix(ci): Split Docker logs into sprout, other checkpoints, and full validation (#4704) * Checkout zebra in each job to avoid warnings But put TODOs where we might be able to skip checkouts * Split log following into sprout checkpoints, sapling/orchard checkpoints, and full validation * Make job IDs shorter * Use /dev/stderr because docker doesn't have a tty * remove pipefail * Revert "remove pipefail" This reverts commit a7ee37bebdc107a4215e7dd307b189d925969234. * Make tee ignore errors writing to a grep pipe * Avoid launching multiple docker instances for duplicate jobs * Ignore broken pipe error messages and statuses * fix(ci): docker wait not finding container We had this issue before, I can't recall if this was a parsing error between GitHub Actions and gcloud `--command` parsing, but we had to change this into two pieces. This implementation keeps it how we did it before https://github.com/ZcashFoundation/zebra/blob/9b9578c99975952a291006dde8d2828fd3e97799/.github/workflows/test.yml#L235-L243 * docs: remove pending TODO We can't remove `actions/checkout` nor set `create_credentials_file` to `false` as next steps won't be able to authenticate to GCP. We can surely remove `actions/checkout` and leave `create_credentials_file` as `true`, but this will raise a warning on each step, and there's no benefit of doing so. 
* Show `docker wait` and `gcloud ssh` output * If `docker wait` fails, get the exit code using `docker inspect` Co-authored-by: Conrado Gouvea Co-authored-by: Gustavo Valverde Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .github/workflows/deploy-gcp-tests.yml | 190 ++++++++++++++++++++++--- 1 file changed, 168 insertions(+), 22 deletions(-) diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index e6ae8ed59c7..fedba1361af 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -75,10 +75,19 @@ on: description: 'Application name for Google Cloud instance metadata' env: + # where we get the Docker image from IMAGE_NAME: zebrad-test GAR_BASE: us-docker.pkg.dev/zealous-zebra/zebra + # what kind of Google Cloud instance we want to launch ZONE: us-central1-a MACHINE_TYPE: c2d-standard-16 + # How many previous log lines we show at the start of each new log job. + # Increase this number if some log lines are skipped between jobs + # + # We want to show all the logs since the last job finished, + # but we don't know how long it will be between jobs. + # 200 lines is about 6-15 minutes of sync logs, or one panic log. + EXTRA_LOG_LINES: 200 jobs: # set up the test, if it doesn't use any cached state @@ -94,6 +103,7 @@ jobs: - uses: actions/checkout@v3.0.2 with: persist-credentials: false + fetch-depth: '2' - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -150,9 +160,8 @@ jobs: launch-without-cached-state: name: Launch ${{ inputs.test_id }} test needs: [ setup-without-cached-state ] - # If the previous job fails, we also want to run and fail this job, - # so that the branch protection rule fails in Mergify and GitHub. - if: ${{ !cancelled() && !inputs.needs_zebra_state }} + # If creating the Google Cloud instance fails, we don't want to launch another docker instance. + if: ${{ !cancelled() && !failure() && !inputs.needs_zebra_state }} runs-on: ubuntu-latest permissions: contents: 'read' @@ -161,6 +170,7 @@ jobs: - uses: actions/checkout@v3.0.2 with: persist-credentials: false + fetch-depth: '2' - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -324,9 +334,8 @@ jobs: launch-with-cached-state: name: Launch ${{ inputs.test_id }} test needs: [ setup-with-cached-state ] - # If the previous job fails, we also want to run and fail this job, - # so that the branch protection rule fails in Mergify and GitHub. - if: ${{ !cancelled() && inputs.needs_zebra_state }} + # If creating the Google Cloud instance fails, we don't want to launch another docker instance. + if: ${{ !cancelled() && !failure() && inputs.needs_zebra_state }} runs-on: ubuntu-latest permissions: contents: 'read' @@ -445,13 +454,12 @@ jobs: " - # follow the logs of the test we just launched - follow-logs: - name: Show logs for ${{ inputs.test_id }} test - needs: [ launch-with-cached-state, launch-without-cached-state ] + # follow the logs of the test we just launched, up to Sapling activation (or the test finishing) + logs-sprout: + name: Log ${{ inputs.test_id }} test (sprout) # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. - # If the previous job fails, we also want to run and fail this job, - # so that the branch protection rule fails in Mergify and GitHub. + needs: [ launch-with-cached-state, launch-without-cached-state ] + # If the previous job fails, we still want to show the logs. 
if: ${{ !cancelled() }}
     runs-on: ubuntu-latest
     permissions:
@@ -461,6 +469,7 @@ jobs:
       - uses: actions/checkout@v3.0.2
         with:
           persist-credentials: false
+          fetch-depth: '2'

       - name: Inject slug/short variables
         uses: rlespinasse/github-slug-action@v4
@@ -481,8 +490,12 @@ jobs:
           service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com'
           token_format: 'access_token'

-      # Show all the logs since the container launched
-      - name: Show logs for ${{ inputs.test_id }} test
+      # Show all the logs since the container launched,
+      # following until Sapling activation (or the test finishes).
+      #
+      # The log pipeline ignores the exit status of `docker logs`.
+      # Errors in the tests are caught by the final test status job.
+      - name: Show logs for ${{ inputs.test_id }} test (sprout)
         run: |
           gcloud compute ssh \
           ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
@@ -494,14 +507,127 @@ jobs:
           docker logs \
           --tail all \
           --follow \
-          ${{ inputs.test_id }} \
+          ${{ inputs.test_id }} | \
+          tee --output-error=exit /dev/stderr | \
+          grep --max-count=1 --extended-regexp --color=always \
+          '(estimated progress.*network_upgrade.*=.*Sapling)|(test result:.*finished in)' \
           "

+  # follow the logs of the test we just launched, up to the last checkpoint (or the test finishing)
+  # TODO: split out sapling logs when the mandatory checkpoint is above NU5 activation
+  logs-checkpoint:
+    name: Log ${{ inputs.test_id }} test (checkpoint)
+    needs: [ logs-sprout ]
+    # If the previous job fails, we still want to show the logs.
+    if: ${{ !cancelled() }}
+    runs-on: ubuntu-latest
+    permissions:
+      contents: 'read'
+      id-token: 'write'
+    steps:
+      - uses: actions/checkout@v3.0.2
+        with:
+          persist-credentials: false
+          fetch-depth: '2'
+
+      - name: Inject slug/short variables
+        uses: rlespinasse/github-slug-action@v4
+        with:
+          short-length: 7
+
+      - name: Downcase network name for disks
+        run: |
+          NETWORK_CAPS=${{ inputs.network }}
+          echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV
+
+      # Setup gcloud CLI
+      - name: Authenticate to Google Cloud
+        id: auth
+        uses: google-github-actions/auth@v0.8.0
+        with:
+          workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc'
+          service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com'
+          token_format: 'access_token'
+
+      # Show recent logs, following until the last checkpoint (or the test finishes)
+      - name: Show logs for ${{ inputs.test_id }} test (checkpoint)
+        run: |
+          gcloud compute ssh \
+          ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
+          --zone ${{ env.ZONE }} \
+          --quiet \
+          --ssh-flag="-o ServerAliveInterval=5" \
+          --command \
+          "\
+          docker logs \
+          --tail ${{ env.EXTRA_LOG_LINES }} \
+          --follow \
+          ${{ inputs.test_id }} | \
+          tee --output-error=exit /dev/stderr | \
+          grep --max-count=1 --extended-regexp --color=always \
+          '(verified final checkpoint)|(test result:.*finished in)' \
+          "
+
+  # follow the logs of the test we just launched, until the test finishes
+  logs-end:
+    name: Log ${{ inputs.test_id }} test (end)
+    needs: [ logs-checkpoint ]
+    # If the previous job fails, we still want to show the logs.
+ if: ${{ !cancelled() }} + runs-on: ubuntu-latest + permissions: + contents: 'read' + id-token: 'write' + steps: + - uses: actions/checkout@v3.0.2 + with: + persist-credentials: false + fetch-depth: '2' + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Downcase network name for disks + run: | + NETWORK_CAPS=${{ inputs.network }} + echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v0.8.0 + with: + workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' + service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' + token_format: 'access_token' + + # Show recent logs, following until the test finishes + - name: Show logs for ${{ inputs.test_id }} test (end) + run: | + gcloud compute ssh \ + ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ env.ZONE }} \ + --quiet \ + --ssh-flag="-o ServerAliveInterval=5" \ + --command \ + "\ + docker logs \ + --tail ${{ env.EXTRA_LOG_LINES }} \ + --follow \ + ${{ inputs.test_id }} | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + 'test result:.*finished in' \ " + # wait for the result of the test test-result: # TODO: update the job name here, and in the branch protection rules name: Run ${{ inputs.test_id }} test - needs: [ follow-logs ] + needs: [ logs-end ] # If the previous job fails, we also want to run and fail this job, # so that the branch protection rule fails in Mergify and GitHub. if: ${{ !cancelled() }} @@ -513,6 +639,7 @@ jobs: - uses: actions/checkout@v3.0.2 with: persist-credentials: false + fetch-depth: '2' - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -535,8 +662,12 @@ jobs: # Wait for the container to finish, then exit with the test's exit status. # - # `docker wait` prints the container exit status as a string, but we need to exit `ssh` with that status. - # `docker wait` can also wait for multiple containers, but we only ever wait for a single container. + # If the container has already finished, `docker wait` should return its status. + # But sometimes this doesn't work, so we use `docker inspect` as a fallback. + # + # `docker wait` prints the container exit status as a string, but we need to exit the `ssh` command + # with that status. + # (`docker wait` can also wait for multiple containers, but we only ever wait for a single container.) 
- name: Result of ${{ inputs.test_id }} test run: | gcloud compute ssh \ @@ -544,10 +675,15 @@ jobs: --zone ${{ env.ZONE }} \ --quiet \ --ssh-flag="-o ServerAliveInterval=5" \ - --command \ - "\ - exit $(docker wait ${{ inputs.test_id }}) \ - " + --command=' \ + EXIT_STATUS=$( \ + docker wait ${{ inputs.test_id }} || \ + docker inspect --format "{{.State.ExitCode}}" ${{ inputs.test_id }} || \ + echo "missing container, or missing exit status for container" \ + ); \ + echo "docker exit status: $EXIT_STATUS"; \ + exit "$EXIT_STATUS" \ + ' # create a state image from the instance's state disk, if requested by the caller @@ -563,6 +699,11 @@ jobs: contents: 'read' id-token: 'write' steps: + - uses: actions/checkout@v3.0.2 + with: + persist-credentials: false + fetch-depth: '2' + - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 with: @@ -650,6 +791,11 @@ jobs: contents: 'read' id-token: 'write' steps: + - uses: actions/checkout@v3.0.2 + with: + persist-credentials: false + fetch-depth: '2' + - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 with: From 11dcc13b84e4965b3ff3b377fddaa4296050d7e5 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 1 Jul 2022 01:14:30 +1000 Subject: [PATCH 48/91] fix(ci): make full sync go all the way to the tip (#4709) * Checkout zebra in each job to avoid warnings But put TODOs where we might be able to skip checkouts * Split log following into sprout checkpoints, sapling/orchard checkpoints, and full validation * Make job IDs shorter * Use /dev/stderr because docker doesn't have a tty * remove pipefail * Revert "remove pipefail" This reverts commit a7ee37bebdc107a4215e7dd307b189d925969234. * Make tee ignore errors writing to a grep pipe * Avoid launching multiple docker instances for duplicate jobs * Ignore broken pipe error messages and statuses * fix(ci): docker wait not finding container We had this issue before, I can't recall if this was a parsing error between GitHub Actions and gcloud `--command` parsing, but we had to change this into two pieces. This implementation keeps it how we did it before https://github.com/ZcashFoundation/zebra/blob/9b9578c99975952a291006dde8d2828fd3e97799/.github/workflows/test.yml#L235-L243 * docs: remove pending TODO We can't remove `actions/checkout` nor set `create_credentials_file` to `false` as next steps won't be able to authenticate to GCP. We can surely remove `actions/checkout` and leave `create_credentials_file` as `true`, but this will raise a warning on each step, and there's no benefit of doing so. 
* Show `docker wait` and `gcloud ssh` output * If `docker wait` fails, get the exit code using `docker inspect` * Make full sync tests go all the way to the tip Co-authored-by: Conrado Gouvea Co-authored-by: Gustavo Valverde --- zebrad/tests/acceptance.rs | 6 +++--- zebrad/tests/common/sync.rs | 6 ------ 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index d8a4509c92c..727b5e7d855 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -135,8 +135,8 @@ use common::{ sync::{ create_cached_database_height, sync_until, MempoolBehavior, LARGE_CHECKPOINT_TEST_HEIGHT, LARGE_CHECKPOINT_TIMEOUT, MEDIUM_CHECKPOINT_TEST_HEIGHT, STOP_AT_HEIGHT_REGEX, - STOP_ON_LOAD_TIMEOUT, SYNC_FINISHED_REGEX, SYNC_FINISHED_REGEX_TMP_STOP_EARLY, - TINY_CHECKPOINT_TEST_HEIGHT, TINY_CHECKPOINT_TIMEOUT, + STOP_ON_LOAD_TIMEOUT, SYNC_FINISHED_REGEX, TINY_CHECKPOINT_TEST_HEIGHT, + TINY_CHECKPOINT_TIMEOUT, }, }; @@ -882,7 +882,7 @@ fn full_sync_test(network: Network, timeout_argument_name: &str) -> Result<()> { // Use the checkpoints to sync quickly, then do full validation until the chain tip true, // Finish when we reach the chain tip - SYNC_FINISHED_REGEX_TMP_STOP_EARLY, + SYNC_FINISHED_REGEX, ) } else { eprintln!( diff --git a/zebrad/tests/common/sync.rs b/zebrad/tests/common/sync.rs index 6dda7d8404c..f0e6fca1633 100644 --- a/zebrad/tests/common/sync.rs +++ b/zebrad/tests/common/sync.rs @@ -45,12 +45,6 @@ pub const STOP_AT_HEIGHT_REGEX: &str = "stopping at configured height"; pub const SYNC_FINISHED_REGEX: &str = r"finished initial sync to chain tip, using gossiped blocks .*sync_percent.*=.*100\."; -/// Temporary workaround for slow syncs - stop at 97%. -/// -/// TODO: revert this change (#4456) -pub const SYNC_FINISHED_REGEX_TMP_STOP_EARLY: &str = - r"estimated progress to chain tip .*sync_percent.*=.*97\."; - /// The maximum amount of time Zebra should take to reload after shutting down. /// /// This should only take a second, but sometimes CI VMs or RocksDB can be slow. 
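The patch above switches the full sync tests back to `SYNC_FINISHED_REGEX`,
so they only pass at a true 100% sync. A quick standalone sanity check of
that pattern, using the `regex` crate and made-up log lines (illustrative
only, not part of the patch or its test suite):

```rust
use regex::Regex;

fn main() {
    // Copied from SYNC_FINISHED_REGEX above.
    let finished = Regex::new(
        r"finished initial sync to chain tip, using gossiped blocks .*sync_percent.*=.*100\.",
    )
    .expect("pattern compiles");

    // Made-up log lines in the shape Zebra prints during and after a sync.
    let at_tip = "finished initial sync to chain tip, using gossiped blocks sync_percent = 100.000 %";
    let almost = "estimated progress to chain tip sync_percent = 97.123 %";

    // The restored pattern only fires on a completed sync, not on the
    // 97% progress line that the old temporary workaround matched.
    assert!(finished.is_match(at_tip));
    assert!(!finished.is_match(almost));
}
```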
From 32faa94fb4151c9dfb7d5efcbee3fd2c1afd4c96 Mon Sep 17 00:00:00 2001 From: Marek Date: Fri, 1 Jul 2022 01:00:23 +0200 Subject: [PATCH 49/91] fix(state): Update column family names to match Zebra's database design (#4639) * Rename `block_by_height` to `block_header_by_height` in fin state * Rename `tx_by_hash` to `tx_loc_by_hash` in both (non & fin) states * Rename `utxo_by_outpoint` to `utxo_by_out_loc` in finalized state * Reorder the column families so that they match the docs * Update `struct Chain` in the RFCs * Increment `DATABASE_FORMAT_VERSION` to 25 * Remove obsolete docs from `0004-asynchronous-script-verification.md` * Remove an obsolete `TODO` from `disk_db.rs` * Delete unused snapshots Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .../0004-asynchronous-script-verification.md | 13 -- book/src/dev/rfcs/0005-state-updates.md | 120 ++++++++++++++---- zebra-state/src/constants.rs | 2 +- .../src/service/finalized_state/disk_db.rs | 15 +-- ..._header_by_height_raw_data@mainnet_0.snap} | 1 + ..._header_by_height_raw_data@mainnet_1.snap} | 1 + ..._header_by_height_raw_data@mainnet_2.snap} | 1 + ..._header_by_height_raw_data@testnet_0.snap} | 1 + ..._header_by_height_raw_data@testnet_1.snap} | 1 + ..._header_by_height_raw_data@testnet_2.snap} | 1 + .../tests/snapshots/column_family_names.snap | 7 +- .../empty_column_families@mainnet_0.snap | 3 +- .../empty_column_families@no_blocks.snap | 7 +- .../empty_column_families@testnet_0.snap | 3 +- ...=> tx_loc_by_hash_raw_data@mainnet_0.snap} | 1 + ...=> tx_loc_by_hash_raw_data@mainnet_1.snap} | 1 + ...=> tx_loc_by_hash_raw_data@mainnet_2.snap} | 1 + ...=> tx_loc_by_hash_raw_data@testnet_0.snap} | 1 + ...=> tx_loc_by_hash_raw_data@testnet_1.snap} | 1 + ...=> tx_loc_by_hash_raw_data@testnet_2.snap} | 1 + ...> utxo_by_out_loc_raw_data@mainnet_1.snap} | 1 + ...> utxo_by_out_loc_raw_data@mainnet_2.snap} | 1 + ...> utxo_by_out_loc_raw_data@testnet_1.snap} | 1 + ...> utxo_by_out_loc_raw_data@testnet_2.snap} | 1 + .../service/finalized_state/zebra_db/block.rs | 8 +- .../finalized_state/zebra_db/transparent.rs | 6 +- .../src/service/non_finalized_state/chain.rs | 22 ++-- .../non_finalized_state/chain/index.rs | 8 +- .../service/non_finalized_state/tests/prop.rs | 2 +- 29 files changed, 154 insertions(+), 78 deletions(-) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{block_by_height_raw_data@mainnet_0.snap => block_header_by_height_raw_data@mainnet_0.snap} (99%) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{block_by_height_raw_data@mainnet_1.snap => block_header_by_height_raw_data@mainnet_1.snap} (99%) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{block_by_height_raw_data@mainnet_2.snap => block_header_by_height_raw_data@mainnet_2.snap} (99%) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{block_by_height_raw_data@testnet_0.snap => block_header_by_height_raw_data@testnet_0.snap} (99%) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{block_by_height_raw_data@testnet_1.snap => block_header_by_height_raw_data@testnet_1.snap} (99%) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{block_by_height_raw_data@testnet_2.snap => block_header_by_height_raw_data@testnet_2.snap} (99%) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{tx_by_hash_raw_data@mainnet_0.snap => tx_loc_by_hash_raw_data@mainnet_0.snap} (91%) rename 
zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{tx_by_hash_raw_data@mainnet_1.snap => tx_loc_by_hash_raw_data@mainnet_1.snap} (94%) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{tx_by_hash_raw_data@mainnet_2.snap => tx_loc_by_hash_raw_data@mainnet_2.snap} (95%) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{tx_by_hash_raw_data@testnet_0.snap => tx_loc_by_hash_raw_data@testnet_0.snap} (91%) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{tx_by_hash_raw_data@testnet_1.snap => tx_loc_by_hash_raw_data@testnet_1.snap} (94%) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{tx_by_hash_raw_data@testnet_2.snap => tx_loc_by_hash_raw_data@testnet_2.snap} (95%) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{utxo_by_outpoint_raw_data@mainnet_1.snap => utxo_by_out_loc_raw_data@mainnet_1.snap} (94%) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{utxo_by_outpoint_raw_data@mainnet_2.snap => utxo_by_out_loc_raw_data@mainnet_2.snap} (96%) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{utxo_by_outpoint_raw_data@testnet_1.snap => utxo_by_out_loc_raw_data@testnet_1.snap} (94%) rename zebra-state/src/service/finalized_state/disk_format/tests/snapshots/{utxo_by_outpoint_raw_data@testnet_2.snap => utxo_by_out_loc_raw_data@testnet_2.snap} (96%) diff --git a/book/src/dev/rfcs/0004-asynchronous-script-verification.md b/book/src/dev/rfcs/0004-asynchronous-script-verification.md index c442de9d936..23800002119 100644 --- a/book/src/dev/rfcs/0004-asynchronous-script-verification.md +++ b/book/src/dev/rfcs/0004-asynchronous-script-verification.md @@ -422,16 +422,3 @@ cleaner and the cost is probably not too large. - We need to pick a timeout for UTXO lookup. This should be long enough to account for the fact that we may start verifying blocks before all of their ancestors are downloaded. - -These optimisations can be delayed until after the initial implementation is -complete, and covered by tests: - -- Should we stop storing heights for non-coinbase UTXOs? (#2455) - -- Should we avoid storing any extra data for UTXOs, and just lookup the coinbase - flag and height using `outpoint.hash` and `tx_by_hash`? (#2455) - -- The maturity check can be skipped for UTXOs from the finalized state, -because Zebra only finalizes mature UTXOs. We could implement this -optimisation by adding a `Utxo::MatureCoinbase { output: transparent::Output }` -variant, which only performs the spend checks. (#2455) diff --git a/book/src/dev/rfcs/0005-state-updates.md b/book/src/dev/rfcs/0005-state-updates.md index 4f4e065d354..e47245ad175 100644 --- a/book/src/dev/rfcs/0005-state-updates.md +++ b/book/src/dev/rfcs/0005-state-updates.md @@ -268,20 +268,90 @@ is completely empty. The `Chain` type is defined by the following struct and API: ```rust -#[derive(Debug, Default, Clone)] -struct Chain { - blocks: BTreeMap>, - height_by_hash: HashMap, - tx_by_hash: HashMap, - - created_utxos: HashSet, - spent_utxos: HashSet, - sprout_anchors: HashSet, - sapling_anchors: HashSet, - sprout_nullifiers: HashSet, - sapling_nullifiers: HashSet, - orchard_nullifiers: HashSet, - partial_cumulative_work: PartialCumulativeWork, +#[derive(Debug, Clone)] +pub struct Chain { + // The function `eq_internal_state` must be updated every time a field is added to [`Chain`]. + /// The configured network for this chain. 
+ network: Network, + + /// The contextually valid blocks which form this non-finalized partial chain, in height order. + pub(crate) blocks: BTreeMap, + + /// An index of block heights for each block hash in `blocks`. + pub height_by_hash: HashMap, + + /// An index of [`TransactionLocation`]s for each transaction hash in `blocks`. + pub tx_loc_by_hash: HashMap, + + /// The [`transparent::Utxo`]s created by `blocks`. + /// + /// Note that these UTXOs may not be unspent. + /// Outputs can be spent by later transactions or blocks in the chain. + // + // TODO: replace OutPoint with OutputLocation? + pub(crate) created_utxos: HashMap, + /// The [`transparent::OutPoint`]s spent by `blocks`, + /// including those created by earlier transactions or blocks in the chain. + pub(crate) spent_utxos: HashSet, + + /// The Sprout note commitment tree of the tip of this [`Chain`], + /// including all finalized notes, and the non-finalized notes in this chain. + pub(super) sprout_note_commitment_tree: sprout::tree::NoteCommitmentTree, + /// The Sprout note commitment tree for each anchor. + /// This is required for interstitial states. + pub(crate) sprout_trees_by_anchor: + HashMap, + /// The Sapling note commitment tree of the tip of this [`Chain`], + /// including all finalized notes, and the non-finalized notes in this chain. + pub(super) sapling_note_commitment_tree: sapling::tree::NoteCommitmentTree, + /// The Sapling note commitment tree for each height. + pub(crate) sapling_trees_by_height: BTreeMap, + /// The Orchard note commitment tree of the tip of this [`Chain`], + /// including all finalized notes, and the non-finalized notes in this chain. + pub(super) orchard_note_commitment_tree: orchard::tree::NoteCommitmentTree, + /// The Orchard note commitment tree for each height. + pub(crate) orchard_trees_by_height: BTreeMap, + /// The ZIP-221 history tree of the tip of this [`Chain`], + /// including all finalized blocks, and the non-finalized `blocks` in this chain. + pub(crate) history_tree: HistoryTree, + + /// The Sprout anchors created by `blocks`. + pub(crate) sprout_anchors: MultiSet, + /// The Sprout anchors created by each block in `blocks`. + pub(crate) sprout_anchors_by_height: BTreeMap, + /// The Sapling anchors created by `blocks`. + pub(crate) sapling_anchors: MultiSet, + /// The Sapling anchors created by each block in `blocks`. + pub(crate) sapling_anchors_by_height: BTreeMap, + /// The Orchard anchors created by `blocks`. + pub(crate) orchard_anchors: MultiSet, + /// The Orchard anchors created by each block in `blocks`. + pub(crate) orchard_anchors_by_height: BTreeMap, + + /// The Sprout nullifiers revealed by `blocks`. + pub(super) sprout_nullifiers: HashSet, + /// The Sapling nullifiers revealed by `blocks`. + pub(super) sapling_nullifiers: HashSet, + /// The Orchard nullifiers revealed by `blocks`. + pub(super) orchard_nullifiers: HashSet, + + /// Partial transparent address index data from `blocks`. + pub(super) partial_transparent_transfers: HashMap, + + /// The cumulative work represented by `blocks`. + /// + /// Since the best chain is determined by the largest cumulative work, + /// the work represented by finalized blocks can be ignored, + /// because they are common to all non-finalized chains. + pub(super) partial_cumulative_work: PartialCumulativeWork, + + /// The chain value pool balances of the tip of this [`Chain`], + /// including the block value pool changes from all finalized blocks, + /// and the non-finalized blocks in this chain. 
+ /// + /// When a new chain is created from the finalized tip, + /// it is initialized with the finalized tip chain value pool balances. + pub(crate) chain_value_pools: ValueBalance, } ``` @@ -293,7 +363,7 @@ Push a block into a chain as the new tip - Add the block's hash to `height_by_hash` - Add work to `self.partial_cumulative_work` - For each `transaction` in `block` - - Add key: `transaction.hash` and value: `(height, tx_index)` to `tx_by_hash` + - Add key: `transaction.hash` and value: `(height, tx_index)` to `tx_loc_by_hash` - Add created utxos to `self.created_utxos` - Add spent utxos to `self.spent_utxos` - Add nullifiers to the appropriate `self._nullifiers` @@ -310,7 +380,7 @@ Remove the lowest height block of the non-finalized portion of a chain. - Remove the block's hash from `self.height_by_hash` - Subtract work from `self.partial_cumulative_work` - For each `transaction` in `block` - - Remove `transaction.hash` from `tx_by_hash` + - Remove `transaction.hash` from `tx_loc_by_hash` - Remove created utxos from `self.created_utxos` - Remove spent utxos from `self.spent_utxos` - Remove the nullifiers from the appropriate `self._nullifiers` @@ -340,7 +410,7 @@ Remove the highest height block of the non-finalized portion of a chain. - Remove the corresponding hash from `self.height_by_hash` - Subtract work from `self.partial_cumulative_work` - for each `transaction` in `block` - - remove `transaction.hash` from `tx_by_hash` + - remove `transaction.hash` from `tx_loc_by_hash` - Remove created utxos from `self.created_utxos` - Remove spent utxos from `self.spent_utxos` - Remove the nullifiers from the appropriate `self._nullifiers` @@ -365,7 +435,7 @@ parent block is the tip of the finalized state. This implementation should be handled by `#[derive(Default)]`. 1. 
initialise cumulative data members
-   - Construct an empty `self.blocks`, `height_by_hash`, `tx_by_hash`,
+   - Construct an empty `self.blocks`, `height_by_hash`, `tx_loc_by_hash`,
      `self.created_utxos`, `self.spent_utxos`, `self._anchors`,
      `self._nullifiers`
    - Zero `self.partial_cumulative_work`

@@ -1102,13 +1172,14 @@ Returns

 Implemented by querying:

-- (non-finalized) the `tx_by_hash` map (to get the block that contains the
+- (non-finalized) the `tx_loc_by_hash` map (to get the block that contains the
   transaction) of each chain starting with the best chain, and then find the
   block in that chain's `blocks` (to get the block containing the transaction
   data)
-- (finalized) the `tx_by_hash` tree (to get the block that contains the
-  transaction) and then `block_by_height` tree (to get the block containing
-  the transaction data), if the transaction is not in any non-finalized chain
+- (finalized) the `tx_loc_by_hash` tree (to get the block that contains the
+  transaction) and then `block_header_by_height` tree (to get the block
+  containing the transaction data), if the transaction is not in any
+  non-finalized chain

 ### `Request::Block(block::Hash)`
 [request-block]: #request-block

 Implemented by querying:

 - (non-finalized) the `height_by_hash` of each chain starting with the best
   chain, then find the block in that chain's `blocks` (to get the block data)
-- (finalized) the `height_by_hash` tree (to get the block height) and then
-  the `block_by_height` tree (to get the block data), if the block is not in any non-finalized chain
+- (finalized) the `height_by_hash` tree (to get the block height) and then the
+  `block_header_by_height` tree (to get the block data), if the block is not in
+  any non-finalized chain

 ### `Request::AwaitSpendableUtxo { outpoint: OutPoint, spend_height: Height, spend_restriction: SpendRestriction }`

diff --git a/zebra-state/src/constants.rs b/zebra-state/src/constants.rs
index 55a92ded64b..956306201d5 100644
--- a/zebra-state/src/constants.rs
+++ b/zebra-state/src/constants.rs
@@ -18,7 +18,7 @@ pub use zebra_chain::transparent::MIN_TRANSPARENT_COINBASE_MATURITY;
 pub const MAX_BLOCK_REORG_HEIGHT: u32 = MIN_TRANSPARENT_COINBASE_MATURITY - 1;

 /// The database format version, incremented each time the database format changes.
-pub const DATABASE_FORMAT_VERSION: u32 = 24;
+pub const DATABASE_FORMAT_VERSION: u32 = 25;

 /// The maximum number of blocks to check for NU5 transactions,
 /// before we assume we are on a pre-NU5 legacy chain.
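A compressed sketch of the `Request::Transaction` lookup order described in
the RFC text above, with simplified stand-in types (the real `zebra-state`
types carry much more data; every name here is illustrative):

```rust
use std::collections::{BTreeMap, HashMap};

// Simplified stand-ins for the RFC's types; not the real zebra-state types.
type TxHash = [u8; 32];
type Height = u32;

struct TransactionLocation {
    height: Height,
    index: usize,
}

struct Chain {
    /// Stand-in for the per-chain block data: height -> transactions.
    blocks: BTreeMap<Height, Vec<String>>,
    /// The `tx_loc_by_hash` index from the RFC.
    tx_loc_by_hash: HashMap<TxHash, TransactionLocation>,
}

/// Query each non-finalized chain, best chain first, as the RFC describes.
/// The fallback to the finalized `tx_loc_by_hash` tree is elided here.
fn transaction(chains_best_first: &[Chain], hash: &TxHash) -> Option<String> {
    for chain in chains_best_first {
        if let Some(loc) = chain.tx_loc_by_hash.get(hash) {
            // `tx_loc_by_hash` gives the location, `blocks` gives the data.
            return chain.blocks.get(&loc.height)?.get(loc.index).cloned();
        }
    }
    None
}
```

Because the chains are visited best-first, the first hit always comes from
the best chain, which is the ordering guarantee the RFC relies on.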
diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 8361c20a54d..8f998797259 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -369,27 +369,22 @@ impl DiskDb { let column_families = vec![ // Blocks - // TODO: rename to block_header_by_height (#3151) - rocksdb::ColumnFamilyDescriptor::new("block_by_height", db_options.clone()), rocksdb::ColumnFamilyDescriptor::new("hash_by_height", db_options.clone()), rocksdb::ColumnFamilyDescriptor::new("height_by_hash", db_options.clone()), + rocksdb::ColumnFamilyDescriptor::new("block_header_by_height", db_options.clone()), // Transactions rocksdb::ColumnFamilyDescriptor::new("tx_by_loc", db_options.clone()), rocksdb::ColumnFamilyDescriptor::new("hash_by_tx_loc", db_options.clone()), - // TODO: rename to tx_loc_by_hash (#3950) - rocksdb::ColumnFamilyDescriptor::new("tx_by_hash", db_options.clone()), + rocksdb::ColumnFamilyDescriptor::new("tx_loc_by_hash", db_options.clone()), // Transparent rocksdb::ColumnFamilyDescriptor::new("balance_by_transparent_addr", db_options.clone()), - // TODO: #3951 - //rocksdb::ColumnFamilyDescriptor::new("tx_by_transparent_addr_loc", db_options.clone()), - // TODO: rename to utxo_by_out_loc (#3952) - rocksdb::ColumnFamilyDescriptor::new("utxo_by_outpoint", db_options.clone()), rocksdb::ColumnFamilyDescriptor::new( - "utxo_loc_by_transparent_addr_loc", + "tx_loc_by_transparent_addr_loc", db_options.clone(), ), + rocksdb::ColumnFamilyDescriptor::new("utxo_by_out_loc", db_options.clone()), rocksdb::ColumnFamilyDescriptor::new( - "tx_loc_by_transparent_addr_loc", + "utxo_loc_by_transparent_addr_loc", db_options.clone(), ), // Sprout diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@mainnet_0.snap similarity index 99% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@mainnet_0.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@mainnet_0.snap index 7fe345a56f8..c1c123e5b82 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@mainnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@mainnet_0.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@mainnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@mainnet_1.snap similarity index 99% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@mainnet_1.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@mainnet_1.snap index 5ddd398fcdc..9535970d135 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@mainnet_1.snap @@ -1,5 +1,6 @@ --- source: 
zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@mainnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@mainnet_2.snap similarity index 99% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@mainnet_2.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@mainnet_2.snap index d8414b39478..fdf84d8bfa1 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@mainnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@mainnet_2.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@testnet_0.snap similarity index 99% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@testnet_0.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@testnet_0.snap index 6be07b222ef..6d6c5c44e4f 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@testnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@testnet_0.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@testnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@testnet_1.snap similarity index 99% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@testnet_1.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@testnet_1.snap index 406ec0865f7..1c427513a60 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@testnet_1.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@testnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@testnet_2.snap similarity index 99% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@testnet_2.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@testnet_2.snap index bd58f0be7e7..208ac7a236f 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_by_height_raw_data@testnet_2.snap +++ 
b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_header_by_height_raw_data@testnet_2.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/column_family_names.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/column_family_names.snap index c943a2f9c5e..3349d5f806e 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/column_family_names.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/column_family_names.snap @@ -1,10 +1,11 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 72 expression: cf_names --- [ "balance_by_transparent_addr", - "block_by_height", + "block_header_by_height", "default", "hash_by_height", "hash_by_tx_loc", @@ -20,9 +21,9 @@ expression: cf_names "sprout_note_commitment_tree", "sprout_nullifiers", "tip_chain_value_pool", - "tx_by_hash", "tx_by_loc", + "tx_loc_by_hash", "tx_loc_by_transparent_addr_loc", - "utxo_by_outpoint", + "utxo_by_out_loc", "utxo_loc_by_transparent_addr_loc", ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap index a44c8c51cfa..bd62ada2ef8 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 154 expression: empty_column_families --- [ @@ -13,6 +14,6 @@ expression: empty_column_families "sprout_nullifiers: no entries", "tip_chain_value_pool: no entries", "tx_loc_by_transparent_addr_loc: no entries", - "utxo_by_outpoint: no entries", + "utxo_by_out_loc: no entries", "utxo_loc_by_transparent_addr_loc: no entries", ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@no_blocks.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@no_blocks.snap index b6fbe93632f..a304b2879d6 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@no_blocks.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@no_blocks.snap @@ -1,10 +1,11 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 154 expression: empty_column_families --- [ "balance_by_transparent_addr: no entries", - "block_by_height: no entries", + "block_header_by_height: no entries", "hash_by_height: no entries", "hash_by_tx_loc: no entries", "height_by_hash: no entries", @@ -19,9 +20,9 @@ expression: empty_column_families "sprout_note_commitment_tree: no entries", "sprout_nullifiers: no entries", "tip_chain_value_pool: no entries", - "tx_by_hash: no entries", "tx_by_loc: no entries", + "tx_loc_by_hash: no entries", "tx_loc_by_transparent_addr_loc: no entries", - "utxo_by_outpoint: no entries", + "utxo_by_out_loc: no entries", "utxo_loc_by_transparent_addr_loc: no entries", ] diff --git 
a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap index a44c8c51cfa..bd62ada2ef8 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 154 expression: empty_column_families --- [ @@ -13,6 +14,6 @@ expression: empty_column_families "sprout_nullifiers: no entries", "tip_chain_value_pool: no entries", "tx_loc_by_transparent_addr_loc: no entries", - "utxo_by_outpoint: no entries", + "utxo_by_out_loc: no entries", "utxo_loc_by_transparent_addr_loc: no entries", ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@mainnet_0.snap similarity index 91% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@mainnet_0.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@mainnet_0.snap index 4ea246afb4d..0eb160f547c 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@mainnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@mainnet_0.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@mainnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@mainnet_1.snap similarity index 94% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@mainnet_1.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@mainnet_1.snap index 5660a3684d2..08c5693b857 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@mainnet_1.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@mainnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@mainnet_2.snap similarity index 95% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@mainnet_2.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@mainnet_2.snap index b5426feac3f..568a4a0babd 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@mainnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@mainnet_2.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: 
cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@testnet_0.snap similarity index 91% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@testnet_0.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@testnet_0.snap index 4ea246afb4d..0eb160f547c 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@testnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@testnet_0.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@testnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@testnet_1.snap similarity index 94% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@testnet_1.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@testnet_1.snap index 66ff4d26dd3..71e76e5b8cc 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@testnet_1.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@testnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@testnet_2.snap similarity index 95% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@testnet_2.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@testnet_2.snap index f1a52743b89..817a29cc497 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_by_hash_raw_data@testnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tx_loc_by_hash_raw_data@testnet_2.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_outpoint_raw_data@mainnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_out_loc_raw_data@mainnet_1.snap similarity index 94% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_outpoint_raw_data@mainnet_1.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_out_loc_raw_data@mainnet_1.snap index 09aae4249b0..630019412c5 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_outpoint_raw_data@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_out_loc_raw_data@mainnet_1.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: 
cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_outpoint_raw_data@mainnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_out_loc_raw_data@mainnet_2.snap similarity index 96% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_outpoint_raw_data@mainnet_2.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_out_loc_raw_data@mainnet_2.snap index cfab20035fe..1fab1427748 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_outpoint_raw_data@mainnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_out_loc_raw_data@mainnet_2.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_outpoint_raw_data@testnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_out_loc_raw_data@testnet_1.snap similarity index 94% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_outpoint_raw_data@testnet_1.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_out_loc_raw_data@testnet_1.snap index 32e0d177bcb..143723d5399 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_outpoint_raw_data@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_out_loc_raw_data@testnet_1.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_outpoint_raw_data@testnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_out_loc_raw_data@testnet_2.snap similarity index 96% rename from zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_outpoint_raw_data@testnet_2.snap rename to zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_out_loc_raw_data@testnet_2.snap index c633a918ce4..216474dcb93 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_outpoint_raw_data@testnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/utxo_by_out_loc_raw_data@testnet_2.snap @@ -1,5 +1,6 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +assertion_line: 144 expression: cf_data --- [ diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index 66ec3c4eec9..416f3b809ae 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -86,7 +86,7 @@ impl ZebraDb { #[allow(clippy::unwrap_in_result)] pub fn block(&self, hash_or_height: HashOrHeight) -> Option<Arc<Block>> { // Blocks - let block_header_by_height = self.db.cf_handle("block_by_height").unwrap(); + let block_header_by_height = self.db.cf_handle("block_header_by_height").unwrap(); let height_by_hash = self.db.cf_handle("height_by_hash").unwrap(); let height = @@ -174,7 +174,7 @@ impl ZebraDb { /// if it exists in the finalized chain.
#[allow(clippy::unwrap_in_result)] pub fn transaction_location(&self, hash: transaction::Hash) -> Option<TransactionLocation> { - let tx_loc_by_hash = self.db.cf_handle("tx_by_hash").unwrap(); + let tx_loc_by_hash = self.db.cf_handle("tx_loc_by_hash").unwrap(); self.db.zs_get(&tx_loc_by_hash, &hash) } @@ -422,14 +422,14 @@ impl DiskWriteBatch { finalized: &FinalizedBlock, ) -> Result<(), BoxError> { // Blocks - let block_header_by_height = db.cf_handle("block_by_height").unwrap(); + let block_header_by_height = db.cf_handle("block_header_by_height").unwrap(); let hash_by_height = db.cf_handle("hash_by_height").unwrap(); let height_by_hash = db.cf_handle("height_by_hash").unwrap(); // Transactions let tx_by_loc = db.cf_handle("tx_by_loc").unwrap(); let hash_by_tx_loc = db.cf_handle("hash_by_tx_loc").unwrap(); - let tx_loc_by_hash = db.cf_handle("tx_by_hash").unwrap(); + let tx_loc_by_hash = db.cf_handle("tx_loc_by_hash").unwrap(); let FinalizedBlock { block, diff --git a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs index 6453d65b779..a54796d9aa3 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs @@ -95,7 +95,7 @@ impl ZebraDb { &self, output_location: OutputLocation, ) -> Option { - let utxo_by_out_loc = self.db.cf_handle("utxo_by_outpoint").unwrap(); + let utxo_by_out_loc = self.db.cf_handle("utxo_by_out_loc").unwrap(); let output = self.db.zs_get(&utxo_by_out_loc, &output_location)?; @@ -425,7 +425,7 @@ impl DiskWriteBatch { new_outputs_by_out_loc: &BTreeMap, address_balances: &mut HashMap, ) -> Result<(), BoxError> { - let utxo_by_out_loc = db.cf_handle("utxo_by_outpoint").unwrap(); + let utxo_by_out_loc = db.cf_handle("utxo_by_out_loc").unwrap(); let utxo_loc_by_transparent_addr_loc = db.cf_handle("utxo_loc_by_transparent_addr_loc").unwrap(); let tx_loc_by_transparent_addr_loc = @@ -501,7 +501,7 @@ impl DiskWriteBatch { spent_utxos_by_out_loc: &BTreeMap, address_balances: &mut HashMap, ) -> Result<(), BoxError> { - let utxo_by_out_loc = db.cf_handle("utxo_by_outpoint").unwrap(); + let utxo_by_out_loc = db.cf_handle("utxo_by_out_loc").unwrap(); let utxo_loc_by_transparent_addr_loc = db.cf_handle("utxo_loc_by_transparent_addr_loc").unwrap(); diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index 01bdc7c9f78..0ad9794dd5f 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -48,7 +48,7 @@ pub struct Chain { pub height_by_hash: HashMap<block::Hash, block::Height>, /// An index of [`TransactionLocation`]s for each transaction hash in `blocks`. - pub tx_by_hash: HashMap<transaction::Hash, TransactionLocation>, + pub tx_loc_by_hash: HashMap<transaction::Hash, TransactionLocation>, /// The [`transparent::Utxo`]s created by `blocks`.
/// @@ -135,7 +135,7 @@ impl Chain { network, blocks: Default::default(), height_by_hash: Default::default(), - tx_by_hash: Default::default(), + tx_loc_by_hash: Default::default(), created_utxos: Default::default(), sprout_note_commitment_tree, sapling_note_commitment_tree, @@ -177,7 +177,7 @@ impl Chain { // blocks, heights, hashes self.blocks == other.blocks && self.height_by_hash == other.height_by_hash && - self.tx_by_hash == other.tx_by_hash && + self.tx_loc_by_hash == other.tx_loc_by_hash && // transparent UTXOs self.created_utxos == other.created_utxos && @@ -355,7 +355,7 @@ impl Chain { &self, hash: transaction::Hash, ) -> Option<(&Arc<Transaction>, block::Height)> { - self.tx_by_hash.get(&hash).map(|tx_loc| { + self.tx_loc_by_hash.get(&hash).map(|tx_loc| { ( &self.blocks[&tx_loc.height].block.transactions[tx_loc.index.as_usize()], tx_loc.height, @@ -625,7 +625,9 @@ impl Chain { query_height_range: RangeInclusive<Height>, ) -> BTreeMap<TransactionLocation, transaction::Hash> { self.partial_transparent_indexes(addresses) - .flat_map(|transfers| transfers.tx_ids(&self.tx_by_hash, query_height_range.clone())) + .flat_map(|transfers| { + transfers.tx_ids(&self.tx_loc_by_hash, query_height_range.clone()) + }) .collect() } @@ -646,7 +648,7 @@ impl Chain { network: self.network, blocks: self.blocks.clone(), height_by_hash: self.height_by_hash.clone(), - tx_by_hash: self.tx_by_hash.clone(), + tx_loc_by_hash: self.tx_loc_by_hash.clone(), created_utxos: self.created_utxos.clone(), spent_utxos: self.spent_utxos.clone(), sprout_note_commitment_tree, @@ -784,10 +786,10 @@ impl UpdateWith<ContextuallyValidBlock> for Chain { ), }; - // add key `transaction.hash` and value `(height, tx_index)` to `tx_by_hash` + // add key `transaction.hash` and value `(height, tx_index)` to `tx_loc_by_hash` let transaction_location = TransactionLocation::from_usize(height, transaction_index); let prior_pair = self - .tx_by_hash + .tx_loc_by_hash .insert(transaction_hash, transaction_location); assert_eq!( prior_pair, None, @@ -927,9 +929,9 @@ impl UpdateWith<ContextuallyValidBlock> for Chain { // reset the utxos this consumed self.revert_chain_with(&(inputs, transaction_hash, spent_outputs), position); - // remove `transaction.hash` from `tx_by_hash` + // remove `transaction.hash` from `tx_loc_by_hash` assert!( - self.tx_by_hash.remove(transaction_hash).is_some(), + self.tx_loc_by_hash.remove(transaction_hash).is_some(), "transactions must be present if block was added to chain" ); diff --git a/zebra-state/src/service/non_finalized_state/chain/index.rs b/zebra-state/src/service/non_finalized_state/chain/index.rs index e272500ae51..90e904fa840 100644 --- a/zebra-state/src/service/non_finalized_state/chain/index.rs +++ b/zebra-state/src/service/non_finalized_state/chain/index.rs @@ -236,24 +236,24 @@ impl TransparentTransfers { /// /// The transactions are returned in chain order. /// - /// `chain_tx_by_hash` should be the `tx_by_hash` field from the + /// `chain_tx_loc_by_hash` should be the `tx_loc_by_hash` field from the /// [`Chain`][1] containing this index. /// /// # Panics /// - /// If `chain_tx_by_hash` is missing some transaction hashes from this + /// If `chain_tx_loc_by_hash` is missing some transaction hashes from this /// index.
/// /// [1]: super::super::Chain pub fn tx_ids( &self, - chain_tx_by_hash: &HashMap<transaction::Hash, TransactionLocation>, + chain_tx_loc_by_hash: &HashMap<transaction::Hash, TransactionLocation>, query_height_range: RangeInclusive<Height>, ) -> BTreeMap<TransactionLocation, transaction::Hash> { self.tx_ids .distinct_elements() .filter_map(|tx_hash| { - let tx_loc = *chain_tx_by_hash + let tx_loc = *chain_tx_loc_by_hash .get(tx_hash) .expect("all hashes are indexed"); diff --git a/zebra-state/src/service/non_finalized_state/tests/prop.rs b/zebra-state/src/service/non_finalized_state/tests/prop.rs index 22a91474002..e5b9393637b 100644 --- a/zebra-state/src/service/non_finalized_state/tests/prop.rs +++ b/zebra-state/src/service/non_finalized_state/tests/prop.rs @@ -592,7 +592,7 @@ fn different_blocks_different_chains() -> Result<()> { // blocks, heights, hashes chain1.blocks = chain2.blocks.clone(); chain1.height_by_hash = chain2.height_by_hash.clone(); - chain1.tx_by_hash = chain2.tx_by_hash.clone(); + chain1.tx_loc_by_hash = chain2.tx_loc_by_hash.clone(); // transparent UTXOs chain1.created_utxos = chain2.created_utxos.clone(); From 766dd9357b780e49d95398c85259ac1ff4290fa5 Mon Sep 17 00:00:00 2001 From: Andrew Arnott Date: Mon, 4 Jul 2022 16:17:44 -0600 Subject: [PATCH 50/91] Fix walled -> walletd typos (#4746) --- book/src/user/lightwalletd.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/book/src/user/lightwalletd.md b/book/src/user/lightwalletd.md index 797d06b3131..5930a89c874 100644 --- a/book/src/user/lightwalletd.md +++ b/book/src/user/lightwalletd.md @@ -9,7 +9,7 @@ Contents: - [RPC section](#rpc-section) - [Sync Zebra](#sync-zebra) - [Download and build lightwalletd](#download-and-build-lightwalletd) -- [Sync lightwalled](#sync-lightwalled) +- [Sync lightwalletd](#sync-lightwalletd) - [Run tests](#run-tests) - [Connect wallet to lightwalletd](#connect-wallet-to-lightwalletd) - [Download and build the cli-wallet](#download-and-build-the-cli-wallet) @@ -96,7 +96,7 @@ make install If everything went well, you should have a `lightwalletd` binary in `~/go/bin/`. -## Sync lightwalled +## Sync lightwalletd [#sync-lightwalletd]: (#sync-lightwalletd) Please make sure you have zebrad running (with the RPC endpoint enabled and an up-to-date blockchain) to synchronize lightwalletd.
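The book page above assumes zebrad is serving its JSON-RPC endpoint before lightwalletd starts syncing. As a rough sketch of the configuration its "RPC section" refers to (the section name and port here are assumptions based on Zcash mainnet conventions, not text quoted from this patch), the zebrad config file would contain something like:

```toml
# Hypothetical zebrad.toml excerpt: expose the JSON-RPC endpoint that
# lightwalletd connects to; 8232 is the conventional Zcash mainnet RPC port.
[rpc]
listen_addr = "127.0.0.1:8232"
```

With that endpoint reachable and zebrad's chain state up to date, lightwalletd can sync its compact-block cache against it.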
From 42ef8846b34887ecd3635202451d75819e2994db Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 5 Jul 2022 11:04:47 +1000 Subject: [PATCH 51/91] fix(clippy): resolve or disable new nightly clippy lints (#4739) * Fix new dead_code lints in generated pin-project code * Fix and ignore new needless_borrow lints --- Cargo.lock | 34 +++++++++---------- tower-fallback/src/future.rs | 3 ++ zebra-chain/src/amount/tests/vectors.rs | 2 ++ zebra-chain/src/block.rs | 34 +++++++++---------- zebra-chain/src/block/hash.rs | 8 +++++ zebra-chain/src/parameters/network.rs | 10 +++--- .../src/primitives/zcash_primitives.rs | 11 +++++- zebra-chain/src/sprout/joinsplit.rs | 9 +++++ zebra-chain/src/transaction/arbitrary.rs | 2 +- zebra-chain/src/transaction/tests/vectors.rs | 8 +++-- zebra-chain/src/transaction/unmined.rs | 2 ++ zebra-consensus/src/primitives/groth16.rs | 3 ++ 12 files changed, 82 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6bcd865cb5d..ffa4e4b99ff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -231,7 +231,7 @@ dependencies = [ "futures-core", "futures-task", "futures-util", - "pin-project 1.0.10", + "pin-project 1.0.11", "rustc_version", "tokio", ] @@ -3310,11 +3310,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" dependencies = [ - "pin-project-internal 1.0.10", + "pin-project-internal 1.0.11", ] [[package]] @@ -3330,9 +3330,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" dependencies = [ "proc-macro2 1.0.36", "quote 1.0.15", @@ -3429,7 +3429,7 @@ dependencies = [ "atomic", "crossbeam-queue", "futures", - "pin-project 1.0.10", + "pin-project 1.0.11", "static_assertions", "thiserror", ] @@ -5003,7 +5003,7 @@ dependencies = [ "hyper", "hyper-timeout", "percent-encoding", - "pin-project 1.0.10", + "pin-project 1.0.11", "prost", "prost-derive", "tokio", @@ -5116,7 +5116,7 @@ dependencies = [ "futures", "humantime-serde", "itertools", - "pin-project 1.0.10", + "pin-project 1.0.11", "rand 0.8.5", "retry-error", "serde", @@ -5230,7 +5230,7 @@ dependencies = [ "futures", "humantime-serde", "itertools", - "pin-project 1.0.10", + "pin-project 1.0.11", "rand 0.8.5", "retain_mut", "serde", @@ -5400,7 +5400,7 @@ dependencies = [ "async_executors", "futures", "native-tls", - "pin-project 1.0.10", + "pin-project 1.0.11", "tokio", "tokio-native-tls", "tokio-util 0.6.9", @@ -5426,7 +5426,7 @@ dependencies = [ "futures-util", "hdrhistogram", "indexmap", - "pin-project 1.0.10", + "pin-project 1.0.11", "pin-project-lite", "rand 0.8.5", "slab", @@ -5445,7 +5445,7 @@ dependencies = [ "ed25519-zebra", "futures", "futures-core", - "pin-project 1.0.10", + "pin-project 1.0.11", "rand 0.8.5", "tokio", "tokio-test", @@ -5507,7 +5507,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4546773ffeab9e4ea02b8872faa49bb616a80a7da66afc2f32688943f97efa7" dependencies = [ "futures-util", - "pin-project 1.0.10", + "pin-project 1.0.11", "tokio", "tokio-test", "tower-layer", @@ -5575,7 +5575,7 @@ version = "0.2.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 1.0.10", + "pin-project 1.0.11", "tracing", ] @@ -6400,7 +6400,7 @@ dependencies = [ "lazy_static", "metrics", "ordered-map", - "pin-project 1.0.10", + "pin-project 1.0.11", "proptest", "proptest-derive", "rand 0.8.5", @@ -6569,7 +6569,7 @@ dependencies = [ "metrics-exporter-prometheus", "num-integer", "once_cell", - "pin-project 1.0.10", + "pin-project 1.0.11", "proptest", "proptest-derive", "prost", diff --git a/tower-fallback/src/future.rs b/tower-fallback/src/future.rs index 92dd9ea9a9e..dbbea911ce9 100644 --- a/tower-fallback/src/future.rs +++ b/tower-fallback/src/future.rs @@ -1,5 +1,8 @@ //! Future types for the `Fallback` middleware. +// TODO: remove this lint exception after upgrading to pin-project 1.0.11 or later (#2355) +#![allow(dead_code)] + use std::{ fmt::Debug, future::Future, diff --git a/zebra-chain/src/amount/tests/vectors.rs b/zebra-chain/src/amount/tests/vectors.rs index d1f764b4b94..b4fe100d5a7 100644 --- a/zebra-chain/src/amount/tests/vectors.rs +++ b/zebra-chain/src/amount/tests/vectors.rs @@ -167,6 +167,8 @@ fn add_with_diff_constraints() -> Result<()> { } #[test] +// The borrows are actually needed to call the correct trait impl +#[allow(clippy::needless_borrow)] fn deserialize_checks_bounds() -> Result<()> { zebra_test::init(); diff --git a/zebra-chain/src/block.rs b/zebra-chain/src/block.rs index 078d80aaa51..d21406c3042 100644 --- a/zebra-chain/src/block.rs +++ b/zebra-chain/src/block.rs @@ -1,5 +1,21 @@ //! Blocks and block-related structures (heights, headers, etc.) +use std::{collections::HashMap, fmt, ops::Neg}; + +use crate::{ + amount::NegativeAllowed, + block::merkle::AuthDataRoot, + fmt::DisplayToDebug, + orchard, + parameters::{Network, NetworkUpgrade}, + sapling, + serialization::{TrustedPreallocate, MAX_PROTOCOL_MESSAGE_LEN}, + sprout, + transaction::Transaction, + transparent, + value_balance::{ValueBalance, ValueBalanceError}, +}; + mod commitment; mod error; mod hash; @@ -14,8 +30,6 @@ pub mod arbitrary; #[cfg(any(test, feature = "bench", feature = "proptest-impl"))] pub mod tests; -use std::{collections::HashMap, fmt, ops::Neg}; - pub use commitment::{ ChainHistoryBlockTxAuthCommitmentHash, ChainHistoryMmrRootHash, Commitment, CommitmentError, }; @@ -27,20 +41,6 @@ pub use serialize::{SerializedBlock, MAX_BLOCK_BYTES}; #[cfg(any(test, feature = "proptest-impl"))] pub use arbitrary::LedgerState; -use crate::{ - amount::NegativeAllowed, - block::merkle::AuthDataRoot, - fmt::DisplayToDebug, - orchard, - parameters::{Network, NetworkUpgrade}, - sapling, - serialization::{TrustedPreallocate, MAX_PROTOCOL_MESSAGE_LEN}, - sprout, - transaction::Transaction, - transparent, - value_balance::{ValueBalance, ValueBalanceError}, -}; - /// A Zcash block, containing a header and a list of transactions. #[derive(Clone, Debug, Eq, PartialEq)] #[cfg_attr(any(test, feature = "proptest-impl"), derive(Serialize))] @@ -219,7 +219,7 @@ impl Block { impl<'a> From<&'a Block> for Hash { fn from(block: &'a Block) -> Hash { - (&block.header).into() + block.header.into() } } diff --git a/zebra-chain/src/block/hash.rs b/zebra-chain/src/block/hash.rs index ce679a8d8b1..f9ba8502c99 100644 --- a/zebra-chain/src/block/hash.rs +++ b/zebra-chain/src/block/hash.rs @@ -97,6 +97,14 @@ impl<'a> From<&'a Header> for Hash { } } +impl From
<Header> for Hash { + // The borrow is actually needed to use From<&Header> + #[allow(clippy::needless_borrow)] + fn from(block_header: Header) -> Self { + (&block_header).into() + } +} + impl ZcashSerialize for Hash { fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> { writer.write_all(&self.0)?; diff --git a/zebra-chain/src/parameters/network.rs b/zebra-chain/src/parameters/network.rs index b6abd95e3dd..aa2178b0476 100644 --- a/zebra-chain/src/parameters/network.rs +++ b/zebra-chain/src/parameters/network.rs @@ -56,8 +56,8 @@ pub enum Network { Testnet, } -impl From<&Network> for &'static str { - fn from(network: &Network) -> &'static str { +impl From<Network> for &'static str { + fn from(network: Network) -> &'static str { match network { Network::Mainnet => "Mainnet", Network::Testnet => "Testnet", @@ -65,9 +65,9 @@ } } -impl From<Network> for &'static str { - fn from(network: Network) -> &'static str { - (&network).into() +impl From<&Network> for &'static str { + fn from(network: &Network) -> &'static str { + (*network).into() } } diff --git a/zebra-chain/src/primitives/zcash_primitives.rs b/zebra-chain/src/primitives/zcash_primitives.rs index 8ddf35cbb66..517d66dda3c 100644 --- a/zebra-chain/src/primitives/zcash_primitives.rs +++ b/zebra-chain/src/primitives/zcash_primitives.rs @@ -198,6 +198,15 @@ impl From<&Script> for zcash_primitives::legacy::Script { } } +/// Convert a Zebra Script into a librustzcash one. impl From