From 8777173c3028de86f3778be44e142800a19102b1 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Fri, 8 Mar 2024 14:09:36 +0100 Subject: [PATCH] more --- Makefile | 2 +- README.md | 2 +- bin/reth/Cargo.toml | 2 +- bin/reth/src/commands/db/stats.rs | 12 ++++++------ bin/reth/src/commands/db/tui.rs | 2 +- bin/reth/src/commands/debug_cmd/in_memory_merkle.rs | 2 +- book/jsonrpc/trace.md | 2 +- book/run/optimism.md | 4 ++-- crates/blockchain-tree/src/block_indices.rs | 4 ++-- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/consensus/beacon/src/engine/mod.rs | 2 +- crates/consensus/common/src/calc.rs | 2 +- crates/net/discv4/src/proto.rs | 4 ++-- crates/net/downloaders/src/bodies/request.rs | 4 ++-- .../net/downloaders/src/headers/reverse_headers.rs | 2 +- crates/net/eth-wire/src/hello.rs | 2 +- crates/net/nat/src/lib.rs | 2 +- crates/net/network/benches/bench.rs | 4 ++-- crates/net/network/src/session/active.rs | 2 +- crates/net/network/src/state.rs | 2 +- crates/net/network/src/transactions/validation.rs | 4 ++-- crates/net/network/tests/it/big_pooled_txs_req.rs | 2 +- crates/node-api/src/engine/traits.rs | 2 +- crates/payload/builder/src/error.rs | 2 +- crates/primitives/src/chain/spec.rs | 10 +++++----- crates/primitives/src/constants/mod.rs | 2 +- crates/primitives/src/transaction/mod.rs | 2 +- crates/primitives/src/transaction/pooled.rs | 4 ++-- crates/primitives/src/transaction/sidecar.rs | 2 +- crates/primitives/src/trie/subnode.rs | 4 ++-- crates/rpc/rpc-builder/src/lib.rs | 6 +++--- crates/rpc/rpc/src/eth/api/fee_history.rs | 2 +- crates/rpc/rpc/src/eth/api/fees.rs | 2 +- crates/stages/src/stages/execution.rs | 2 +- crates/stages/src/stages/merkle.rs | 2 +- crates/storage/codecs/derive/src/compact/enums.rs | 8 ++++---- crates/storage/libmdbx-rs/src/cursor.rs | 4 ++-- crates/storage/libmdbx-rs/src/flags.rs | 4 ++-- crates/storage/libmdbx-rs/src/transaction.rs | 2 +- crates/storage/nippy-jar/src/cursor.rs | 4 ++-- crates/storage/nippy-jar/src/lib.rs | 6 +++--- crates/storage/nippy-jar/src/writer.rs | 2 +- .../src/bundle_state/bundle_state_with_receipts.rs | 2 +- crates/storage/provider/src/chain.rs | 6 +++--- .../provider/src/providers/database/provider.rs | 2 +- .../provider/src/providers/static_file/jar.rs | 12 ++++++------ .../provider/src/providers/static_file/writer.rs | 2 +- crates/transaction-pool/src/pool/pending.rs | 2 +- crates/trie/src/hashed_cursor/post_state.rs | 2 +- crates/trie/src/walker.rs | 2 +- etc/grafana/dashboards/overview.json | 4 ++-- etc/grafana/dashboards/reth-mempool.json | 4 ++-- examples/db-access.rs | 2 +- examples/rpc-db/src/main.rs | 2 +- 54 files changed, 90 insertions(+), 90 deletions(-) diff --git a/Makefile b/Makefile index 0c2aa5a98885d..d1fb5d7f5ab38 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ FULL_DB_TOOLS_DIR := $(shell pwd)/$(DB_TOOLS_DIR)/ BUILD_PATH = "target" -# List of features to use when building. Can be overriden via the environment. +# List of features to use when building. Can be overridden via the environment. # No jemalloc on Windows ifeq ($(OS),Windows_NT) FEATURES ?= diff --git a/README.md b/README.md index f9c356dbec5de..db263ea9da033 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ While we are aware of parties running Reth staking nodes in production, we do *n More historical context below: * We are releasing 1.0 "production-ready" stable Reth once our Reth & Revm audits are done. ETA ~April 2024. 
- * Reth is currently undergoing an audit with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implemementation. + * Reth is currently undergoing an audit with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. * Revm (the EVM used in Reth) is undergoing an audit with [Guido Vranken](https://twitter.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). * We are releasing [beta](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.1) on Monday March 4th 2024, our first breaking change to the database model, providing faster query speed, smaller database footprint, and allowing "history" to be mounted on separate drives. * We shipped iterative improvements until the last alpha release on February 28th 2024, [0.1.0-alpha.21](https://github.com/paradigmxyz/reth/releases/tag/v0.1.0-alpha.21). diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 3c974bcc49e5c..1d0100bf481f0 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -77,7 +77,7 @@ rand.workspace = true # tui comfy-table = "7.0" crossterm = "0.27.0" -ratatouille = "0.25.0" +ratatui = "0.25.0" human_bytes = "0.4.1" # async diff --git a/bin/reth/src/commands/db/stats.rs b/bin/reth/src/commands/db/stats.rs index 37228356d145c..c5e3c37e489cb 100644 --- a/bin/reth/src/commands/db/stats.rs +++ b/bin/reth/src/commands/db/stats.rs @@ -84,11 +84,11 @@ impl Command { } let max_widths = table.column_max_content_widths(); - let mut seperator = Row::new(); + let mut separator = Row::new(); for width in max_widths { - seperator.add_cell(Cell::new("-".repeat(width as usize))); + separator.add_cell(Cell::new("-".repeat(width as usize))); } - table.add_row(seperator); + table.add_row(separator); let mut row = Row::new(); row.add_cell(Cell::new("Tables")) @@ -259,11 +259,11 @@ impl Command { } let max_widths = table.column_max_content_widths(); - let mut seperator = Row::new(); + let mut separator = Row::new(); for width in max_widths { - seperator.add_cell(Cell::new("-".repeat(width as usize))); + separator.add_cell(Cell::new("-".repeat(width as usize))); } - table.add_row(seperator); + table.add_row(separator); let mut row = Row::new(); row.add_cell(Cell::new("Total")) diff --git a/bin/reth/src/commands/db/tui.rs b/bin/reth/src/commands/db/tui.rs index 841440a47098d..a7b8258b2c603 100644 --- a/bin/reth/src/commands/db/tui.rs +++ b/bin/reth/src/commands/db/tui.rs @@ -3,7 +3,7 @@ use crossterm::{ execute, terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen}, }; -use ratatouille::{ +use ratatui::{ backend::{Backend, CrosstermBackend}, layout::{Alignment, Constraint, Direction, Layout}, style::{Color, Modifier, Style}, diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index dd703747b1037..69034e7e72dbd 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -259,7 +259,7 @@ impl Command { "Mismatched trie updates" ); - // Drop without comitting. + // Drop without committing. 
drop(provider_rw); Ok(()) diff --git a/book/jsonrpc/trace.md b/book/jsonrpc/trace.md index b66501ebafb20..ba0f2490b579d 100644 --- a/book/jsonrpc/trace.md +++ b/book/jsonrpc/trace.md @@ -46,7 +46,7 @@ The transaction trace filtering APIs are: Executes the given call and returns a number of possible traces for it. -The first parameter is a transaction object where the `from` field is optional and the `nonce` field is ommitted. +The first parameter is a transaction object where the `from` field is optional and the `nonce` field is omitted. The second parameter is an array of one or more trace types (`vmTrace`, `trace`, `stateDiff`). diff --git a/book/run/optimism.md b/book/run/optimism.md index 77499e5300707..ef5646e102fb2 100644 --- a/book/run/optimism.md +++ b/book/run/optimism.md @@ -66,8 +66,8 @@ This will build the `rethdb-reader` dylib and instruct the `op-node` build to st The `optimism` feature flag in `op-reth` adds several new CLI flags to the `reth` binary: 1. `--rollup.sequencer-http ` - The sequencer endpoint to connect to. Transactions sent to the `op-reth` EL are also forwarded to this sequencer endpoint for inclusion, as the sequencer is the entity that builds blocks on OP Stack chains. -1. `--rollup.disable-tx-pool-gossip` - Disables gossiping of transactions in the mempool to peers. This can be ommitted for personal nodes, though providers should always opt to enable this flag. -1. `--rollup.enable-genesis-walkback` - Disables setting the forkchoice status to tip on startup, making the `op-node` walk back to genesis and verify the integrity of the chain before starting to sync. This can be ommitted unless a corruption of local chainstate is suspected. +1. `--rollup.disable-tx-pool-gossip` - Disables gossiping of transactions in the mempool to peers. This can be omitted for personal nodes, though providers should always opt to enable this flag. +1. `--rollup.enable-genesis-walkback` - Disables setting the forkchoice status to tip on startup, making the `op-node` walk back to genesis and verify the integrity of the chain before starting to sync. This can be omitted unless a corruption of local chainstate is suspected. First, ensure that your L1 archival node is running and synced to tip. Then, start `op-reth` with the `--rollup.sequencer-http` flag set to the `Base Mainnet` sequencer endpoint: ```sh diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index 6e3cace070f07..28b07145342aa 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -149,7 +149,7 @@ impl BlockIndices { } /// Update all block hashes. iterate over present and new list of canonical hashes and compare - /// them. Remove all missmatches, disconnect them and return all chains that needs to be + /// them. Remove all mismatches, disconnect them and return all chains that needs to be /// removed. 
pub(crate) fn update_block_hashes( &mut self, @@ -211,7 +211,7 @@ impl BlockIndices { } } - // remove childs of removed blocks + // remove children of removed blocks ( removed.into_iter().fold(BTreeSet::new(), |mut fold, (number, hash)| { fold.extend(self.remove_block(number, hash)); diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 72ef5dbfc01e0..dcb69c6f1ee0e 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1164,7 +1164,7 @@ impl BlockchainTree { if self.block_indices().canonical_tip().number <= unwind_to { return Ok(()) } - // revert `N` blocks from current canonical chain and put them inside BlockchanTree + // revert `N` blocks from current canonical chain and put them inside BlockchainTree let old_canon_chain = self.revert_canonical_from_database(unwind_to)?; // check if there is block in chain diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 5ae78a7ab5dc8..a235cfc1aae76 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -788,7 +788,7 @@ where /// Record latency metrics for one call to make a block canonical /// Takes start time of the call and result of the make canonical call /// - /// Handles cases for error, already canonical and commmitted blocks + /// Handles cases for error, already canonical and committed blocks fn record_make_canonical_latency( &self, start: Instant, diff --git a/crates/consensus/common/src/calc.rs b/crates/consensus/common/src/calc.rs index d280d704431b2..ef0f7b76e71cb 100644 --- a/crates/consensus/common/src/calc.rs +++ b/crates/consensus/common/src/calc.rs @@ -80,7 +80,7 @@ pub fn block_reward(base_block_reward: u128, ommers: usize) -> u128 { /// /// From the yellow paper (page 15): /// -/// > If there are collissions of the beneficiary addresses between ommers and the block (i.e. two +/// > If there are collisions of the beneficiary addresses between ommers and the block (i.e. two /// > ommers with the same beneficiary address or an ommer with the same beneficiary address as the /// > present block), additions are applied cumulatively. /// diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index cf27f130c029d..e31ae1d2e4d46 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -790,10 +790,10 @@ mod tests { EnrWrapper::new(builder.build(&key).unwrap()) }; - let enr_respone = EnrResponse { request_hash: rng.gen(), enr }; + let enr_response = EnrResponse { request_hash: rng.gen(), enr }; let mut buf = Vec::new(); - enr_respone.encode(&mut buf); + enr_response.encode(&mut buf); let decoded = EnrResponse::decode(&mut &buf[..]).unwrap(); diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index c6f1447a9f277..c4384eb8be24f 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -155,7 +155,7 @@ where } /// Attempt to buffer body responses. Returns an error if body response fails validation. - /// Every body preceeding the failed one will be buffered. + /// Every body preceding the failed one will be buffered. /// /// This method removes headers from the internal collection. /// If the response fails validation, then the header will be put back. 
@@ -254,7 +254,7 @@ mod tests { }; use reth_interfaces::test_utils::{generators, generators::random_header_range, TestConsensus}; - /// Check if future returns empty bodies without dispathing any requests. + /// Check if future returns empty bodies without dispatching any requests. #[tokio::test] async fn request_returns_empty_bodies() { let mut rng = generators::rng(); diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index fea7b4201a09f..e8e1fa009c29b 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -1201,7 +1201,7 @@ impl ReverseHeadersDownloaderBuilder { /// Configures and returns the next [HeadersRequest] based on the given parameters /// -/// The request wil start at the given `next_request_block_number` block. +/// The request will start at the given `next_request_block_number` block. /// The `limit` of the request will either be the targeted `request_limit` or the difference of /// `next_request_block_number` and the `local_head` in case this is smaller than the targeted /// `request_limit`. diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index e992021f4be2d..8621e8bf6bcbd 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -95,7 +95,7 @@ impl HelloMessageWithProtocols { /// Raw rlpx protocol message used in the `p2p` handshake, containing information about the /// supported RLPx protocol version and capabilities. /// -/// See als +/// See also #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] diff --git a/crates/net/nat/src/lib.rs b/crates/net/nat/src/lib.rs index 0930411a08724..adc72aa83b4a9 100644 --- a/crates/net/nat/src/lib.rs +++ b/crates/net/nat/src/lib.rs @@ -133,7 +133,7 @@ impl ResolveNatInterval { } /// Creates a new [ResolveNatInterval] that attempts to resolve the public IP with interval of - /// period with the first attempt starting at `sart`. See also [tokio::time::interval_at] + /// period with the first attempt starting at `start`. See also [tokio::time::interval_at] #[track_caller] pub fn interval_at( resolver: NatResolver, diff --git a/crates/net/network/benches/bench.rs b/crates/net/network/benches/bench.rs index a4bd2410b5125..c41eb62029c75 100644 --- a/crates/net/network/benches/bench.rs +++ b/crates/net/network/benches/bench.rs @@ -12,7 +12,7 @@ use std::sync::Arc; use tokio::{runtime::Runtime as TokioRuntime, sync::mpsc::unbounded_channel}; criterion_group!( - name = brodcast_benches; + name = broadcast_benches; config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); targets = broadcast_ingress_bench ); @@ -87,4 +87,4 @@ pub fn broadcast_ingress_bench(c: &mut Criterion) { }); } -criterion_main!(brodcast_benches); +criterion_main!(broadcast_benches); diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 07abb6b36d252..1c8c0b735cdbb 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -705,7 +705,7 @@ impl InflightRequest { enum OnIncomingMessageOutcome { /// Message successfully handled. 
Ok, - /// Message is considered to be in violation fo the protocol + /// Message is considered to be in violation of the protocol BadMessage { error: EthStreamError, message: EthMessage }, /// Currently no capacity to handle the message NoCapacity(ActiveSessionMessage), diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 2da15ddc3b8fb..4e1fa994add3b 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -163,7 +163,7 @@ where /// /// See also pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage) { - // send a `NewBlock` message to a fraction fo the connected peers (square root of the total + // send a `NewBlock` message to a fraction of the connected peers (square root of the total // number of peers) let num_propagate = (self.active_peers.len() as f64).sqrt() as u64 + 1; diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs index a8831b945a6dc..f2049bbca9b94 100644 --- a/crates/net/network/src/transactions/validation.rs +++ b/crates/net/network/src/transactions/validation.rs @@ -20,8 +20,8 @@ pub const SIGNATURE_DECODED_SIZE_BYTES: usize = mem::size_of::(); /// [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68).. pub trait ValidateTx68 { /// Validates a [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68) - /// entry. Returns [`ValidationOutcome`] which signals to the caller wether to fetch the - /// transaction or wether to drop it, and wether the sender of the announcement should be + /// entry. Returns [`ValidationOutcome`] which signals to the caller whether to fetch the + /// transaction or whether to drop it, and whether the sender of the announcement should be /// penalized. fn should_fetch( &self, diff --git a/crates/net/network/tests/it/big_pooled_txs_req.rs b/crates/net/network/tests/it/big_pooled_txs_req.rs index 95e28f85b37c6..e5b639eee1e9a 100644 --- a/crates/net/network/tests/it/big_pooled_txs_req.rs +++ b/crates/net/network/tests/it/big_pooled_txs_req.rs @@ -13,7 +13,7 @@ use reth_transaction_pool::{ }; use tokio::sync::oneshot; -// peer0: `GetPooledTransactions` requestor +// peer0: `GetPooledTransactions` requester // peer1: `GetPooledTransactions` responder #[tokio::test(flavor = "multi_thread")] async fn test_large_tx_req() { diff --git a/crates/node-api/src/engine/traits.rs b/crates/node-api/src/engine/traits.rs index 076a7650ea8c1..01cb8ce065b06 100644 --- a/crates/node-api/src/engine/traits.rs +++ b/crates/node-api/src/engine/traits.rs @@ -45,7 +45,7 @@ pub trait PayloadBuilderAttributes: Send + Sync + std::fmt::Debug { /// Returns the parent block hash for the running payload job. fn parent(&self) -> B256; - /// Returns the timestmap for the running payload job. + /// Returns the timestamp for the running payload job. fn timestamp(&self) -> u64; /// Returns the parent beacon block root for the running payload job, if it exists. diff --git a/crates/payload/builder/src/error.rs b/crates/payload/builder/src/error.rs index 8382b4b16bbad..af95b279b56de 100644 --- a/crates/payload/builder/src/error.rs +++ b/crates/payload/builder/src/error.rs @@ -8,7 +8,7 @@ use tokio::sync::oneshot; /// Possible error variants during payload building. #[derive(Debug, thiserror::Error)] pub enum PayloadBuilderError { - /// Thrown whe the parent block is missing. + /// Thrown when the parent block is missing. 
#[error("missing parent block {0}")] MissingParentBlock(B256), /// An oneshot channels has been closed. diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 1e85603831dab..b03234e4f3381 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -742,7 +742,7 @@ impl ChainSpec { ForkFilter::new(head, self.genesis_hash(), self.genesis_timestamp(), forks) } - /// Compute the [`ForkId`] for the given [`Head`] folowing eip-6122 spec + /// Compute the [`ForkId`] for the given [`Head`] following eip-6122 spec pub fn fork_id(&self, head: &Head) -> ForkId { let mut forkhash = ForkHash::from(self.genesis_hash()); let mut current_applied = 0; @@ -817,8 +817,8 @@ impl ChainSpec { let mut hardforks_iter = self.forks_iter().peekable(); while let Some((_, curr_cond)) = hardforks_iter.next() { if let Some((_, next_cond)) = hardforks_iter.peek() { - // peek and find the first occurence of ForkCondition::TTD (merge) , or in - // custom ChainSpecs, the first occurence of + // peek and find the first occurrence of ForkCondition::TTD (merge) , or in + // custom ChainSpecs, the first occurrence of // ForkCondition::Timestamp. If curr_cond is ForkCondition::Block at // this point, which it should be in most "normal" ChainSpecs, // return its block_num @@ -946,7 +946,7 @@ pub struct ForkTimestamps { } impl ForkTimestamps { - /// Creates a new [`ForkTimestamps`] from the given hardforks by extracing the timestamps + /// Creates a new [`ForkTimestamps`] from the given hardforks by extracting the timestamps fn from_hardforks(forks: &BTreeMap) -> Self { let mut timestamps = ForkTimestamps::default(); if let Some(shanghai) = forks.get(&Hardfork::Shanghai).and_then(|f| f.as_timestamp()) { @@ -1771,7 +1771,7 @@ Post-merge hard forks (timestamp based): // spec w/ only ForkCondition::Block - test the match arm for ForkCondition::Block to ensure // no regressions, for these ForkConditions(Block/TTD) - a separate chain spec definition is - // technically unecessary - but we include it here for thoroughness + // technically unnecessary - but we include it here for thoroughness let fork_cond_block_only_case = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(empty_genesis) diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs index 0762b5c8004d5..18a41168fd59f 100644 --- a/crates/primitives/src/constants/mod.rs +++ b/crates/primitives/src/constants/mod.rs @@ -46,7 +46,7 @@ pub const ETHEREUM_BLOCK_GAS_LIMIT: u64 = 30_000_000; /// 12.5% of 7 is less than 1. /// /// Note that min base fee under different 1559 parameterizations may differ, but there's no -/// signifant harm in leaving this setting as is. +/// significant harm in leaving this setting as is. pub const MIN_PROTOCOL_BASE_FEE: u64 = 7; /// Same as [MIN_PROTOCOL_BASE_FEE] but as a U256. 
diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 96c259d67699b..ba9116fd14325 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1506,7 +1506,7 @@ impl TransactionSignedEcRecovered { self.signed_transaction } - /// Desolve Self to its component + /// Dissolve Self to its component pub fn to_components(self) -> (TransactionSigned, Address) { (self.signed_transaction, self.signer) } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index ee7a39b6c8a4a..7c12d9b519a20 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -203,7 +203,7 @@ impl PooledTransactionsElement { let tx_type = *data.first().ok_or(RlpError::InputTooShort)?; if tx_type == EIP4844_TX_TYPE_ID { - // Recall that the blob transaction response `TranactionPayload` is encoded like + // Recall that the blob transaction response `TransactionPayload` is encoded like // this: `rlp([tx_payload_body, blobs, commitments, proofs])` // // Note that `tx_payload_body` is a list: @@ -595,7 +595,7 @@ impl PooledTransactionsElementEcRecovered { tx.into_ecrecovered_transaction(signer) } - /// Desolve Self to its component + /// Dissolve Self to its component pub fn into_components(self) -> (PooledTransactionsElement, Address) { (self.transaction, self.signer) } diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 04851c1efd5af..248c266e23589 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -172,7 +172,7 @@ impl BlobTransaction { self.sidecar.encode_inner(out); } - /// Ouputs the length of the RLP encoding of the blob transaction, including the tx type byte, + /// Outputs the length of the RLP encoding of the blob transaction, including the tx type byte, /// optionally including the length of a wrapping string header. If `with_header` is `false`, /// the length of the following will be calculated: /// `tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs])` diff --git a/crates/primitives/src/trie/subnode.rs b/crates/primitives/src/trie/subnode.rs index 94df21219d59f..87ad1142aadd9 100644 --- a/crates/primitives/src/trie/subnode.rs +++ b/crates/primitives/src/trie/subnode.rs @@ -54,8 +54,8 @@ impl Compact for StoredSubNode { let nibbles_exists = buf.get_u8() != 0; let nibble = if nibbles_exists { Some(buf.get_u8()) } else { None }; - let node_exsists = buf.get_u8() != 0; - let node = if node_exsists { + let node_exists = buf.get_u8() != 0; + let node = if node_exists { let (node, rest) = StoredBranchNode::from_compact(buf, 0); buf = rest; Some(node.0) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 3ae0bd8b4b33d..5b1bbaf6ce2a6 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -324,7 +324,7 @@ impl /// Configure a [NoopTransactionPool] instance. /// - /// Caution: This will configure a pool API that does abosultely nothing. + /// Caution: This will configure a pool API that does absolutely nothing. /// This is only intended for allow easier setup of namespaces that depend on the [EthApi] which /// requires a [TransactionPool] implementation. pub fn with_noop_pool( @@ -355,7 +355,7 @@ impl /// Configure a [NoopNetwork] instance. /// - /// Caution: This will configure a network API that does abosultely nothing. 
+ /// Caution: This will configure a network API that does absolutely nothing. /// This is only intended for allow easier setup of namespaces that depend on the [EthApi] which /// requires a [NetworkInfo] implementation. pub fn with_noop_network( @@ -1729,7 +1729,7 @@ impl RpcServerConfig { /// /// This consumes the builder and returns a server. /// - /// Note: The server ist not started and does nothing unless polled, See also [RpcServer::start] + /// Note: The server is not started and does nothing unless polled, See also [RpcServer::start] pub async fn build(mut self, modules: &TransportRpcModules) -> Result { let mut server = RpcServer::empty(); server.ws_http = self.build_ws_http(modules).await?; diff --git a/crates/rpc/rpc/src/eth/api/fee_history.rs b/crates/rpc/rpc/src/eth/api/fee_history.rs index fac185860f74a..c51350fd38d4f 100644 --- a/crates/rpc/rpc/src/eth/api/fee_history.rs +++ b/crates/rpc/rpc/src/eth/api/fee_history.rs @@ -29,7 +29,7 @@ pub struct FeeHistoryCache { } impl FeeHistoryCache { - /// Creates new FeeHistoryCache instance, initialize it with the mose recent data, set bounds + /// Creates new FeeHistoryCache instance, initialize it with the most recent data, set bounds pub fn new(eth_cache: EthStateCache, config: FeeHistoryCacheConfig) -> Self { let inner = FeeHistoryCacheInner { lower_bound: Default::default(), diff --git a/crates/rpc/rpc/src/eth/api/fees.rs b/crates/rpc/rpc/src/eth/api/fees.rs index 9ac17bbb703e1..51fa879b574dc 100644 --- a/crates/rpc/rpc/src/eth/api/fees.rs +++ b/crates/rpc/rpc/src/eth/api/fees.rs @@ -146,7 +146,7 @@ where .map(|h| h.timestamp) .unwrap_or_default(); - // Als need to include the `base_fee_per_gas` and `base_fee_per_blob_gas` for the next + // Also need to include the `base_fee_per_gas` and `base_fee_per_blob_gas` for the next // block base_fee_per_gas.push(U256::from(calculate_next_block_base_fee( last_entry.gas_used, diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 6a1688d758b3f..04c1e7b178cc1 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -217,7 +217,7 @@ impl ExecutionStage { target: "sync::stages::execution", block_fetch = ?fetch_block_duration, execution = ?execution_duration, - write_preperation = ?write_preparation_duration, + write_preparation = ?write_preparation_duration, write = ?db_write_duration, "Execution time" ); diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index b2761267b8d9f..cb61e99314561 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -295,7 +295,7 @@ impl Stage for MerkleStage { let (block_root, updates) = StateRoot::incremental_root_with_updates(tx, range) .map_err(|e| StageError::Fatal(Box::new(e)))?; - // Validate the calulated state root + // Validate the calculated state root let target = provider .header_by_number(input.unwind_to)? .ok_or_else(|| ProviderError::HeaderNotFound(input.unwind_to.into()))?; diff --git a/crates/storage/codecs/derive/src/compact/enums.rs b/crates/storage/codecs/derive/src/compact/enums.rs index a408c59305b3e..64de6c99f43de 100644 --- a/crates/storage/codecs/derive/src/compact/enums.rs +++ b/crates/storage/codecs/derive/src/compact/enums.rs @@ -48,7 +48,7 @@ impl<'a> EnumHandler<'a> { /// Generates `from_compact` code for an enum variant. 
/// - /// `fields_iterator` might look something like \[VariantUnit, VariantUnamedField, Field, + /// `fields_iterator` might look something like \[VariantUnit, VariantUnnamedField, Field, /// VariantUnit...\]. pub fn from(&mut self, variant_name: &str, ident: &Ident) { let variant_name = format_ident!("{variant_name}"); @@ -65,7 +65,7 @@ impl<'a> EnumHandler<'a> { format_ident!("specialized_from_compact") }; - // Unamed type + // Unnamed type self.enum_lines.push(quote! { #current_variant_index => { let (inner, new_buf) = #field_type::#from_compact_ident(buf, buf.len()); @@ -91,7 +91,7 @@ impl<'a> EnumHandler<'a> { /// Generates `to_compact` code for an enum variant. /// - /// `fields_iterator` might look something like [VariantUnit, VariantUnamedField, Field, + /// `fields_iterator` might look something like [VariantUnit, VariantUnnamedField, Field, /// VariantUnit...]. pub fn to(&mut self, variant_name: &str, ident: &Ident) { let variant_name = format_ident!("{variant_name}"); @@ -106,7 +106,7 @@ impl<'a> EnumHandler<'a> { format_ident!("specialized_to_compact") }; - // Unamed type + // Unnamed type self.enum_lines.push(quote! { #ident::#variant_name(field) => { field.#to_compact_ident(&mut buffer); diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index 20371a3f6c061..6cb4e6149240d 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -576,7 +576,7 @@ where }; Some(Ok((key, data))) } - // MDBX_ENODATA can occur when the cursor was previously seeked to a + // MDBX_ENODATA can occur when the cursor was previously sought to a // non-existent value, e.g. iter_from with a // key greater than all values in the database. ffi::MDBX_NOTFOUND | ffi::MDBX_ENODATA => None, @@ -671,7 +671,7 @@ where }; Some(Ok((key, data))) } - // MDBX_NODATA can occur when the cursor was previously seeked to a + // MDBX_NODATA can occur when the cursor was previously sought to a // non-existent value, e.g. iter_from with a // key greater than all values in the database. ffi::MDBX_NOTFOUND | ffi::MDBX_ENODATA => None, diff --git a/crates/storage/libmdbx-rs/src/flags.rs b/crates/storage/libmdbx-rs/src/flags.rs index f984ffcaf0245..e6b2697a859af 100644 --- a/crates/storage/libmdbx-rs/src/flags.rs +++ b/crates/storage/libmdbx-rs/src/flags.rs @@ -17,7 +17,7 @@ pub enum SyncMode { /// hardware, with [SyncMode::NoMetaSync] you may get a doubling of write performance. /// /// This trade-off maintains database integrity, but a system crash may undo the last committed - /// transaction. I.e. it preserves the ACPI (atomicity, consistency, isolation) but not D + /// transaction. I.e. it preserves the ACI (atomicity, consistency, isolation) but not D /// (durability) database property. NoMetaSync, @@ -75,7 +75,7 @@ pub enum SyncMode { /// the database, but you can lose the last transactions, if at least one buffer is not yet /// flushed to disk. The risk is governed by how often the system flushes dirty buffers to /// disk and how often [Environment::sync()](crate::Environment::sync) is called. So, - /// transactions exhibit ACPI (atomicity, consistency, isolation) properties and only lose D + /// transactions exhibit ACI (atomicity, consistency, isolation) properties and only lose D /// (durability). I.e. database integrity is maintained, but a system crash may undo the /// final transactions. 
/// diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index f270f024bc9ec..45259ba7b4269 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -582,7 +582,7 @@ impl TransactionPtr { Ok((f)(self.txn)) } - /// Executes the given closure once the lock on the transaction is acquired. If the tranasction + /// Executes the given closure once the lock on the transaction is acquired. If the transaction /// is timed out, it will be renewed first. /// /// Returns the result of the closure or an error if the transaction renewal fails. diff --git a/crates/storage/nippy-jar/src/cursor.rs b/crates/storage/nippy-jar/src/cursor.rs index d68c387c68eb2..984206c36b42f 100644 --- a/crates/storage/nippy-jar/src/cursor.rs +++ b/crates/storage/nippy-jar/src/cursor.rs @@ -75,7 +75,7 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { /// stored in file. pub fn row_by_key(&mut self, key: &[u8]) -> Result>, NippyJarError> { if let (Some(filter), Some(phf)) = (&self.jar.filter, &self.jar.phf) { - // TODO: is it worth to parallize both? + // TODO: is it worth to parallelize both? // May have false positives if filter.contains(key)? { @@ -143,7 +143,7 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { mask: usize, ) -> Result>, NippyJarError> { if let (Some(filter), Some(phf)) = (&self.jar.filter, &self.jar.phf) { - // TODO: is it worth to parallize both? + // TODO: is it worth to parallelize both? // May have false positives if filter.contains(key)? { diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 16960404c9648..4d311f2732c3f 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -1120,7 +1120,7 @@ mod tests { let nippy = NippyJar::load_without_header(file_path).unwrap(); assert_eq!(initial_rows, nippy.rows); - // Data was written successfuly + // Data was written successfully let new_data_size = File::open(nippy.data_path()).unwrap().metadata().unwrap().len() as usize; assert_eq!(new_data_size, initial_data_size + col1[2].len() + col2[2].len()); @@ -1170,7 +1170,7 @@ mod tests { let nippy = NippyJar::load_without_header(file_path).unwrap(); assert_eq!(initial_rows, nippy.rows); - // Data was written successfuly + // Data was written successfully let new_data_size = File::open(nippy.data_path()).unwrap().metadata().unwrap().len() as usize; assert_eq!(new_data_size, initial_data_size + col1[2].len() + col2[2].len()); @@ -1230,7 +1230,7 @@ mod tests { // Load and add 1 row { let nippy = NippyJar::load_without_header(file_path).unwrap(); - // Check if it was committed successfuly + // Check if it was committed successfully assert_eq!(nippy.max_row_size, col1[0].len() + col2[0].len()); assert_eq!(nippy.rows, 1); diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs index 3a15ee48e6de3..08f7ee9f4113b 100644 --- a/crates/storage/nippy-jar/src/writer.rs +++ b/crates/storage/nippy-jar/src/writer.rs @@ -80,7 +80,7 @@ impl NippyJarWriter { &mut self.jar.user_header } - /// Gets total writter rows in jar. + /// Gets total writer rows in jar. 
pub fn rows(&self) -> usize { self.jar.rows() } diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index 5b900ec742cc0..e38acbb6248e3 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -268,7 +268,7 @@ impl BundleStateWithReceipts { /// Extend one state from another /// - /// For state this is very sensitive opperation and should be used only when + /// For state this is very sensitive operation and should be used only when /// we know that other state was build on top of this one. /// In most cases this would be true. pub fn extend(&mut self, other: Self) { diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index d7cb4cbac2b73..5f703426d9923 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -187,7 +187,7 @@ impl Chain { /// /// Attachment includes block number, block hash, transaction hash and transaction index. pub fn receipts_with_attachment(&self) -> Vec { - let mut receipt_attch = Vec::new(); + let mut receipt_attach = Vec::new(); for ((block_num, block), receipts) in self.blocks().iter().zip(self.state.receipts().iter()) { let mut tx_receipts = Vec::new(); @@ -198,9 +198,9 @@ impl Chain { )); } let block_num_hash = BlockNumHash::new(*block_num, block.hash()); - receipt_attch.push(BlockReceipts { block: block_num_hash, tx_receipts }); + receipt_attach.push(BlockReceipts { block: block_num_hash, tx_receipts }); } - receipt_attch + receipt_attach } /// Append a single block with state to the chain. diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 4dc188a82cd83..fa65b967e63cf 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -94,7 +94,7 @@ impl DatabaseProviderRW { } } -/// A provider struct that fetchs data from the database. +/// A provider struct that fetches data from the database. /// Wrapper around [`DbTx`] and [`DbTxMut`]. Example: [`HeaderProvider`] [`BlockHashReader`] #[derive(Debug)] pub struct DatabaseProvider { diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 92bc0bce6fb45..fbf36f9f90cd2 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -26,7 +26,7 @@ pub struct StaticFileJarProvider<'a> { /// Main static file segment jar: LoadedJarRef<'a>, /// Another kind of static file segment to help query data from the main one. 
- auxiliar_jar: Option>, + auxiliary_jar: Option>, metrics: Option>, } @@ -39,7 +39,7 @@ impl<'a> Deref for StaticFileJarProvider<'a> { impl<'a> From> for StaticFileJarProvider<'a> { fn from(value: LoadedJarRef<'a>) -> Self { - StaticFileJarProvider { jar: value, auxiliar_jar: None, metrics: None } + StaticFileJarProvider { jar: value, auxiliary_jar: None, metrics: None } } } @@ -62,9 +62,9 @@ impl<'a> StaticFileJarProvider<'a> { Ok(result) } - /// Adds a new auxiliar static file to help query data from the main one - pub fn with_auxiliar(mut self, auxiliar_jar: StaticFileJarProvider<'a>) -> Self { - self.auxiliar_jar = Some(Box::new(auxiliar_jar)); + /// Adds a new auxiliary static file to help query data from the main one + pub fn with_auxiliary(mut self, auxiliary_jar: StaticFileJarProvider<'a>) -> Self { + self.auxiliary_jar = Some(Box::new(auxiliary_jar)); self } @@ -296,7 +296,7 @@ impl<'a> ReceiptProvider for StaticFileJarProvider<'a> { } fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { - if let Some(tx_static_file) = &self.auxiliar_jar { + if let Some(tx_static_file) = &self.auxiliary_jar { if let Some(num) = tx_static_file.transaction_id(hash)? { return self.receipt(num) } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 856431779c620..a34a93317bf82 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -156,7 +156,7 @@ impl StaticFileProviderRW { // We find the maximum block of the segment by checking this writer's last block. // // However if there's no block range (because there's no data), we try to calculate it by - // substracting 1 from the expected block start, resulting on the last block of the + // subtracting 1 from the expected block start, resulting on the last block of the // previous file. // // If that expected block start is 0, then it means that there's no actual block data, and diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 40ee39a0ec8c1..34678b7289c8e 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -447,7 +447,7 @@ impl PendingPool { /// Truncates the pool to the given [SubPoolLimit], removing transactions until the subpool /// limits are met. /// - /// This attempts to remove transactions by rougly the same amount for each sender. For more + /// This attempts to remove transactions by roughly the same amount for each sender. For more /// information on this exact process see docs for /// [remove_to_limit](PendingPool::remove_to_limit). /// diff --git a/crates/trie/src/hashed_cursor/post_state.rs b/crates/trie/src/hashed_cursor/post_state.rs index 7c9e048cb849d..415e7e43701aa 100644 --- a/crates/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/src/hashed_cursor/post_state.rs @@ -315,7 +315,7 @@ where Ok(result) } - /// Return the next account storage entry for the current accont key. + /// Return the next account storage entry for the current account key. /// /// # Panics /// diff --git a/crates/trie/src/walker.rs b/crates/trie/src/walker.rs index 3710648fd201c..005652cd17863 100644 --- a/crates/trie/src/walker.rs +++ b/crates/trie/src/walker.rs @@ -306,7 +306,7 @@ mod tests { let mut walker = TrieWalker::new(&mut trie, Default::default()); assert!(walker.key().unwrap().is_empty()); - // We're traversing the path in lexigraphical order. 
+ // We're traversing the path in lexicographical order. for expected in expected { let got = walker.advance().unwrap(); assert_eq!(got.unwrap(), Nibbles::from_nibbles_unchecked(expected.clone())); diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 843eefb70710d..8031e07c0f5ff 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -5940,7 +5940,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "Durations of nested function calls, in one call to poll `TransactionsManager` future:\n\nNetwork Events - stream peer session updates from `NetworkManager`;\nTransaction Events - stream txns gossip fom `NetworkManager`;\nPending Transactions - stream hashes of txns successfully inserted into pending set in `TransactionPool`;\nPending Pool Imports - flush txns to pool from `TransactionsManager`;\nFetch Events - stream fetch txn events (success case wraps a tx) from `TransactionFetcher`;\n(Transactions Commands - stream commands from testnet to fetch/serve/propagate txns)\n", + "description": "Durations of nested function calls, in one call to poll `TransactionsManager` future:\n\nNetwork Events - stream peer session updates from `NetworkManager`;\nTransaction Events - stream txns gossip from `NetworkManager`;\nPending Transactions - stream hashes of txns successfully inserted into pending set in `TransactionPool`;\nPending Pool Imports - flush txns to pool from `TransactionsManager`;\nFetch Events - stream fetch txn events (success case wraps a tx) from `TransactionFetcher`;\n(Transactions Commands - stream commands from testnet to fetch/serve/propagate txns)\n", "fieldConfig": { "defaults": { "color": { @@ -6232,7 +6232,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "Durations of nested function calls, in one call to poll `NetworkManager` future:\n\nNetwork Handle Message - stream network handle messages from `TransactionsManager`;\nSwarm Events - stream transaction gossip fom `Swarm`", + "description": "Durations of nested function calls, in one call to poll `NetworkManager` future:\n\nNetwork Handle Message - stream network handle messages from `TransactionsManager`;\nSwarm Events - stream transaction gossip from `Swarm`", "fieldConfig": { "defaults": { "color": { diff --git a/etc/grafana/dashboards/reth-mempool.json b/etc/grafana/dashboards/reth-mempool.json index f9673420a7dcf..56fc718a3746d 100644 --- a/etc/grafana/dashboards/reth-mempool.json +++ b/etc/grafana/dashboards/reth-mempool.json @@ -1524,7 +1524,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "Durations of nested function calls, in one call to poll `TransactionsManager` future:\n\nNetwork Events - stream peer session updates from `NetworkManager`;\nTransaction Events - stream txns gossip fom `NetworkManager`;\nPending Transactions - stream hashes of txns successfully inserted into pending set in `TransactionPool`;\nPending Pool Imports - flush txns to pool from `TransactionsManager`;\nFetch Events - stream fetch txn events (success case wraps a tx) from `TransactionFetcher`;\nFetch Pending Hashes - search for hashes announced by an idle peer in cache for hashes pending fetch;\n(Transactions Commands - stream commands from testnet to fetch/serve/propagate txns)\n", + "description": "Durations of nested function calls, in one call to poll `TransactionsManager` future:\n\nNetwork Events - stream peer session updates from `NetworkManager`;\nTransaction Events - stream txns gossip 
from `NetworkManager`;\nPending Transactions - stream hashes of txns successfully inserted into pending set in `TransactionPool`;\nPending Pool Imports - flush txns to pool from `TransactionsManager`;\nFetch Events - stream fetch txn events (success case wraps a tx) from `TransactionFetcher`;\nFetch Pending Hashes - search for hashes announced by an idle peer in cache for hashes pending fetch;\n(Transactions Commands - stream commands from testnet to fetch/serve/propagate txns)\n", "fieldConfig": { "defaults": { "color": { @@ -2130,7 +2130,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "Durations of nested function calls, in one call to poll `NetworkManager` future:\n\nNetwork Handle Message - stream network handle messages from `TransactionsManager`;\nSwarm Events - stream transaction gossip fom `Swarm`", + "description": "Durations of nested function calls, in one call to poll `NetworkManager` future:\n\nNetwork Handle Message - stream network handle messages from `TransactionsManager`;\nSwarm Events - stream transaction gossip from `Swarm`", "fieldConfig": { "defaults": { "color": { diff --git a/examples/db-access.rs b/examples/db-access.rs index 6edfc6afe166b..b20f021abe4b5 100644 --- a/examples/db-access.rs +++ b/examples/db-access.rs @@ -31,7 +31,7 @@ fn main() -> eyre::Result<()> { // the `provider_rw` function and look for the `Writer` variants of the traits. let provider = factory.provider()?; - // Run basic queryies against the DB + // Run basic queries against the DB let block_num = 100; header_provider_example(&provider, block_num)?; block_provider_example(&provider, block_num)?; diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs index 13fbc6223728a..ca55990756bad 100644 --- a/examples/rpc-db/src/main.rs +++ b/examples/rpc-db/src/main.rs @@ -21,7 +21,7 @@ use reth::{ use reth::rpc::builder::{ RethRpcModule, RpcModuleBuilder, RpcServerConfig, TransportRpcModuleConfig, }; -// Configuring the network parts, ideally also wouldn't ned to think about this. +// Configuring the network parts, ideally also wouldn't need to think about this. use myrpc_ext::{MyRpcExt, MyRpcExtApiServer}; use reth::{ blockchain_tree::noop::NoopBlockchainTree, providers::test_utils::TestCanonStateSubscriptions,