Merge branch 'master' into standard-http
paulhauner committed Sep 14, 2020
2 parents 6172437 + c9596fc commit 288488e
Showing 24 changed files with 338 additions and 529 deletions.
463 changes: 70 additions & 393 deletions Cargo.lock

Large diffs are not rendered by default.

7 changes: 1 addition & 6 deletions README.md
@@ -2,16 +2,14 @@

An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prime.

[![Build Status]][Build Link] [![Book Status]][Book Link] [![RustDoc Status]][RustDoc Link] [![Chat Badge]][Chat Link]
[![Build Status]][Build Link] [![Book Status]][Book Link] [![Chat Badge]][Chat Link]

[Build Status]: https://github.com/sigp/lighthouse/workflows/test-suite/badge.svg?branch=master
[Build Link]: https://github.com/sigp/lighthouse/actions
[Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da
[Chat Link]: https://discord.gg/cyAszAh
[Book Status]:https://img.shields.io/badge/user--docs-master-informational
[Book Link]: http://lighthouse-book.sigmaprime.io/
[RustDoc Status]:https://img.shields.io/badge/code--docs-master-orange
[RustDoc Link]: http://lighthouse-docs.sigmaprime.io/

[Documentation](http://lighthouse-book.sigmaprime.io/)

@@ -59,9 +57,6 @@ Current development overview:
The [Lighthouse Book](http://lighthouse-book.sigmaprime.io/) contains information
for testnet users and developers.

Code documentation is generated via `cargo doc` and hosted at
[lighthouse-docs.sigmaprime.io](http://lighthouse-docs.sigmaprime.io/).

If you'd like some background on Sigma Prime, please see the [Lighthouse Update
\#00](https://lighthouse.sigmaprime.io/update-00.html) blog post or
[sigmaprime.io](https://sigmaprime.io).
2 changes: 1 addition & 1 deletion account_manager/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "account_manager"
version = "0.2.8"
version = "0.2.9"
authors = ["Paul Hauner <[email protected]>", "Luke Anderson <[email protected]>"]
edition = "2018"

2 changes: 1 addition & 1 deletion beacon_node/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "beacon_node"
version = "0.2.8"
version = "0.2.9"
authors = ["Paul Hauner <[email protected]>", "Age Manning <[email protected]"]
edition = "2018"

1 change: 1 addition & 0 deletions beacon_node/eth2_libp2p/Cargo.toml
@@ -37,6 +37,7 @@ tiny-keccak = "2.0.2"
environment = { path = "../../lighthouse/environment" }
# TODO: Remove rand crate for mainnet
rand = "0.7.3"
regex = "1.3.9"

[dependencies.libp2p]
#version = "0.23.0"
11 changes: 8 additions & 3 deletions beacon_node/eth2_libp2p/src/discovery/mod.rs
@@ -31,12 +31,12 @@ use tokio::sync::mpsc;
use types::{EnrForkId, EthSpec, SubnetId};

mod subnet_predicate;
use subnet_predicate::subnet_predicate;
pub use subnet_predicate::subnet_predicate;

/// Local ENR storage filename.
pub const ENR_FILENAME: &str = "enr.dat";
/// Target number of peers we'd like to have connected to a given long-lived subnet.
const TARGET_SUBNET_PEERS: usize = 3;
pub const TARGET_SUBNET_PEERS: usize = 3;
/// Target number of peers to search for given a grouped subnet query.
const TARGET_PEERS_FOR_GROUPED_QUERY: usize = 6;
/// Number of times to attempt a discovery request.
@@ -287,6 +287,11 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
self.discv5.local_enr()
}

/// Return the cached enrs.
pub fn cached_enrs(&self) -> impl Iterator<Item = (&PeerId, &Enr)> {
self.cached_enrs.iter()
}

/// This adds a new `FindPeers` query to the queue if one doesn't already exist.
pub fn discover_peers(&mut self) {
// If the discv5 service isn't running or we are in the process of a query, don't bother queuing a new one.
@@ -558,7 +563,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
.peers_on_subnet(subnet_query.subnet_id)
.count();

if peers_on_subnet > TARGET_SUBNET_PEERS {
if peers_on_subnet >= TARGET_SUBNET_PEERS {
debug!(self.log, "Discovery ignored";
"reason" => "Already connected to desired peers",
"connected_peers_on_subnet" => peers_on_subnet,
73 changes: 62 additions & 11 deletions beacon_node/eth2_libp2p/src/peer_manager/mod.rs
@@ -1,7 +1,7 @@
//! Implementation of Lighthouse's peer management system.
pub use self::peerdb::*;
use crate::discovery::{Discovery, DiscoveryEvent};
use crate::discovery::{subnet_predicate, Discovery, DiscoveryEvent, TARGET_SUBNET_PEERS};
use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode};
use crate::{error, metrics};
use crate::{EnrExt, NetworkConfig, NetworkGlobals, PeerId, SubnetDiscovery};
@@ -19,7 +19,7 @@ use std::{
task::{Context, Poll},
time::{Duration, Instant},
};
use types::EthSpec;
use types::{EthSpec, SubnetId};

pub use libp2p::core::{identity::Keypair, Multiaddr};

@@ -214,18 +214,45 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {

/// A request to find peers on a given subnet.
pub fn discover_subnet_peers(&mut self, subnets_to_discover: Vec<SubnetDiscovery>) {
// Extend the time to maintain peers if required.
for s in subnets_to_discover.iter() {
if let Some(min_ttl) = s.min_ttl {
self.network_globals
.peers
.write()
.extend_peers_on_subnet(s.subnet_id, min_ttl);
}
}
let filtered: Vec<SubnetDiscovery> = subnets_to_discover
.into_iter()
.filter(|s| {
// Extend min_ttl of connected peers on required subnets
if let Some(min_ttl) = s.min_ttl {
self.network_globals
.peers
.write()
.extend_peers_on_subnet(s.subnet_id, min_ttl);
}
// Already have target number of peers, no need for subnet discovery
let peers_on_subnet = self
.network_globals
.peers
.read()
.peers_on_subnet(s.subnet_id)
.count();
if peers_on_subnet >= TARGET_SUBNET_PEERS {
debug!(
self.log,
"Discovery query ignored";
"subnet_id" => format!("{:?}",s.subnet_id),
"reason" => "Already connected to desired peers",
"connected_peers_on_subnet" => peers_on_subnet,
"target_subnet_peers" => TARGET_SUBNET_PEERS,
);
false
// Queue an outgoing connection request to the cached peers that are on `s.subnet_id`.
// If we connect to the cached peers before the discovery query starts, then we potentially
// save a costly discovery query.
} else {
self.dial_cached_enrs_in_subnet(s.subnet_id);
true
}
})
.collect();

// request the subnet query from discovery
self.discovery.discover_subnet_peers(subnets_to_discover);
self.discovery.discover_subnet_peers(filtered);
}

/// A STATUS message has been received from a peer. This resets the status timer.
@@ -531,6 +558,30 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
self.events.push(PeerManagerEvent::SocketUpdated(multiaddr));
}

/// Dial cached enrs in discovery service that are in the given `subnet_id` and aren't
/// in Connected, Dialing or Banned state.
fn dial_cached_enrs_in_subnet(&mut self, subnet_id: SubnetId) {
let predicate = subnet_predicate::<TSpec>(vec![subnet_id], &self.log);
let peers_to_dial: Vec<PeerId> = self
.discovery()
.cached_enrs()
.filter_map(|(peer_id, enr)| {
let peers = self.network_globals.peers.read();
if predicate(enr)
&& !peers.is_connected_or_dialing(peer_id)
&& !peers.is_banned(peer_id)
{
Some(peer_id.clone())
} else {
None
}
})
.collect();
for peer in &peers_to_dial {
self.dial_peer(peer);
}
}

/// Peers that have been returned by discovery requests are dialed here if they are suitable.
///
/// NOTE: By dialing `PeerId`s and not multiaddrs, libp2p requests the multiaddr associated
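To make the control flow of the new `discover_subnet_peers` easier to follow, here is a minimal, self-contained sketch of the filtering decision (hypothetical types and peer counts, not code from this commit): subnets that already meet `TARGET_SUBNET_PEERS` are dropped from the discovery request, while the remaining ones would additionally trigger dialing of cached ENRs before the query runs.

```rust
/// Simplified sketch of the subnet-filtering decision; `SubnetDiscovery` here is a
/// stand-in struct and `peers_on` replaces the real peer-database lookup.
const TARGET_SUBNET_PEERS: usize = 3;

#[derive(Debug, Clone, Copy)]
struct SubnetDiscovery {
    subnet_id: u64,
}

fn filter_subnets(
    subnets: Vec<SubnetDiscovery>,
    peers_on: impl Fn(u64) -> usize,
) -> Vec<SubnetDiscovery> {
    subnets
        .into_iter()
        // Keep only subnets that still need peers; the real code also dials
        // cached ENRs for these subnets before the discovery query starts.
        .filter(|s| peers_on(s.subnet_id) < TARGET_SUBNET_PEERS)
        .collect()
}

fn main() {
    let subnets = vec![
        SubnetDiscovery { subnet_id: 1 },
        SubnetDiscovery { subnet_id: 2 },
    ];
    // Subnet 1 already has 4 connected peers, subnet 2 only 1.
    let filtered = filter_subnets(subnets, |id| if id == 1 { 4 } else { 1 });
    println!("{:?}", filtered); // only subnet 2 remains to be queried
}
```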
8 changes: 4 additions & 4 deletions beacon_node/eth2_libp2p/src/rpc/methods.rs
@@ -1,6 +1,7 @@
//! Available RPC methods types and ids.
use crate::types::EnrBitfield;
use regex::bytes::Regex;
use serde::Serialize;
use ssz_derive::{Decode, Encode};
use ssz_types::{
@@ -42,10 +43,9 @@ impl Deref for ErrorType {

impl ToString for ErrorType {
fn to_string(&self) -> String {
match std::str::from_utf8(self.0.deref()) {
Ok(s) => s.to_string(),
Err(_) => format!("{:?}", self.0.deref()), // Display raw bytes if not a UTF-8 string
}
#[allow(clippy::invalid_regex)]
let re = Regex::new("\\p{C}").expect("Regex is valid");
String::from_utf8_lossy(&re.replace_all(self.0.deref(), &b""[..])).to_string()
}
}

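The change above replaces the raw-bytes fallback with a regex that strips Unicode control characters before displaying peer-supplied error strings. A standalone sketch of the same technique (illustrative only; `sanitize_error_bytes` is not a function in this commit):

```rust
use regex::bytes::Regex;

/// Remove Unicode "Other" code points (control, format, unassigned) from
/// peer-supplied bytes before rendering them as a string.
fn sanitize_error_bytes(bytes: &[u8]) -> String {
    let re = Regex::new("\\p{C}").expect("regex is valid");
    String::from_utf8_lossy(&re.replace_all(bytes, &b""[..])).to_string()
}

fn main() {
    // The escape (0x1b) and bell (0x07) bytes are stripped; printable text is kept.
    let raw = b"bad request\x1b\x07";
    println!("{}", sanitize_error_bytes(raw)); // prints: bad request
}
```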
25 changes: 24 additions & 1 deletion beacon_node/eth2_libp2p/src/rpc/rate_limiter.rs
@@ -189,7 +189,30 @@ impl RPCRateLimiter {
request: &RPCRequest<T>,
) -> Result<(), RateLimitedErr> {
let time_since_start = self.init_time.elapsed();
let tokens = request.expected_responses().max(1);
let mut tokens = request.expected_responses().max(1);

// Increase the token cost for BlocksByRange requests with large step counts.
// The token count grows quadratically with the step size:
// using (step_size/5)^2 + 1 as the penalty factor leaves step sizes 1-4 unpenalised,
// while larger step sizes incur a quadratic penalty.
// Penalties go:
// Step size | Penalty Factor
// 1 | 1
// 2 | 1
// 3 | 1
// 4 | 1
// 5 | 2
// 6 | 2
// 7 | 2
// 8 | 3
// 9 | 4
// 10 | 5

if let RPCRequest::BlocksByRange(bbr_req) = request {
let penalty_factor = (bbr_req.step as f64 / 5.0).powi(2) as u64 + 1;
tokens *= penalty_factor;
}

let check =
|limiter: &mut Limiter<PeerId>| limiter.allows(time_since_start, peer_id, tokens);
let limiter = match request.protocol() {
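As a sanity check on the table above, a small sketch (not part of the diff) that reproduces the penalty factor for step sizes 1 through 10:

```rust
// Quadratic penalty applied to BlocksByRange token counts, as in the diff above.
fn penalty_factor(step: u64) -> u64 {
    (step as f64 / 5.0).powi(2) as u64 + 1
}

fn main() {
    for step in 1..=10u64 {
        println!("step {:>2} -> penalty factor {}", step, penalty_factor(step));
    }
    // Output: 1, 1, 1, 1, 2, 2, 2, 3, 4, 5 — matching the comment table.
}
```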
