
Reduce consensus spam #1658

Merged 11 commits on Feb 1, 2019
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

31 changes: 24 additions & 7 deletions core/finality-grandpa/src/communication.rs
@@ -18,6 +18,8 @@
//! that sign or re-shape.

use std::collections::HashMap;
use std::sync::Arc;

use grandpa::VoterSet;
use futures::prelude::*;
use futures::sync::mpsc;
@@ -27,8 +29,6 @@ use runtime_primitives::traits::Block as BlockT;
use tokio::timer::Interval;
use {Error, Network, Message, SignedMessage, Commit, CompactCommit};

use std::sync::Arc;

fn localized_payload<E: Encode>(round: u64, set_id: u64, message: &E) -> Vec<u8> {
(message, round, set_id).encode()
}
@@ -47,6 +47,8 @@ enum Broadcast<Block: BlockT> {
Announcement(Round, SetId, Block::Hash),
// round, set id being dropped.
DropRound(Round, SetId),
// set_id being dropped.
DropSet(SetId),
}

impl<Block: BlockT> Broadcast<Block> {
@@ -56,6 +58,7 @@ impl<Block: BlockT> Broadcast<Block> {
Broadcast::Message(_, s, _) => s,
Broadcast::Announcement(_, s, _) => s,
Broadcast::DropRound(_, s) => s,
Broadcast::DropSet(s) => s,
}
}
}
@@ -187,7 +190,11 @@ impl<B: BlockT, N: Network<B>> Future for BroadcastWorker<B, N> {
Broadcast::DropRound(round, set_id) => {
// stop making announcements for any dead rounds.
self.announcements.retain(|_, &mut r| r > round);
self.network.drop_messages(round.0, set_id.0);
self.network.drop_round_messages(round.0, set_id.0);
}
Broadcast::DropSet(set_id) => {
// drop all messages (e.g. commits) for the dead set.
self.network.drop_set_messages(set_id.0);
}
}
}
@@ -207,10 +214,14 @@ impl<B: BlockT, N: Network<B>> Network<B> for BroadcastHandle<B, N> {
let _ = self.relay.unbounded_send(Broadcast::Message(Round(round), SetId(set_id), message));
}

fn drop_messages(&self, round: u64, set_id: u64) {
fn drop_round_messages(&self, round: u64, set_id: u64) {
let _ = self.relay.unbounded_send(Broadcast::DropRound(Round(round), SetId(set_id)));
}

fn drop_set_messages(&self, set_id: u64) {
let _ = self.relay.unbounded_send(Broadcast::DropSet(SetId(set_id)));
}

fn commit_messages(&self, set_id: u64) -> Self::In {
self.network.commit_messages(set_id)
}
@@ -332,7 +343,7 @@ impl<Block: BlockT, N: Network<Block>> Sink for OutgoingMessages<Block, N>

impl<Block: BlockT, N: Network<Block>> Drop for OutgoingMessages<Block, N> {
fn drop(&mut self) {
self.network.drop_messages(self.round, self.set_id);
self.network.drop_round_messages(self.round, self.set_id);
}
}

@@ -439,14 +450,14 @@ pub(crate) fn checked_commit_stream<Block: BlockT, S>(
}

/// An output sink for commit messages.
pub(crate) struct CommitsOut<Block, N> {
pub(crate) struct CommitsOut<Block: BlockT, N: Network<Block>> {
network: N,
set_id: u64,
_marker: ::std::marker::PhantomData<Block>,
is_voter: bool,
}

impl<Block, N> CommitsOut<Block, N> {
impl<Block: BlockT, N: Network<Block>> CommitsOut<Block, N> {
/// Create a new commit output stream.
pub(crate) fn new(network: N, set_id: u64, is_voter: bool) -> Self {
CommitsOut {
@@ -487,3 +498,9 @@ impl<Block: BlockT, N: Network<Block>> Sink for CommitsOut<Block, N> {
fn close(&mut self) -> Poll<(), Error> { Ok(Async::Ready(())) }
fn poll_complete(&mut self) -> Poll<(), Error> { Ok(Async::Ready(())) }
}

impl<Block: BlockT, N: Network<Block>> Drop for CommitsOut<Block, N> {
fn drop(&mut self) {
self.network.drop_set_messages(self.set_id);
}
}
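
The two Drop impls in this file tie gossip cleanup to sink lifetimes: OutgoingMessages drops its round's messages when the round ends, and the new CommitsOut impl drops its set's messages when the authority set is finished. A minimal, self-contained sketch of that pattern (the MockNetwork, RoundSink and CommitSink types are hypothetical, not the PR's code):

use std::cell::RefCell;
use std::rc::Rc;

#[derive(Default)]
struct MockNetwork {
    dropped_rounds: RefCell<Vec<(u64, u64)>>,
    dropped_sets: RefCell<Vec<u64>>,
}

struct RoundSink { net: Rc<MockNetwork>, round: u64, set_id: u64 }
struct CommitSink { net: Rc<MockNetwork>, set_id: u64 }

impl Drop for RoundSink {
    fn drop(&mut self) {
        // Mirrors OutgoingMessages::drop -> Network::drop_round_messages above.
        self.net.dropped_rounds.borrow_mut().push((self.round, self.set_id));
    }
}

impl Drop for CommitSink {
    fn drop(&mut self) {
        // Mirrors CommitsOut::drop -> Network::drop_set_messages above.
        self.net.dropped_sets.borrow_mut().push(self.set_id);
    }
}

fn main() {
    let net = Rc::new(MockNetwork::default());
    {
        let _round = RoundSink { net: net.clone(), round: 1, set_id: 0 };
        let _commits = CommitSink { net: net.clone(), set_id: 0 };
    } // both sinks go out of scope here and trigger their cleanup hooks
    assert_eq!(*net.dropped_rounds.borrow(), vec![(1u64, 0u64)]);
    assert_eq!(*net.dropped_sets.borrow(), vec![0u64]);
}
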
16 changes: 12 additions & 4 deletions core/finality-grandpa/src/lib.rs
@@ -231,7 +231,10 @@ pub trait Network<Block: BlockT>: Clone {
fn send_message(&self, round: u64, set_id: u64, message: Vec<u8>);

/// Clean up messages for a round.
fn drop_messages(&self, round: u64, set_id: u64);
fn drop_round_messages(&self, round: u64, set_id: u64);

/// Clean up messages for a given authority set id (e.g. commit messages).
fn drop_set_messages(&self, set_id: u64);

/// Get a stream of commit messages for a specific set-id. This stream
/// should never logically conclude.
@@ -283,9 +286,14 @@ impl<B: BlockT, S: network::specialization::NetworkSpecialization<B>, H: ExHashT
self.service.gossip_consensus_message(topic, message, false);
}

fn drop_messages(&self, round: u64, set_id: u64) {
fn drop_round_messages(&self, round: u64, set_id: u64) {
let topic = message_topic::<B>(round, set_id);
self.service.consensus_gossip().write().collect_garbage(|t| t == &topic);
self.service.consensus_gossip().write().collect_garbage_for_topic(topic);
}

fn drop_set_messages(&self, set_id: u64) {
let topic = commit_topic::<B>(set_id);
self.service.consensus_gossip().write().collect_garbage_for_topic(topic);
}

fn commit_messages(&self, set_id: u64) -> Self::In {
@@ -294,7 +302,7 @@

fn send_commit(&self, _round: u64, set_id: u64, message: Vec<u8>) {
let topic = commit_topic::<B>(set_id);
self.service.gossip_consensus_message(topic, message, true);
self.service.gossip_consensus_message(topic, message, false);
}

fn announce(&self, round: u64, _set_id: u64, block: B::Hash) {
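
The split into drop_round_messages and drop_set_messages above relies on round gossip and commit gossip living under distinct topics, so each can be garbage-collected independently. A rough sketch of that topic split, assuming hypothetical message_topic/commit_topic helpers (the real ones are not shown in this diff):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-in for the runtime's hashing of an encoded topic string.
fn hash_topic(s: &str) -> u64 {
    let mut h = DefaultHasher::new();
    s.hash(&mut h);
    h.finish()
}

// One topic per (round, set): pruned by drop_round_messages when the round ends.
fn message_topic(round: u64, set_id: u64) -> u64 {
    hash_topic(&format!("{}-{}", set_id, round))
}

// One topic per authority set: pruned by drop_set_messages when the set ends.
fn commit_topic(set_id: u64) -> u64 {
    hash_topic(&format!("{}-COMMITS", set_id))
}

fn main() {
    // The two topics are derived from different strings, so dropping a
    // finished round does not discard still-relevant commit messages.
    assert_ne!(message_topic(1, 0), commit_topic(0));
    println!("round topic:  {:016x}", message_topic(1, 0));
    println!("commit topic: {:016x}", commit_topic(0));
}
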
25 changes: 17 additions & 8 deletions core/finality-grandpa/src/tests.rs
@@ -145,6 +145,15 @@ impl MessageRouting {
peer_id,
}
}

fn drop_messages(&self, topic: Hash) {
let inner = self.inner.lock();
let peer = inner.peer(self.peer_id);
let mut gossip = peer.consensus_gossip().write();
peer.with_spec(move |_, _| {
gossip.collect_garbage_for_topic(topic);
});
}
}

fn make_topic(round: u64, set_id: u64) -> Hash {
@@ -199,14 +208,14 @@ impl Network<Block> for MessageRouting {
inner.route_until_complete();
}

fn drop_messages(&self, round: u64, set_id: u64) {
fn drop_round_messages(&self, round: u64, set_id: u64) {
let topic = make_topic(round, set_id);
let inner = self.inner.lock();
let peer = inner.peer(self.peer_id);
let mut gossip = peer.consensus_gossip().write();
peer.with_spec(move |_, _| {
gossip.collect_garbage(|t| t == &topic)
});
self.drop_messages(topic);
}

fn drop_set_messages(&self, set_id: u64) {
let topic = make_commit_topic(set_id);
self.drop_messages(topic);
}

fn commit_messages(&self, set_id: u64) -> Self::In {
@@ -226,7 +235,7 @@

fn send_commit(&self, _round: u64, set_id: u64, message: Vec<u8>) {
let mut inner = self.inner.lock();
inner.peer(self.peer_id).gossip_message(make_commit_topic(set_id), message, true);
inner.peer(self.peer_id).gossip_message(make_commit_topic(set_id), message, false);
inner.route_until_complete();
}

1 change: 1 addition & 0 deletions core/network/Cargo.toml
@@ -14,6 +14,7 @@ error-chain = "0.12"
bitflags = "1.0"
futures = "0.1.17"
linked-hash-map = "0.5"
lru-cache = "0.1.1"
rustc-hex = "2.0"
rand = "0.6"
substrate-primitives = { path = "../../core/primitives" }
74 changes: 41 additions & 33 deletions core/network/src/consensus_gossip.rs
@@ -18,18 +18,20 @@
//! Handles chain-specific and standard BFT messages.

use std::collections::{HashMap, HashSet};
use futures::sync::mpsc;
use std::time::{Instant, Duration};
use futures::sync::mpsc;
use rand::{self, seq::SliceRandom};
use lru_cache::LruCache;
use network_libp2p::NodeIndex;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Hash, HashFor};
use runtime_primitives::traits::{Block as BlockT, Hash, HashFor};
use runtime_primitives::generic::BlockId;
pub use message::generic::{Message, ConsensusMessage};
use protocol::Context;
use config::Roles;

// FIXME: Add additional spam/DoS attack protection: https://github.com/paritytech/substrate/issues/1115
const MESSAGE_LIFETIME: Duration = Duration::from_secs(600);
const MESSAGE_LIFETIME: Duration = Duration::from_secs(120);
const DEAD_TOPICS_CACHE_SIZE: usize = 4096;

struct PeerConsensus<H> {
known_messages: HashSet<H>,
@@ -49,6 +51,7 @@ pub struct ConsensusGossip<B: BlockT> {
live_message_sinks: HashMap<B::Hash, Vec<mpsc::UnboundedSender<ConsensusMessage>>>,
messages: Vec<MessageEntry<B>>,
known_messages: HashSet<(B::Hash, B::Hash)>,
known_dead_topics: LruCache<B::Hash, ()>,
message_times: HashMap<(B::Hash, B::Hash), Instant>,
session_start: Option<B::Hash>,
}
@@ -61,6 +64,7 @@ impl<B: BlockT> ConsensusGossip<B> {
live_message_sinks: HashMap::new(),
messages: Default::default(),
known_messages: Default::default(),
known_dead_topics: LruCache::new(DEAD_TOPICS_CACHE_SIZE),
message_times: Default::default(),
session_start: None
}
@@ -150,7 +154,9 @@ impl<B: BlockT> ConsensusGossip<B> {
fn register_message<F>(&mut self, message_hash: B::Hash, topic: B::Hash, broadcast: bool, get_message: F)
where F: Fn() -> ConsensusMessage
{
if self.known_messages.insert((topic, message_hash)) {
if !self.known_dead_topics.contains_key(&topic) &&
self.known_messages.insert((topic, message_hash))
{
self.messages.push(MessageEntry {
topic,
message_hash,
@@ -167,6 +173,11 @@ impl<B: BlockT> ConsensusGossip<B> {
self.peers.remove(&who);
}

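/// Prune all messages for the given topic and remember the topic as dead, so
/// that any message arriving for it afterwards is ignored.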
pub fn collect_garbage_for_topic(&mut self, topic: B::Hash) {
self.known_dead_topics.insert(topic, ());
self.collect_garbage(|_| true);
}

/// Prune old or no longer relevant consensus messages. Provide a predicate
/// for pruning, which returns `false` when the items with a given topic should be pruned.
pub fn collect_garbage<P: Fn(&B::Hash) -> bool>(&mut self, predicate: P) {
@@ -177,22 +188,24 @@

let message_times = &mut self.message_times;
let known_messages = &mut self.known_messages;
let known_dead_topics = &mut self.known_dead_topics;
let before = self.messages.len();
let now = Instant::now();

self.messages.retain(|entry| {
message_times.get(&(entry.topic, entry.message_hash))
.map(|instant| *instant + MESSAGE_LIFETIME >= now && predicate(&entry.topic))
.unwrap_or(false)
!known_dead_topics.contains_key(&entry.topic) &&
message_times.get(&(entry.topic, entry.message_hash))
.map(|instant| *instant + MESSAGE_LIFETIME >= now && predicate(&entry.topic))
.unwrap_or(false)
});

known_messages.retain(|(topic, message_hash)| {
message_times.get(&(*topic, *message_hash))
.map(|instant| *instant + (2 * MESSAGE_LIFETIME) >= now && predicate(topic))
.map(|instant| *instant + (5 * MESSAGE_LIFETIME) >= now)
.unwrap_or(false)
});

trace!(target:"gossip", "Cleaned up {} stale messages, {} left ({} known)",
trace!(target: "gossip", "Cleaned up {} stale messages, {} left ({} known)",
before - self.messages.len(),
self.messages.len(),
known_messages.len(),
@@ -230,26 +243,16 @@
) -> Option<(B::Hash, ConsensusMessage)> {
let message_hash = HashFor::<B>::hash(&message[..]);

if self.known_messages.contains(&(topic, message_hash)) {
trace!(target:"gossip", "Ignored already known message from {} in {}", who, topic);
if self.known_dead_topics.contains_key(&topic) {
trace!(target:"gossip", "Ignored message from {} in dead topic {}", who, topic);
return None;
}

match (protocol.client().info(), protocol.client().header(&BlockId::Hash(topic))) {
(_, Err(e)) | (Err(e), _) => {
debug!(target:"gossip", "Error reading blockchain: {:?}", e);
return None;
},
(Ok(info), Ok(Some(header))) => {
if header.number() < &info.chain.best_number {
trace!(target:"gossip", "Ignored ancient message from {}, hash={}", who, topic);
return None;
}
},
(Ok(_), Ok(None)) => {},
if self.known_messages.contains(&(topic, message_hash)) {
trace!(target:"gossip", "Ignored already known message from {} in {}", who, topic);
return None;
}


if let Some(ref mut peer) = self.peers.get_mut(&who) {
use std::collections::hash_map::Entry;
peer.known_messages.insert((topic, message_hash));
@@ -329,13 +332,15 @@ mod tests {

macro_rules! push_msg {
($topic:expr, $hash: expr, $now: expr, $m:expr) => {
consensus.messages.push(MessageEntry {
topic: $topic,
message_hash: $hash,
message: $m,
broadcast: false,
});
consensus.message_times.insert(($topic, $hash), $now);
if consensus.known_messages.insert(($topic, $hash)) {
consensus.messages.push(MessageEntry {
topic: $topic,
message_hash: $hash,
message: $m,
broadcast: false,
});
consensus.message_times.insert(($topic, $hash), $now);
}
}
}

@@ -357,18 +362,21 @@
// topic that was used in one message.
consensus.collect_garbage(|topic| topic != &prev_hash);
assert_eq!(consensus.messages.len(), 1);
assert_eq!(consensus.known_messages.len(), 1);
// known messages are only pruned based on expiration time
assert_eq!(consensus.known_messages.len(), 2);
assert!(consensus.known_messages.contains(&(best_hash, m2_hash)));

// make timestamp expired, but the message is still kept as known
consensus.messages.clear();
consensus.known_messages.clear();
push_msg!(best_hash, m2_hash, now - MESSAGE_LIFETIME, m2.clone());
consensus.collect_garbage(|_topic| true);
assert!(consensus.messages.is_empty());
assert_eq!(consensus.known_messages.len(), 1);

// make timestamp expired past the known message lifetime
push_msg!(best_hash, m2_hash, now - (2 * MESSAGE_LIFETIME), m2);
consensus.known_messages.clear();
push_msg!(best_hash, m2_hash, now - (5 * MESSAGE_LIFETIME), m2);
consensus.collect_garbage(|_topic| true);
assert!(consensus.messages.is_empty());
assert!(consensus.known_messages.is_empty());
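
The known_dead_topics cache added in this file is what makes the topic-based cleanup stick: once a topic is garbage-collected it is remembered in a bounded LRU, and later gossip for it is dropped on arrival instead of re-entering the message pool. A small standalone sketch of that bookkeeping, using the lru-cache crate added to Cargo.toml above (the u64 topic type and the values are illustrative):

extern crate lru_cache;

use lru_cache::LruCache;

const DEAD_TOPICS_CACHE_SIZE: usize = 4096;

fn main() {
    // Topics are B::Hash in the real code; u64 is a stand-in here. The cache
    // is bounded, so memory stays constant even if many rounds/sets die.
    let mut known_dead_topics: LruCache<u64, ()> = LruCache::new(DEAD_TOPICS_CACHE_SIZE);

    // collect_garbage_for_topic marks a topic as dead...
    let dead_topic = 42u64;
    known_dead_topics.insert(dead_topic, ());

    // ...so anything arriving for that topic later is simply ignored.
    let incoming_topic = 42u64;
    if known_dead_topics.contains_key(&incoming_topic) {
        println!("ignoring message for dead topic {}", incoming_topic);
    }
}
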
1 change: 1 addition & 0 deletions core/network/src/lib.rs
@@ -21,6 +21,7 @@
//! Allows attachment of an optional subprotocol for chain-specific requests.

extern crate linked_hash_map;
extern crate lru_cache;
extern crate parking_lot;
extern crate substrate_primitives as primitives;
extern crate substrate_client as client;