diff --git a/availability-store/src/lib.rs b/availability-store/src/lib.rs
index 831a095be4d7..5e7960d141a6 100644
--- a/availability-store/src/lib.rs
+++ b/availability-store/src/lib.rs
@@ -14,13 +14,17 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-//! Persistent database for parachain data.
+//! Persistent database for parachain data: PoV block data and outgoing messages.
+//!
+//! This database is written to during the block validation pipeline and queried
+//! by networking code in order to circulate required data and maintain its
+//! availability.
use codec::{Encode, Decode};
use kvdb::{KeyValueDB, DBTransaction};
use kvdb_rocksdb::{Database, DatabaseConfig};
use polkadot_primitives::Hash;
-use polkadot_primitives::parachain::{Id as ParaId, BlockData, Extrinsic};
+use polkadot_primitives::parachain::{Id as ParaId, BlockData, Message};
use log::warn;
use std::collections::HashSet;
@@ -42,7 +46,7 @@ pub struct Config {
pub path: PathBuf,
}
-/// Some data to keep available.
+/// Some data to keep available about a parachain block candidate.
pub struct Data {
/// The relay chain parent hash this should be localized to.
pub relay_parent: Hash,
@@ -52,18 +56,16 @@ pub struct Data {
pub candidate_hash: Hash,
/// Block data.
pub block_data: BlockData,
- /// Extrinsic data.
-	pub extrinsic: Option<Extrinsic>,
+ /// Outgoing message queues from execution of the block, if any.
+ ///
+ /// The tuple pairs the message queue root and the queue data.
+	pub outgoing_queues: Option<Vec<(Hash, Vec<Message>)>>,
}
fn block_data_key(relay_parent: &Hash, candidate_hash: &Hash) -> Vec<u8> {
(relay_parent, candidate_hash, 0i8).encode()
}
-fn extrinsic_key(relay_parent: &Hash, candidate_hash: &Hash) -> Vec<u8> {
- (relay_parent, candidate_hash, 1i8).encode()
-}
-
/// Handle to the availability store.
#[derive(Clone)]
pub struct Store {
@@ -96,6 +98,16 @@ impl Store {
}
/// Make some data available provisionally.
+ ///
+	/// Validators responsible for maintaining availability of a block, as well
+	/// as collators producing one, call this function to persist the data to
+	/// disk so that it can be queried and provided to other nodes in the network.
+ ///
+	/// The message data of `Data` is optional. It is expected to be present
+	/// except in the case where the block is invalid and so produced no message
+	/// data. Determining invalidity is beyond the scope of this function.
pub fn make_available(&self, data: Data) -> io::Result<()> {
let mut tx = DBTransaction::new();
@@ -118,12 +130,16 @@ impl Store {
data.block_data.encode()
);
- if let Some(extrinsic) = data.extrinsic {
- tx.put_vec(
- columns::DATA,
- extrinsic_key(&data.relay_parent, &data.candidate_hash).as_slice(),
- extrinsic.encode(),
- );
+ if let Some(outgoing_queues) = data.outgoing_queues {
+ // This is kept forever and not pruned.
+ for (root, messages) in outgoing_queues {
+ tx.put_vec(
+ columns::DATA,
+ root.as_ref(),
+ messages.encode(),
+ );
+ }
}
self.inner.write(tx)
@@ -146,7 +162,6 @@ impl Store {
for candidate_hash in v {
if !finalized_candidates.contains(&candidate_hash) {
tx.delete(columns::DATA, block_data_key(&parent, &candidate_hash).as_slice());
- tx.delete(columns::DATA, extrinsic_key(&parent, &candidate_hash).as_slice());
}
}
@@ -168,12 +183,11 @@ impl Store {
}
}
- /// Query extrinsic data.
-	pub fn extrinsic(&self, relay_parent: Hash, candidate_hash: Hash) -> Option<Extrinsic> {
- let encoded_key = extrinsic_key(&relay_parent, &candidate_hash);
- match self.inner.get(columns::DATA, &encoded_key[..]) {
+ /// Query message queue data by message queue root hash.
+	pub fn queue_by_root(&self, queue_root: &Hash) -> Option<Vec<Message>> {
+ match self.inner.get(columns::DATA, queue_root.as_ref()) {
Ok(Some(raw)) => Some(
- Extrinsic::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed")
+ <_>::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed")
),
Ok(None) => None,
Err(e) => {
@@ -207,7 +221,7 @@ mod tests {
parachain_id: para_id_1,
candidate_hash: candidate_1,
block_data: block_data_1.clone(),
- extrinsic: Some(Extrinsic { outgoing_messages: Vec::new() }),
+ outgoing_queues: None,
}).unwrap();
store.make_available(Data {
@@ -215,21 +229,53 @@ mod tests {
parachain_id: para_id_2,
candidate_hash: candidate_2,
block_data: block_data_2.clone(),
- extrinsic: Some(Extrinsic { outgoing_messages: Vec::new() }),
+ outgoing_queues: None,
}).unwrap();
assert_eq!(store.block_data(relay_parent, candidate_1).unwrap(), block_data_1);
assert_eq!(store.block_data(relay_parent, candidate_2).unwrap(), block_data_2);
- assert!(store.extrinsic(relay_parent, candidate_1).is_some());
- assert!(store.extrinsic(relay_parent, candidate_2).is_some());
-
store.candidates_finalized(relay_parent, [candidate_1].iter().cloned().collect()).unwrap();
assert_eq!(store.block_data(relay_parent, candidate_1).unwrap(), block_data_1);
assert!(store.block_data(relay_parent, candidate_2).is_none());
+ }
+
+ #[test]
+ fn queues_available_by_queue_root() {
+ let relay_parent = [1; 32].into();
+ let para_id = 5.into();
+ let candidate = [2; 32].into();
+ let block_data = BlockData(vec![1, 2, 3]);
+
+ let message_queue_root_1 = [0x42; 32].into();
+ let message_queue_root_2 = [0x43; 32].into();
- assert!(store.extrinsic(relay_parent, candidate_1).is_some());
- assert!(store.extrinsic(relay_parent, candidate_2).is_none());
+ let message_a = Message(vec![1, 2, 3, 4]);
+ let message_b = Message(vec![4, 5, 6, 7]);
+
+ let outgoing_queues = vec![
+ (message_queue_root_1, vec![message_a.clone()]),
+ (message_queue_root_2, vec![message_b.clone()]),
+ ];
+
+ let store = Store::new_in_memory();
+ store.make_available(Data {
+ relay_parent,
+ parachain_id: para_id,
+ candidate_hash: candidate,
+ block_data: block_data.clone(),
+ outgoing_queues: Some(outgoing_queues),
+ }).unwrap();
+
+ assert_eq!(
+ store.queue_by_root(&message_queue_root_1),
+ Some(vec![message_a]),
+ );
+
+ assert_eq!(
+ store.queue_by_root(&message_queue_root_2),
+ Some(vec![message_b]),
+ );
}
}
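
A minimal sketch of the flow this store now supports, assuming the in-memory constructor used by the tests above. The `sketch` function and the literal hashes are hypothetical; a real queue root must be the trie root of the queue's messages.

use polkadot_primitives::Hash;
use polkadot_primitives::parachain::{BlockData, Message};

fn sketch() {
	let store = Store::new_in_memory();

	let queue_root: Hash = [0xaa; 32].into();
	let queue = vec![Message(vec![1, 2, 3])];

	// A validator charged with availability (or a collator) persists the
	// candidate's data, with each outgoing queue keyed by its root.
	store.make_available(Data {
		relay_parent: [0; 32].into(),
		parachain_id: 5.into(),
		candidate_hash: [1; 32].into(),
		block_data: BlockData(vec![42]),
		outgoing_queues: Some(vec![(queue_root, queue.clone())]),
	}).unwrap();

	// Any peer can now fetch the queue by its root alone, without knowing
	// the candidate; queue data is kept forever and is not pruned on
	// finalization.
	assert_eq!(store.queue_by_root(&queue_root), Some(queue));
}
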
diff --git a/collator/src/lib.rs b/collator/src/lib.rs
index f739f3060e0e..995f68f7557f 100644
--- a/collator/src/lib.rs
+++ b/collator/src/lib.rs
@@ -57,7 +57,7 @@ use primitives::Pair;
use polkadot_primitives::{
BlockId, Hash, Block,
parachain::{
- self, BlockData, DutyRoster, HeadData, ConsolidatedIngress, Message, Id as ParaId, Extrinsic,
+ self, BlockData, DutyRoster, HeadData, ConsolidatedIngress, Message, Id as ParaId, OutgoingMessages,
PoVBlock, Status as ParachainStatus, ValidatorId, CollatorPair,
}
};
@@ -65,8 +65,8 @@ use polkadot_cli::{
Worker, IntoExit, ProvideRuntimeApi, TaskExecutor, AbstractService,
CustomConfiguration, ParachainHost,
};
-use polkadot_network::validation::{SessionParams, ValidationNetwork};
-use polkadot_network::{NetworkService, PolkadotProtocol};
+use polkadot_network::validation::{LeafWorkParams, ValidationNetwork};
+use polkadot_network::{PolkadotNetworkService, PolkadotProtocol};
use tokio::timer::Timeout;
pub use polkadot_cli::VersionInfo;
@@ -91,7 +91,7 @@ pub trait Network: Send + Sync {
	fn checked_statements(&self, relay_parent: Hash) -> Box<dyn Stream<Item=SignedStatement, Error=()>>;
}
-impl<P, E> Network for ValidationNetwork<P, E, NetworkService, TaskExecutor> where
+impl<P, E> Network for ValidationNetwork<P, E, PolkadotNetworkService, TaskExecutor> where
P: 'static + Send + Sync,
E: 'static + Send + Sync,
{
@@ -142,7 +142,7 @@ pub trait BuildParachainContext {
/// This can be implemented through an externally attached service or a stub.
/// This is expected to be a lightweight, shared type like an Arc.
pub trait ParachainContext: Clone {
-	type ProduceCandidate: IntoFuture<Item=(BlockData, HeadData, Extrinsic), Error=InvalidHead>;
+	type ProduceCandidate: IntoFuture<Item=(BlockData, HeadData, OutgoingMessages), Error=InvalidHead>;
/// Produce a candidate, given the relay parent hash, the latest ingress queue information
/// and the last parachain head.
@@ -177,7 +177,7 @@ pub fn collate<'a, R, P>(
para_context: P,
	key: Arc<CollatorPair>,
)
-	-> impl Future<Item=parachain::Collation, Error=Error<R::Error>> + 'a
+	-> impl Future<Item=(parachain::Collation, OutgoingMessages), Error=Error<R::Error>> + 'a
where
R: RelayChainContext,
R::Error: 'a,
@@ -197,11 +197,11 @@ pub fn collate<'a, R, P>(
.map(move |x| (ingress, x))
.map_err(Error::Collator)
})
- .and_then(move |(ingress, (block_data, head_data, mut extrinsic))| {
+ .and_then(move |(ingress, (block_data, head_data, mut outgoing))| {
let block_data_hash = block_data.hash();
let signature = key.sign(block_data_hash.as_ref()).into();
let egress_queue_roots =
- polkadot_validation::egress_roots(&mut extrinsic.outgoing_messages);
+ polkadot_validation::egress_roots(&mut outgoing.outgoing_messages);
let receipt = parachain::CandidateReceipt {
parachain_index: local_id,
@@ -214,19 +214,21 @@ pub fn collate<'a, R, P>(
upward_messages: Vec::new(),
};
- Ok(parachain::Collation {
+ let collation = parachain::Collation {
receipt,
pov: PoVBlock {
block_data,
ingress,
},
- })
+ };
+
+ Ok((collation, outgoing))
})
}
/// Polkadot-api context.
struct ApiContext<P, E>
where
context,
parachain_context,
key,
- ).map(move |collation| {
- network.with_spec(move |spec, ctx| spec.add_local_collation(
- ctx,
- relay_parent,
- targets,
- collation,
- ));
+ ).map(move |(collation, outgoing)| {
+ network.with_spec(move |spec, ctx| {
+ let res = spec.add_local_collation(
+ ctx,
+ relay_parent,
+ targets,
+ collation,
+ outgoing,
+ );
+
+ if let Err(e) = res {
+ warn!("Unable to broadcast local collation: {:?}", e);
+ }
+ })
});
future::Either::B(collation_work)
@@ -450,7 +461,7 @@ pub fn run_collator(
#[cfg(test)]
mod tests {
use std::collections::HashMap;
- use polkadot_primitives::parachain::{OutgoingMessage, FeeSchedule};
+ use polkadot_primitives::parachain::{TargetedMessage, FeeSchedule};
use keyring::Sr25519Keyring;
use super::*;
@@ -475,20 +486,20 @@ mod tests {
struct DummyParachainContext;
impl ParachainContext for DummyParachainContext {
- type ProduceCandidate = Result<(BlockData, HeadData, Extrinsic), InvalidHead>;
+ type ProduceCandidate = Result<(BlockData, HeadData, OutgoingMessages), InvalidHead>;
	fn produce_candidate<I: IntoIterator<Item=(ParaId, Message)>>(
&self,
_relay_parent: Hash,
_status: ParachainStatus,
ingress: I,
- ) -> Result<(BlockData, HeadData, Extrinsic), InvalidHead> {
+ ) -> Result<(BlockData, HeadData, OutgoingMessages), InvalidHead> {
// send messages right back.
Ok((
BlockData(vec![1, 2, 3, 4, 5,]),
HeadData(vec![9, 9, 9]),
- Extrinsic {
- outgoing_messages: ingress.into_iter().map(|(id, msg)| OutgoingMessage {
+ OutgoingMessages {
+ outgoing_messages: ingress.into_iter().map(|(id, msg)| TargetedMessage {
target: id,
data: msg.0,
}).collect(),
@@ -542,7 +553,7 @@ mod tests {
context.clone(),
DummyParachainContext,
Arc::new(Sr25519Keyring::Alice.pair().into()),
- ).wait().unwrap();
+ ).wait().unwrap().0;
// ascending order by root.
assert_eq!(collation.receipt.egress_queue_roots, vec![(a, root_a), (b, root_b)]);
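
Since `collate` now resolves to a `(Collation, OutgoingMessages)` pair rather than folding messages into an `Extrinsic`, callers take over handing the messages to the availability and gossip layers. A hedged sketch of the invariant between the two halves of the pair; `handle_collation` is a hypothetical caller standing in for the networking glue in `run_collator`:

use polkadot_primitives::parachain::{Collation, OutgoingMessages};

fn handle_collation(collation: Collation, mut outgoing: OutgoingMessages) {
	// `egress_roots` sorts the targeted messages and yields one queue root
	// per destination parachain, ascending by para ID.
	let roots = polkadot_validation::egress_roots(&mut outgoing.outgoing_messages);

	// The receipt built by `collate` committed to exactly these roots.
	assert_eq!(collation.receipt.egress_queue_roots, roots);
}
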
diff --git a/erasure-coding/src/lib.rs b/erasure-coding/src/lib.rs
index 6460c45794e9..cd89bdfc7491 100644
--- a/erasure-coding/src/lib.rs
+++ b/erasure-coding/src/lib.rs
@@ -27,7 +27,7 @@
use codec::{Encode, Decode};
use reed_solomon::galois_16::{self, ReedSolomon};
use primitives::{Hash as H256, BlakeTwo256, HashT};
-use primitives::parachain::{BlockData, Extrinsic};
+use primitives::parachain::{BlockData, OutgoingMessages};
use substrate_primitives::Blake2Hasher;
use trie::{EMPTY_PREFIX, MemoryDB, Trie, TrieMut, trie_types::{TrieDBMut, TrieDB}};
@@ -124,11 +124,11 @@ fn code_params(n_validators: usize) -> Result<CodeParams, Error> {
/// Obtain erasure-coded chunks, one for each validator.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
-pub fn obtain_chunks(n_validators: usize, block_data: &BlockData, extrinsic: &Extrinsic)
+pub fn obtain_chunks(n_validators: usize, block_data: &BlockData, outgoing: &OutgoingMessages)
	-> Result<Vec<Vec<u8>>, Error>
{
let params = code_params(n_validators)?;
- let encoded = (block_data, extrinsic).encode();
+ let encoded = (block_data, outgoing).encode();
if encoded.is_empty() {
return Err(Error::BadPayload);
@@ -150,7 +150,7 @@ pub fn obtain_chunks(n_validators: usize, block_data: &BlockData, extrinsic: &Extrinsic)
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn reconstruct<'a, I: 'a>(n_validators: usize, chunks: I)
- -> Result<(BlockData, Extrinsic), Error>
+ -> Result<(BlockData, OutgoingMessages), Error>
	where I: IntoIterator<Item=(&'a [u8], usize)>
{
let params = code_params(n_validators)?;
@@ -399,7 +399,7 @@ mod tests {
#[test]
fn round_trip_block_data() {
let block_data = BlockData((0..255).collect());
- let ex = Extrinsic { outgoing_messages: Vec::new() };
+ let ex = OutgoingMessages { outgoing_messages: Vec::new() };
let chunks = obtain_chunks(
10,
&block_data,
@@ -428,7 +428,7 @@ mod tests {
let chunks = obtain_chunks(
10,
&block_data,
- &Extrinsic { outgoing_messages: Vec::new() },
+ &OutgoingMessages { outgoing_messages: Vec::new() },
).unwrap();
let chunks: Vec<_> = chunks.iter().map(|c| &c[..]).collect();
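
For context, a sketch of the renamed round trip: the coding itself is untouched and still operates over the encoded `(BlockData, OutgoingMessages)` pair. The `(chunk, index)` item shape fed to `reconstruct` is an assumption based on the `IntoIterator` bound above.

use primitives::parachain::{BlockData, OutgoingMessages};

fn round_trip_sketch() {
	let block_data = BlockData((0..255).collect());
	let outgoing = OutgoingMessages { outgoing_messages: Vec::new() };

	// One erasure-coded chunk per validator.
	let chunks = obtain_chunks(10, &block_data, &outgoing).unwrap();

	// Hand every chunk back, paired with its index; any sufficiently large
	// subset would also reconstruct.
	let (data, messages) = reconstruct(
		10,
		chunks.iter().map(|c| &c[..]).zip(0..),
	).unwrap();

	assert_eq!(data, block_data);
	assert!(messages.outgoing_messages.is_empty());
}
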
diff --git a/network/Cargo.toml b/network/Cargo.toml
index 9791a97c23f7..2675f73842c9 100644
--- a/network/Cargo.toml
+++ b/network/Cargo.toml
@@ -18,7 +18,7 @@ sr-primitives = { git = "https://github.com/paritytech/substrate", branch = "pol
futures = "0.1"
log = "0.4"
exit-future = "0.1.4"
+substrate-client = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
[dev-dependencies]
-substrate-client = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
substrate-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
diff --git a/network/src/gossip.rs b/network/src/gossip.rs
index 84caa6eaeff5..1f6c5382c55e 100644
--- a/network/src/gossip.rs
+++ b/network/src/gossip.rs
@@ -14,37 +14,85 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-//! Gossip messages and the message validator
-
+//! Gossip messages and the message validator.
+//!
+//! At the moment, this module houses two gossip protocols central to Polkadot.
+//!
+//! The first is the attestation-gossip system, which aims to circulate parachain
+//! candidate attestations by validators at leaves of the block-DAG.
+//!
+//! The second is the inter-chain message queue routing gossip, which aims to
+//! circulate parachain message queues that remain unrouted as of recent
+//! leaves.
+//!
+//! These gossip systems do not have any form of sybil-resistance in terms
+//! of the nodes which can participate. It could be imposed, e.g. by restricting
+//! participation to validators, but this would prevent message queues from
+//! getting into the hands of collators and attestations from getting into the
+//! hands of fishermen. As such, we take certain precautions which allow arbitrary
+//! full nodes to join the gossip graph, as well as validators (who are likely
+//! to be well-connected amongst themselves).
+//!
+//! The first precaution is the notion of a neighbor packet. This is a packet sent between
+//! neighbors of the gossip graph to inform each other of their current protocol
+//! state. As of this writing, for both attestation and message-routing gossip,
+//! the only necessary information here is a (length-limited) set of perceived
+//! leaves of the block-DAG.
+//!
+//! These leaves can be used to derive what information a node is willing to accept.
+//! There is typically an unbounded amount of possible "future" information relative to
+//! any protocol state. For example, attestations or unrouted message queues from millions
+//! of blocks after a known protocol state. The neighbor packet is meant to avoid being
+//! spammed by illegitimate future information, while informing neighbors of when
+//! previously-future and now current gossip messages would be accepted.
+//!
+//! Peers who send information which was not allowed under a recent neighbor packet
+//! will be noted as non-beneficial to Substrate's peer-set management utility.
+
+use sr_primitives::{generic::BlockId, traits::ProvideRuntimeApi};
+use substrate_client::error::Error as ClientError;
use substrate_network::{config::Roles, PeerId};
use substrate_network::consensus_gossip::{
self as network_gossip, ValidationResult as GossipValidationResult,
ValidatorContext, MessageIntent, ConsensusMessage,
};
-use polkadot_validation::{GenericStatement, SignedStatement};
-use polkadot_primitives::{Block, Hash, parachain::{ValidatorIndex, ValidatorId}};
+use polkadot_validation::SignedStatement;
+use polkadot_primitives::{Block, Hash};
+use polkadot_primitives::parachain::{ParachainHost, ValidatorId, Message as ParachainMessage};
use codec::{Decode, Encode};
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
use std::sync::Arc;
+use arrayvec::ArrayVec;
use parking_lot::RwLock;
use log::warn;
-use super::NetworkService;
+use super::PolkadotNetworkService;
use crate::router::attestation_topic;
+use attestation::{View as AttestationView, PeerData as AttestationPeerData};
+use message_routing::{View as MessageRoutingView};
+
+mod attestation;
+mod message_routing;
+
/// The engine ID of the polkadot attestation system.
-pub const POLKADOT_ENGINE_ID: sr_primitives::ConsensusEngineId = [b'd', b'o', b't', b'1'];
+pub const POLKADOT_ENGINE_ID: sr_primitives::ConsensusEngineId = *b"dot1";
// arbitrary; in practice this should not be more than 2.
-const MAX_CHAIN_HEADS: usize = 5;
+pub(crate) const MAX_CHAIN_HEADS: usize = 5;
+
+/// Type alias for a bounded vector of leaves.
+pub type LeavesVec = ArrayVec<[Hash; MAX_CHAIN_HEADS]>;
mod benefit {
/// When a peer sends us a previously-unknown candidate statement.
pub const NEW_CANDIDATE: i32 = 100;
/// When a peer sends us a previously-unknown attestation.
pub const NEW_ATTESTATION: i32 = 50;
+ /// When a peer sends us a previously-unknown message packet.
+ pub const NEW_ICMP_MESSAGES: i32 = 50;
}
mod cost {
@@ -60,6 +108,15 @@ mod cost {
pub const BAD_SIGNATURE: i32 = -500;
/// A peer sent us a bad neighbor packet.
pub const BAD_NEIGHBOR_PACKET: i32 = -300;
+ /// A peer sent us an ICMP queue we haven't advertised a need for.
+ pub const UNNEEDED_ICMP_MESSAGES: i32 = -100;
+
+ /// A peer sent us an ICMP queue with a bad root.
+ pub fn icmp_messages_root_mismatch(n_messages: usize) -> i32 {
+ const PER_MESSAGE: i32 = -150;
+
+ (0..n_messages).map(|_| PER_MESSAGE).sum()
+ }
}
/// A gossip message.
@@ -72,35 +129,84 @@ pub enum GossipMessage {
/// Non-candidate statements should only be sent to peers who are aware of the candidate.
#[codec(index = "2")]
Statement(GossipStatement),
+ /// A packet of messages from one parachain to another.
+ #[codec(index = "3")]
+ ParachainMessages(GossipParachainMessages),
// TODO: https://github.com/paritytech/polkadot/issues/253
// erasure-coded chunks.
}
+impl GossipMessage {
+ fn to_consensus_message(&self) -> ConsensusMessage {
+ ConsensusMessage {
+ data: self.encode(),
+ engine_id: POLKADOT_ENGINE_ID,
+ }
+ }
+}
+
+impl From<NeighborPacket> for GossipMessage {
+ fn from(packet: NeighborPacket) -> Self {
+ GossipMessage::Neighbor(VersionedNeighborPacket::V1(packet))
+ }
+}
+
impl From<GossipStatement> for GossipMessage {
fn from(stmt: GossipStatement) -> Self {
GossipMessage::Statement(stmt)
}
}
+impl From<GossipParachainMessages> for GossipMessage {
+ fn from(messages: GossipParachainMessages) -> Self {
+ GossipMessage::ParachainMessages(messages)
+ }
+}
+
/// A gossip message containing a statement.
#[derive(Encode, Decode, Clone)]
pub struct GossipStatement {
- /// The relay chain parent hash.
- pub relay_parent: Hash,
+ /// The block hash of the relay chain being referred to. In context, this should
+ /// be a leaf.
+ pub relay_chain_leaf: Hash,
/// The signed statement being gossipped.
pub signed_statement: SignedStatement,
}
impl GossipStatement {
/// Create a new instance.
- pub fn new(relay_parent: Hash, signed_statement: SignedStatement) -> Self {
+ pub fn new(relay_chain_leaf: Hash, signed_statement: SignedStatement) -> Self {
Self {
- relay_parent,
+ relay_chain_leaf,
signed_statement,
}
}
}
+/// A packet of messages from one parachain to another.
+///
+/// These are all the messages posted from one parachain to another during the
+/// execution of a single parachain block. Since this parachain block may have been
+/// included in many forks of the relay chain, there is no relay-chain leaf parameter.
+#[derive(Encode, Decode, Clone)]
+pub struct GossipParachainMessages {
+ /// The root of the message queue.
+ pub queue_root: Hash,
+ /// The messages themselves.
+	pub messages: Vec<ParachainMessage>,
+}
+
+impl GossipParachainMessages {
+ // confirms that the queue-root in the struct correctly matches
+ // the messages.
+ fn queue_root_is_correct(&self) -> bool {
+ let root = polkadot_validation::message_queue_root(
+ self.messages.iter().map(|m| &m.0)
+ );
+ root == self.queue_root
+ }
+}
+
/// A versioned neighbor message.
#[derive(Encode, Decode, Clone)]
pub enum VersionedNeighborPacket {
@@ -126,28 +232,60 @@ pub enum Known {
Bad,
}
-/// An oracle for known blocks.
-pub trait KnownOracle: Send + Sync {
+/// Context to the underlying polkadot chain.
+pub trait ChainContext: Send + Sync {
+ /// Provide a closure which is invoked for every unrouted queue hash at a given leaf.
+ fn leaf_unrouted_roots(
+ &self,
+ leaf: &Hash,
+ with_queue_root: &mut dyn FnMut(&Hash),
+ ) -> Result<(), ClientError>;
+
/// whether a block is known. If it's not, returns `None`.
	fn is_known(&self, block_hash: &Hash) -> Option<Known>;
}
-impl<F> KnownOracle for F where F: Fn(&Hash) -> Option<Known> + Send + Sync {
+impl<F, P> ChainContext for (F, P) where
+	F: Fn(&Hash) -> Option<Known> + Send + Sync,
+	P: Send + Sync + std::ops::Deref,
+	P::Target: ProvideRuntimeApi,
+	<P::Target as ProvideRuntimeApi>::Api: ParachainHost<Block>,
+{
	fn is_known(&self, block_hash: &Hash) -> Option<Known> {
- (self)(block_hash)
+ (self.0)(block_hash)
+ }
+
+ fn leaf_unrouted_roots(
+ &self,
+ &leaf: &Hash,
+ with_queue_root: &mut dyn FnMut(&Hash),
+ ) -> Result<(), ClientError> {
+ let api = self.1.runtime_api();
+
+ let leaf_id = BlockId::Hash(leaf);
+ let active_parachains = api.active_parachains(&leaf_id)?;
+
+ for para_id in active_parachains {
+ if let Some(ingress) = api.ingress(&leaf_id, para_id, None)? {
+ for (_height, _from, queue_root) in ingress.iter() {
+ with_queue_root(queue_root);
+ }
+ }
+ }
+
+ Ok(())
}
}
/// Register a gossip validator on the network service.
-///
-/// This returns a `RegisteredMessageValidator`
// NOTE: since RegisteredMessageValidator is meant to be a type-safe proof
// that we've actually done the registration, this should be the only way
// to construct it outside of tests.
-pub fn register_validator<O: KnownOracle + 'static>(
-	service: Arc<NetworkService>,
-	oracle: O,
-) -> RegisteredMessageValidator {
+pub fn register_validator<C: ChainContext + 'static>(
+	service: Arc<PolkadotNetworkService>,
+	chain: C,
+) -> RegisteredMessageValidator
+{
let s = service.clone();
let report_handle = Box::new(move |peer: &PeerId, cost_benefit| {
s.report_peer(peer.clone(), cost_benefit);
@@ -156,8 +294,9 @@ pub fn register_validator(
report_handle,
inner: RwLock::new(Inner {
peers: HashMap::new(),
- our_view: Default::default(),
- oracle,
+ attestation_view: Default::default(),
+ message_routing_view: Default::default(),
+ chain,
})
});
@@ -169,8 +308,42 @@ pub fn register_validator(
RegisteredMessageValidator { inner: validator as _ }
}
+#[derive(PartialEq)]
+enum NewLeafAction {
+ // (who, message)
+ TargetedMessage(PeerId, ConsensusMessage),
+ // (topic, message)
+ Multicast(Hash, ConsensusMessage),
+}
+
+/// Actions to take after noting a new block-DAG leaf.
+///
+/// This should be consumed by passing a consensus-gossip handle to `perform`.
+#[must_use = "New chain-head gossip actions must be performed"]
+pub struct NewLeafActions {
+	actions: Vec<NewLeafAction>,
+}
+
+impl NewLeafActions {
+ /// Perform the queued actions, feeding into gossip.
+ pub fn perform(
+ self,
+ gossip: &mut dyn crate::GossipService,
+		ctx: &mut dyn substrate_network::Context<Block>,
+ ) {
+ for action in self.actions {
+ match action {
+ NewLeafAction::TargetedMessage(who, message)
+ => gossip.send_message(ctx, &who, message),
+ NewLeafAction::Multicast(topic, message)
+ => gossip.multicast(ctx, &topic, message),
+ }
+ }
+ }
+}
+
/// Register a gossip validator for a non-authority node.
-pub fn register_non_authority_validator(service: Arc<NetworkService>) {
+pub fn register_non_authority_validator(service: Arc<PolkadotNetworkService>) {
service.with_gossip(|gossip, ctx|
gossip.register_validator(
ctx,
@@ -184,58 +357,92 @@ pub fn register_non_authority_validator(service: Arc) {
/// Create this using `register_validator`.
#[derive(Clone)]
pub struct RegisteredMessageValidator {
-	inner: Arc<MessageValidator<dyn KnownOracle>>,
+	inner: Arc<MessageValidator<dyn ChainContext>>,
}
impl RegisteredMessageValidator {
#[cfg(test)]
-	pub(crate) fn new_test<O: KnownOracle>(
-		oracle: O,
+	pub(crate) fn new_test<C: ChainContext + 'static>(
+		chain: C,
		report_handle: Box<dyn Fn(&PeerId, i32) + Send + Sync>,
) -> Self {
- let validator = Arc::new(MessageValidator::new_test(oracle, report_handle));
+ let validator = Arc::new(MessageValidator::new_test(chain, report_handle));
RegisteredMessageValidator { inner: validator as _ }
}
- /// Note a live attestation session. This must be removed later with
- /// `remove_session`.
- pub(crate) fn note_session(
+ /// Note that we perceive a new leaf of the block-DAG. We will notify our neighbors that
+ /// we now accept parachain candidate attestations and incoming message queues
+ /// relevant to this leaf.
+ pub(crate) fn new_local_leaf(
&self,
- relay_parent: Hash,
+ relay_chain_leaf: Hash,
validation: MessageValidationData,
- send_neighbor_packet: F,
- ) {
- // add an entry in our_view
- // prune any entries from our_view which are no longer leaves
+		lookup_queue_by_root: impl Fn(&Hash) -> Option<Vec<ParachainMessage>>,
+ ) -> NewLeafActions {
+ // add an entry in attestation_view
+ // prune any entries from attestation_view which are no longer leaves
let mut inner = self.inner.inner.write();
- inner.our_view.add_session(relay_parent, validation);
- {
+ inner.attestation_view.new_local_leaf(relay_chain_leaf, validation);
- let &mut Inner { ref oracle, ref mut our_view, .. } = &mut *inner;
- our_view.prune_old_sessions(|parent| match oracle.is_known(parent) {
+ let mut actions = Vec::new();
+
+ {
+ let &mut Inner {
+ ref chain,
+ ref mut attestation_view,
+ ref mut message_routing_view,
+ ..
+ } = &mut *inner;
+
+ attestation_view.prune_old_leaves(|hash| match chain.is_known(hash) {
Some(Known::Leaf) => true,
_ => false,
});
+
+ if let Err(e) = message_routing_view.update_leaves(chain, attestation_view.neighbor_info()) {
+ warn!("Unable to fully update leaf-state: {:?}", e);
+ }
}
+
// send neighbor packets to peers
- inner.multicast_neighbor_packet(send_neighbor_packet);
+ inner.multicast_neighbor_packet(
+ |who, message| actions.push(NewLeafAction::TargetedMessage(who.clone(), message))
+ );
+
+ // feed any new unrouted queues into the propagation pool.
+ inner.message_routing_view.sweep_unknown_queues(|topic, queue_root|
+ match lookup_queue_by_root(queue_root) {
+ Some(messages) => {
+ let message = GossipMessage::from(GossipParachainMessages {
+ queue_root: *queue_root,
+ messages,
+ }).to_consensus_message();
+
+ actions.push(NewLeafAction::Multicast(*topic, message));
+
+ true
+ }
+ None => false,
+ }
+ );
+
+ NewLeafActions { actions }
}
}
-/// The data needed for validating gossip.
+/// The data needed for validating gossip messages.
#[derive(Default)]
pub(crate) struct MessageValidationData {
- /// The authorities at a block.
+ /// The authorities' parachain validation keys at a block.
pub(crate) authorities: Vec,
- /// Mapping from validator index to `ValidatorId`.
-	pub(crate) index_mapping: HashMap<ValidatorIndex, ValidatorId>,
}
impl MessageValidationData {
- fn check_statement(&self, relay_parent: &Hash, statement: &SignedStatement) -> Result<(), ()> {
- let sender = match self.index_mapping.get(&statement.sender) {
+ // check a statement's signature.
+ fn check_statement(&self, relay_chain_leaf: &Hash, statement: &SignedStatement) -> Result<(), ()> {
+ let sender = match self.authorities.get(statement.sender as usize) {
Some(val) => val,
None => return Err(()),
};
@@ -245,7 +452,7 @@ impl MessageValidationData {
&statement.statement,
&statement.signature,
sender.clone(),
- relay_parent,
+ relay_chain_leaf,
);
if good {
@@ -256,157 +463,25 @@ impl MessageValidationData {
}
}
-// knowledge about attestations on a single parent-hash.
#[derive(Default)]
-struct Knowledge {
-	candidates: HashSet<Hash>,
-}
-
-impl Knowledge {
- // whether the peer is aware of a candidate with given hash.
- fn is_aware_of(&self, candidate_hash: &Hash) -> bool {
- self.candidates.contains(candidate_hash)
- }
-
- // note that the peer is aware of a candidate with given hash.
- fn note_aware(&mut self, candidate_hash: Hash) {
- self.candidates.insert(candidate_hash);
- }
-}
-
struct PeerData {
-	live: HashMap<Hash, Knowledge>,
+ attestation: AttestationPeerData,
}
impl PeerData {
- fn knowledge_at_mut(&mut self, parent_hash: &Hash) -> Option<&mut Knowledge> {
- self.live.get_mut(parent_hash)
+	fn leaves(&self) -> impl Iterator<Item = &Hash> {
+ self.attestation.leaves()
}
}
-struct OurView {
- live_sessions: Vec<(Hash, SessionView)>,
-	topics: HashMap<Hash, Hash>, // maps topic hashes to block hashes.
-}
-
-impl Default for OurView {
- fn default() -> Self {
- OurView {
- live_sessions: Vec::with_capacity(MAX_CHAIN_HEADS),
- topics: Default::default(),
- }
- }
-}
-
-impl OurView {
- fn session_view(&self, relay_parent: &Hash) -> Option<&SessionView> {
- self.live_sessions.iter()
- .find_map(|&(ref h, ref sesh)| if h == relay_parent { Some(sesh) } else { None } )
- }
-
- fn session_view_mut(&mut self, relay_parent: &Hash) -> Option<&mut SessionView> {
- self.live_sessions.iter_mut()
- .find_map(|&mut (ref h, ref mut sesh)| if h == relay_parent { Some(sesh) } else { None } )
- }
-
- fn add_session(&mut self, relay_parent: Hash, validation_data: MessageValidationData) {
- self.live_sessions.push((
- relay_parent,
- SessionView {
- validation_data,
- knowledge: Default::default(),
- },
- ));
- self.topics.insert(attestation_topic(relay_parent), relay_parent);
- }
-
-	fn prune_old_sessions<F: Fn(&Hash) -> bool>(&mut self, is_leaf: F) {
- let live_sessions = &mut self.live_sessions;
- live_sessions.retain(|&(ref relay_parent, _)| is_leaf(relay_parent));
- self.topics.retain(|_, v| live_sessions.iter().find(|(p, _)| p == v).is_some());
- }
-
- fn knows_topic(&self, topic: &Hash) -> bool {
- self.topics.contains_key(topic)
- }
-
- fn topic_block(&self, topic: &Hash) -> Option<&Hash> {
- self.topics.get(topic)
- }
-
-	fn neighbor_info(&self) -> Vec<Hash> {
- self.live_sessions.iter().take(MAX_CHAIN_HEADS).map(|(p, _)| p.clone()).collect()
- }
-}
-
-struct SessionView {
- validation_data: MessageValidationData,
- knowledge: Knowledge,
-}
-
-struct Inner<O: ?Sized> {
+struct Inner<C: ?Sized> {
	peers: HashMap<PeerId, PeerData>,
- our_view: OurView,
- oracle: O,
+ attestation_view: AttestationView,
+ message_routing_view: MessageRoutingView,
+ chain: C,
}
-impl<O: ?Sized + KnownOracle> Inner<O> {
- fn validate_statement(&mut self, message: GossipStatement)
- -> (GossipValidationResult, i32)
- {
- // message must reference one of our chain heads and one
- // if message is not a `Candidate` we should have the candidate available
- // in `our_view`.
- match self.our_view.session_view(&message.relay_parent) {
- None => {
- let cost = match self.oracle.is_known(&message.relay_parent) {
- Some(Known::Leaf) => {
- warn!(
- target: "network",
- "Leaf block {} not considered live for attestation",
- message.relay_parent,
- );
-
- 0
- }
- Some(Known::Old) => cost::PAST_MESSAGE,
- _ => cost::FUTURE_MESSAGE,
- };
-
- (GossipValidationResult::Discard, cost)
- }
- Some(view) => {
- // first check that we are capable of receiving this message
- // in a DoS-proof manner.
- let benefit = match message.signed_statement.statement {
- GenericStatement::Candidate(_) => benefit::NEW_CANDIDATE,
- GenericStatement::Valid(ref h) | GenericStatement::Invalid(ref h) => {
- if !view.knowledge.is_aware_of(h) {
- let cost = cost::ATTESTATION_NO_CANDIDATE;
- return (GossipValidationResult::Discard, cost);
- }
-
- benefit::NEW_ATTESTATION
- }
- };
-
- // validate signature.
- let res = view.validation_data.check_statement(
- &message.relay_parent,
- &message.signed_statement,
- );
-
- match res {
- Ok(()) => {
- let topic = attestation_topic(message.relay_parent);
- (GossipValidationResult::ProcessAndKeep(topic), benefit)
- }
- Err(()) => (GossipValidationResult::Discard, cost::BAD_SIGNATURE),
- }
- }
- }
- }
-
+impl<C: ?Sized + ChainContext> Inner<C> {
fn validate_neighbor_packet(&mut self, sender: &PeerId, packet: NeighborPacket)
		-> (GossipValidationResult, i32, Vec<Hash>)
{
@@ -414,16 +489,20 @@ impl Inner {
if chain_heads.len() > MAX_CHAIN_HEADS {
(GossipValidationResult::Discard, cost::BAD_NEIGHBOR_PACKET, Vec::new())
} else {
- let mut new_topics = Vec::new();
- if let Some(ref mut peer) = self.peers.get_mut(sender) {
- peer.live.retain(|k, _| chain_heads.contains(k));
- for head in chain_heads {
- peer.live.entry(head).or_insert_with(|| {
- new_topics.push(attestation_topic(head));
- Default::default()
- });
- }
- }
+ let chain_heads: LeavesVec = chain_heads.into_iter().collect();
+ let new_topics = if let Some(ref mut peer) = self.peers.get_mut(sender) {
+ let new_leaves = peer.attestation.update_leaves(&chain_heads);
+ let new_attestation_topics = new_leaves.iter().cloned().map(attestation_topic);
+
+ // find all topics which are from the intersection of our leaves with the peer's
+ // new leaves.
+ let new_message_routing_topics = self.message_routing_view.intersection_topics(&new_leaves);
+
+ new_attestation_topics.chain(new_message_routing_topics).collect()
+ } else {
+ Vec::new()
+ };
+
(GossipValidationResult::Discard, 0, new_topics)
}
}
@@ -432,40 +511,36 @@ impl Inner {
&self,
mut send_neighbor_packet: F,
) {
- let neighbor_packet = GossipMessage::Neighbor(VersionedNeighborPacket::V1(NeighborPacket {
- chain_heads: self.our_view.neighbor_info()
- }));
-
- let message = ConsensusMessage {
- data: neighbor_packet.encode(),
- engine_id: POLKADOT_ENGINE_ID,
- };
+ let neighbor_packet = GossipMessage::from(NeighborPacket {
+ chain_heads: self.attestation_view.neighbor_info().collect(),
+ }).to_consensus_message();
for peer in self.peers.keys() {
- send_neighbor_packet(peer, message.clone())
+ send_neighbor_packet(peer, neighbor_packet.clone())
}
}
}
/// An unregistered message validator. Register this with `register_validator`.
-pub struct MessageValidator<O: ?Sized> {
+pub struct MessageValidator<C: ?Sized> {
	report_handle: Box<dyn Fn(&PeerId, i32) + Send + Sync>,
-	inner: RwLock<Inner<O>>,
+	inner: RwLock<Inner<C>>,
}
-impl<O: KnownOracle + ?Sized> MessageValidator<O> {
+impl<C: ChainContext + ?Sized> MessageValidator<C> {
#[cfg(test)]
fn new_test(
- oracle: O,
+ chain: C,
		report_handle: Box<dyn Fn(&PeerId, i32) + Send + Sync>,
- ) -> Self where O: Sized{
+ ) -> Self where C: Sized {
MessageValidator {
report_handle,
inner: RwLock::new(Inner {
peers: HashMap::new(),
- our_view: Default::default(),
- oracle,
- })
+ attestation_view: Default::default(),
+ message_routing_view: Default::default(),
+ chain,
+ }),
}
}
@@ -474,12 +549,10 @@ impl MessageValidator {
}
}
-impl<O: KnownOracle + ?Sized> network_gossip::Validator<Block> for MessageValidator<O> {
+impl<C: ChainContext + ?Sized> network_gossip::Validator<Block> for MessageValidator<C> {
	fn new_peer(&self, _context: &mut dyn ValidatorContext<Block>, who: &PeerId, _roles: Roles) {
let mut inner = self.inner.write();
- inner.peers.insert(who.clone(), PeerData {
- live: HashMap::new(),
- });
+ inner.peers.insert(who.clone(), PeerData::default());
}
	fn peer_disconnected(&self, _context: &mut dyn ValidatorContext<Block>, who: &PeerId) {
@@ -487,10 +560,11 @@ impl network_gossip::Validator for MessageValida
inner.peers.remove(who);
}
-	fn validate(&self, context: &mut dyn ValidatorContext<Block>, sender: &PeerId, mut data: &[u8])
+	fn validate(&self, context: &mut dyn ValidatorContext<Block>, sender: &PeerId, data: &[u8])
-> GossipValidationResult
{
- let (res, cost_benefit) = match GossipMessage::decode(&mut data) {
+ let mut decode_data = data;
+ let (res, cost_benefit) = match GossipMessage::decode(&mut decode_data) {
Err(_) => (GossipValidationResult::Discard, cost::MALFORMED_MESSAGE),
Ok(GossipMessage::Neighbor(VersionedNeighborPacket::V1(packet))) => {
let (res, cb, topics) = self.inner.write().validate_neighbor_packet(sender, packet);
@@ -500,7 +574,24 @@ impl network_gossip::Validator for MessageValida
(res, cb)
}
Ok(GossipMessage::Statement(statement)) => {
- let (res, cb) = self.inner.write().validate_statement(statement);
+ let (res, cb) = {
+ let mut inner = self.inner.write();
+ let inner = &mut *inner;
+ inner.attestation_view.validate_statement_signature(statement, &inner.chain)
+ };
+
+ if let GossipValidationResult::ProcessAndKeep(ref topic) = res {
+ context.broadcast_message(topic.clone(), data.to_vec(), false);
+ }
+ (res, cb)
+ }
+ Ok(GossipMessage::ParachainMessages(messages)) => {
+ let (res, cb) = {
+ let mut inner = self.inner.write();
+ let inner = &mut *inner;
+ inner.message_routing_view.validate_queue_and_note_known(&messages)
+ };
+
if let GossipValidationResult::ProcessAndKeep(ref topic) = res {
context.broadcast_message(topic.clone(), data.to_vec(), false);
}
@@ -516,61 +607,56 @@ impl network_gossip::Validator for MessageValida
let inner = self.inner.read();
Box::new(move |topic, _data| {
- // check that topic is one of our live sessions. everything else is expired
- !inner.our_view.knows_topic(&topic)
+ // check that messages from this topic are considered live by one of our protocols.
+ // everything else is expired
+ let live = inner.attestation_view.is_topic_live(&topic)
+				|| inner.message_routing_view.is_topic_live(&topic);
+
+ !live // = expired
})
}
	fn message_allowed<'a>(&'a self) -> Box<dyn FnMut(&PeerId, MessageIntent, &Hash, &[u8]) -> bool + 'a> {
let mut inner = self.inner.write();
Box::new(move |who, intent, topic, data| {
- let &mut Inner { ref mut peers, ref mut our_view, .. } = &mut *inner;
+ let &mut Inner {
+ ref mut peers,
+ ref mut attestation_view,
+ ref mut message_routing_view,
+ ..
+ } = &mut *inner;
match intent {
MessageIntent::PeriodicRebroadcast => return false,
_ => {},
}
- let relay_parent = match our_view.topic_block(topic) {
- None => return false,
- Some(hash) => hash.clone(),
- };
-
- // check that topic is one of our peers' live sessions.
- let peer_knowledge = match peers.get_mut(who)
- .and_then(|p| p.knowledge_at_mut(&relay_parent))
- {
- Some(p) => p,
- None => return false,
- };
+ let attestation_head = attestation_view.topic_block(topic).map(|x| x.clone());
+ let peer = peers.get_mut(who);
match GossipMessage::decode(&mut &data[..]) {
- Ok(GossipMessage::Statement(statement)) => {
- let signed = statement.signed_statement;
-
- match signed.statement {
- GenericStatement::Valid(ref h) | GenericStatement::Invalid(ref h) => {
- // `valid` and `invalid` statements can only be propagated after
- // a candidate message is known by that peer.
- if !peer_knowledge.is_aware_of(h) {
- return false;
- }
- }
- GenericStatement::Candidate(ref c) => {
- // if we are sending a `Candidate` message we should make sure that
- // our_view and their_view reflects that we know about the candidate.
- let hash = c.hash();
- peer_knowledge.note_aware(hash);
- if let Some(our_view) = our_view.session_view_mut(&relay_parent) {
- our_view.knowledge.note_aware(hash);
- }
- }
+ Ok(GossipMessage::Statement(ref statement)) => {
+ // to allow statements, we need peer knowledge.
+ let peer_knowledge = peer.and_then(move |p| attestation_head.map(|r| (p, r)))
+ .and_then(|(p, r)| p.attestation.knowledge_at_mut(&r).map(|k| (k, r)));
+
+ peer_knowledge.map_or(false, |(knowledge, attestation_head)| {
+ attestation_view.statement_allowed(
+ statement,
+ &attestation_head,
+ knowledge,
+ )
+ })
+ }
+ Ok(GossipMessage::ParachainMessages(_)) => match peer {
+ None => false,
+ Some(peer) => {
+ let their_leaves: LeavesVec = peer.leaves().cloned().collect();
+ message_routing_view.allowed_intersecting(&their_leaves, topic)
}
}
- _ => return false,
+ _ => false,
}
-
- true
})
}
}
@@ -583,7 +669,11 @@ mod tests {
use parking_lot::Mutex;
use polkadot_primitives::parachain::{CandidateReceipt, HeadData};
use substrate_primitives::crypto::UncheckedInto;
- use substrate_primitives::sr25519::Signature as Sr25519Signature;
+ use substrate_primitives::sr25519::{Public as Sr25519Public, Signature as Sr25519Signature};
+ use polkadot_validation::GenericStatement;
+ use super::message_routing::queue_topic;
+
+ use crate::tests::TestChainContext;
#[derive(PartialEq, Clone, Debug)]
enum ContextEvent {
@@ -619,14 +709,29 @@ mod tests {
}
}
+ impl NewLeafActions {
+ fn has_message(&self, who: PeerId, message: ConsensusMessage) -> bool {
+ let x = NewLeafAction::TargetedMessage(who, message);
+ self.actions.iter().find(|&m| m == &x).is_some()
+ }
+
+ fn has_multicast(&self, topic: Hash, message: ConsensusMessage) -> bool {
+ let x = NewLeafAction::Multicast(topic, message);
+ self.actions.iter().find(|&m| m == &x).is_some()
+ }
+ }
+
+ fn validator_id(raw: [u8; 32]) -> ValidatorId {
+ Sr25519Public::from_raw(raw).into()
+ }
+
#[test]
fn message_allowed() {
let (tx, _rx) = mpsc::channel();
let tx = Mutex::new(tx);
-		let known_map = HashMap::<Hash, Known>::new();
let report_handle = Box::new(move |peer: &PeerId, cb: i32| tx.lock().send((peer.clone(), cb)).unwrap());
let validator = MessageValidator::new_test(
- move |hash: &Hash| known_map.get(hash).map(|x| x.clone()),
+ TestChainContext::default(),
report_handle,
);
@@ -641,9 +746,9 @@ mod tests {
let hash_b = [2u8; 32].into();
let hash_c = [3u8; 32].into();
- let message = GossipMessage::Neighbor(VersionedNeighborPacket::V1(NeighborPacket {
- chain_heads: vec![hash_a, hash_b],
- })).encode();
+ let message = GossipMessage::from(NeighborPacket {
+ chain_heads: vec![hash_a, hash_b],
+ }).encode();
let res = validator.validate(
&mut validator_context,
&peer_a,
@@ -676,7 +781,7 @@ mod tests {
};
let statement = GossipMessage::Statement(GossipStatement {
- relay_parent: hash_a,
+ relay_chain_leaf: hash_a,
signed_statement: SignedStatement {
statement: GenericStatement::Candidate(candidate_receipt),
signature: Sr25519Signature([255u8; 64]).into(),
@@ -690,7 +795,7 @@ mod tests {
let topic_c = attestation_topic(hash_c);
// topic_a is in all 3 views -> succeed
- validator.inner.write().our_view.add_session(hash_a, MessageValidationData::default());
+ validator.inner.write().attestation_view.new_local_leaf(hash_a, MessageValidationData::default());
// topic_b is in the neighbor's view but not ours -> fail
// topic_c is not in either -> fail
@@ -706,10 +811,9 @@ mod tests {
fn too_many_chain_heads_is_report() {
let (tx, rx) = mpsc::channel();
let tx = Mutex::new(tx);
-		let known_map = HashMap::<Hash, Known>::new();
let report_handle = Box::new(move |peer: &PeerId, cb: i32| tx.lock().send((peer.clone(), cb)).unwrap());
let validator = MessageValidator::new_test(
- move |hash: &Hash| known_map.get(hash).map(|x| x.clone()),
+ TestChainContext::default(),
report_handle,
);
@@ -722,9 +826,9 @@ mod tests {
let chain_heads = (0..MAX_CHAIN_HEADS+1).map(|i| [i as u8; 32].into()).collect();
- let message = GossipMessage::Neighbor(VersionedNeighborPacket::V1(NeighborPacket {
- chain_heads,
- })).encode();
+ let message = GossipMessage::from(NeighborPacket {
+ chain_heads,
+ }).encode();
let res = validator.validate(
&mut validator_context,
&peer_a,
@@ -749,10 +853,9 @@ mod tests {
fn statement_only_sent_when_candidate_known() {
let (tx, _rx) = mpsc::channel();
let tx = Mutex::new(tx);
-		let known_map = HashMap::<Hash, Known>::new();
let report_handle = Box::new(move |peer: &PeerId, cb: i32| tx.lock().send((peer.clone(), cb)).unwrap());
let validator = MessageValidator::new_test(
- move |hash: &Hash| known_map.get(hash).map(|x| x.clone()),
+ TestChainContext::default(),
report_handle,
);
@@ -766,9 +869,10 @@ mod tests {
let hash_a = [1u8; 32].into();
let hash_b = [2u8; 32].into();
- let message = GossipMessage::Neighbor(VersionedNeighborPacket::V1(NeighborPacket {
- chain_heads: vec![hash_a, hash_b],
- })).encode();
+ let message = GossipMessage::from(NeighborPacket {
+ chain_heads: vec![hash_a, hash_b],
+ }).encode();
+
let res = validator.validate(
&mut validator_context,
&peer_a,
@@ -793,7 +897,7 @@ mod tests {
let c_hash = [99u8; 32].into();
let statement = GossipMessage::Statement(GossipStatement {
- relay_parent: hash_a,
+ relay_chain_leaf: hash_a,
signed_statement: SignedStatement {
statement: GenericStatement::Valid(c_hash),
signature: Sr25519Signature([255u8; 64]).into(),
@@ -801,7 +905,7 @@ mod tests {
}
});
let encoded = statement.encode();
- validator.inner.write().our_view.add_session(hash_a, MessageValidationData::default());
+ validator.inner.write().attestation_view.new_local_leaf(hash_a, MessageValidationData::default());
{
let mut message_allowed = validator.message_allowed();
@@ -814,14 +918,336 @@ mod tests {
.peers
.get_mut(&peer_a)
.unwrap()
- .live
- .get_mut(&hash_a)
- .unwrap()
- .note_aware(c_hash);
-
+ .attestation
+ .note_aware_under_leaf(&hash_a, c_hash);
{
let mut message_allowed = validator.message_allowed();
assert!(message_allowed(&peer_a, MessageIntent::Broadcast, &topic_a, &encoded[..]));
}
}
+
+ #[test]
+ fn multicasts_icmp_queues_when_building_on_new_leaf() {
+ let (tx, _rx) = mpsc::channel();
+ let tx = Mutex::new(tx);
+ let report_handle = Box::new(move |peer: &PeerId, cb: i32| tx.lock().send((peer.clone(), cb)).unwrap());
+
+ let hash_a = [1u8; 32].into();
+ let root_a = [11u8; 32].into();
+ let root_a_topic = queue_topic(root_a);
+
+ let root_a_messages = vec![
+ ParachainMessage(vec![1, 2, 3]),
+ ParachainMessage(vec![4, 5, 6]),
+ ];
+
+ let chain = {
+ let mut chain = TestChainContext::default();
+ chain.known_map.insert(hash_a, Known::Leaf);
+ chain.ingress_roots.insert(hash_a, vec![root_a]);
+ chain
+ };
+
+ let validator = RegisteredMessageValidator::new_test(chain, report_handle);
+
+	let authorities: Vec<ValidatorId> = vec![validator_id([0; 32]), validator_id([10; 32])];
+
+ let peer_a = PeerId::random();
+ let peer_b = PeerId::random();
+
+ let mut validator_context = MockValidatorContext::default();
+ validator.inner.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+ validator.inner.new_peer(&mut validator_context, &peer_b, Roles::FULL);
+ assert!(validator_context.events.is_empty());
+ validator_context.clear();
+
+
+ {
+ let message = GossipMessage::from(NeighborPacket {
+ chain_heads: vec![hash_a],
+ }).encode();
+ let res = validator.inner.validate(
+ &mut validator_context,
+ &peer_a,
+ &message[..],
+ );
+
+ match res {
+ GossipValidationResult::Discard => {},
+ _ => panic!("wrong result"),
+ }
+ assert_eq!(
+ validator_context.events,
+ vec![
+ ContextEvent::SendTopic(peer_a.clone(), attestation_topic(hash_a), false),
+ ],
+ );
+ }
+
+ // ensure that we attempt to multicast all relevant queues after noting a leaf.
+ {
+ let actions = validator.new_local_leaf(
+ hash_a,
+ MessageValidationData { authorities },
+ |root| if root == &root_a {
+ Some(root_a_messages.clone())
+ } else {
+ None
+ },
+ );
+
+ assert!(actions.has_message(peer_a.clone(), GossipMessage::from(NeighborPacket {
+ chain_heads: vec![hash_a],
+ }).to_consensus_message()));
+
+ assert!(actions.has_multicast(root_a_topic, GossipMessage::from(GossipParachainMessages {
+ queue_root: root_a,
+ messages: root_a_messages.clone(),
+ }).to_consensus_message()));
+ }
+
+ // ensure that we are allowed to multicast to a peer with same chain head,
+ // but not to one without.
+ {
+ let message = GossipMessage::from(GossipParachainMessages {
+ queue_root: root_a,
+ messages: root_a_messages.clone(),
+ }).encode();
+
+ let mut allowed = validator.inner.message_allowed();
+ assert!(allowed(&peer_a, MessageIntent::Broadcast, &root_a_topic, &message[..]));
+ assert!(!allowed(&peer_b, MessageIntent::Broadcast, &root_a_topic, &message[..]));
+ }
+ }
+
+ #[test]
+ fn multicasts_icmp_queues_on_neighbor_update() {
+ let (tx, _rx) = mpsc::channel();
+ let tx = Mutex::new(tx);
+ let report_handle = Box::new(move |peer: &PeerId, cb: i32| tx.lock().send((peer.clone(), cb)).unwrap());
+
+ let hash_a = [1u8; 32].into();
+ let root_a = [11u8; 32].into();
+ let root_a_topic = queue_topic(root_a);
+
+ let root_a_messages = vec![
+ ParachainMessage(vec![1, 2, 3]),
+ ParachainMessage(vec![4, 5, 6]),
+ ];
+
+ let chain = {
+ let mut chain = TestChainContext::default();
+ chain.known_map.insert(hash_a, Known::Leaf);
+ chain.ingress_roots.insert(hash_a, vec![root_a]);
+ chain
+ };
+
+ let validator = RegisteredMessageValidator::new_test(chain, report_handle);
+
+	let authorities: Vec<ValidatorId> = vec![validator_id([0; 32]), validator_id([10; 32])];
+
+ let peer_a = PeerId::random();
+ let peer_b = PeerId::random();
+
+ let mut validator_context = MockValidatorContext::default();
+ validator.inner.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+ validator.inner.new_peer(&mut validator_context, &peer_b, Roles::FULL);
+ assert!(validator_context.events.is_empty());
+ validator_context.clear();
+
+ // ensure that we attempt to multicast all relevant queues after noting a leaf.
+ {
+ let actions = validator.new_local_leaf(
+ hash_a,
+ MessageValidationData { authorities },
+ |root| if root == &root_a {
+ Some(root_a_messages.clone())
+ } else {
+ None
+ },
+ );
+
+ assert!(actions.has_message(peer_a.clone(), GossipMessage::from(NeighborPacket {
+ chain_heads: vec![hash_a],
+ }).to_consensus_message()));
+
+ assert!(actions.has_multicast(root_a_topic, GossipMessage::from(GossipParachainMessages {
+ queue_root: root_a,
+ messages: root_a_messages.clone(),
+ }).to_consensus_message()));
+ }
+
+ // ensure that we are not allowed to multicast to either peer, as they
+ // don't have the chain head.
+ {
+ let message = GossipMessage::from(GossipParachainMessages {
+ queue_root: root_a,
+ messages: root_a_messages.clone(),
+ }).encode();
+
+ let mut allowed = validator.inner.message_allowed();
+ assert!(!allowed(&peer_a, MessageIntent::Broadcast, &root_a_topic, &message[..]));
+ assert!(!allowed(&peer_b, MessageIntent::Broadcast, &root_a_topic, &message[..]));
+ }
+
+ // peer A gets updated to the chain head. now we'll attempt to broadcast
+ // all queues to it.
+ {
+ let message = GossipMessage::from(NeighborPacket {
+ chain_heads: vec![hash_a],
+ }).encode();
+ let res = validator.inner.validate(
+ &mut validator_context,
+ &peer_a,
+ &message[..],
+ );
+
+ match res {
+ GossipValidationResult::Discard => {},
+ _ => panic!("wrong result"),
+ }
+ assert_eq!(
+ validator_context.events,
+ vec![
+ ContextEvent::SendTopic(peer_a.clone(), attestation_topic(hash_a), false),
+ ContextEvent::SendTopic(peer_a.clone(), root_a_topic, false),
+ ],
+ );
+ }
+
+ // ensure that we are allowed to multicast to a peer with same chain head,
+ // but not to one without.
+ {
+ let message = GossipMessage::from(GossipParachainMessages {
+ queue_root: root_a,
+ messages: root_a_messages.clone(),
+ }).encode();
+
+ let mut allowed = validator.inner.message_allowed();
+ assert!(allowed(&peer_a, MessageIntent::Broadcast, &root_a_topic, &message[..]));
+ assert!(!allowed(&peer_b, MessageIntent::Broadcast, &root_a_topic, &message[..]));
+ }
+ }
+
+ #[test]
+ fn accepts_needed_unknown_icmp_message_queue() {
+ let (tx, _rx) = mpsc::channel();
+ let tx = Mutex::new(tx);
+ let report_handle = Box::new(move |peer: &PeerId, cb: i32| tx.lock().send((peer.clone(), cb)).unwrap());
+
+ let hash_a = [1u8; 32].into();
+ let root_a_messages = vec![
+ ParachainMessage(vec![1, 2, 3]),
+ ParachainMessage(vec![4, 5, 6]),
+ ];
+ let not_root_a_messages = vec![
+ ParachainMessage(vec![1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
+ ParachainMessage(vec![4, 5, 6]),
+ ];
+
+ let root_a = polkadot_validation::message_queue_root(
+ root_a_messages.iter().map(|m| &m.0)
+ );
+ let not_root_a = [69u8; 32].into();
+ let root_a_topic = queue_topic(root_a);
+
+ let chain = {
+ let mut chain = TestChainContext::default();
+ chain.known_map.insert(hash_a, Known::Leaf);
+ chain.ingress_roots.insert(hash_a, vec![root_a]);
+ chain
+ };
+
+ let validator = RegisteredMessageValidator::new_test(chain, report_handle);
+
+	let authorities: Vec<ValidatorId> = vec![validator_id([0; 32]), validator_id([10; 32])];
+
+ let peer_a = PeerId::random();
+ let mut validator_context = MockValidatorContext::default();
+
+ validator.inner.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+ assert!(validator_context.events.is_empty());
+ validator_context.clear();
+
+ let queue_messages = GossipMessage::from(GossipParachainMessages {
+ queue_root: root_a,
+ messages: root_a_messages.clone(),
+ }).to_consensus_message();
+
+ let not_queue_messages = GossipMessage::from(GossipParachainMessages {
+ queue_root: root_a,
+ messages: not_root_a_messages.clone(),
+ }).encode();
+
+ let queue_messages_wrong_root = GossipMessage::from(GossipParachainMessages {
+ queue_root: not_root_a,
+ messages: root_a_messages.clone(),
+ }).encode();
+
+ // ensure that we attempt to multicast all relevant queues after noting a leaf.
+ {
+ let actions = validator.new_local_leaf(
+ hash_a,
+ MessageValidationData { authorities },
+ |_root| None,
+ );
+
+ assert!(actions.has_message(peer_a.clone(), GossipMessage::from(NeighborPacket {
+ chain_heads: vec![hash_a],
+ }).to_consensus_message()));
+
+ // we don't know this queue! no broadcast :(
+ assert!(!actions.has_multicast(root_a_topic, queue_messages.clone()));
+ }
+
+ // rejects right queue with unknown root.
+ {
+ let res = validator.inner.validate(
+ &mut validator_context,
+ &peer_a,
+ &queue_messages_wrong_root[..],
+ );
+
+ match res {
+ GossipValidationResult::Discard => {},
+ _ => panic!("wrong result"),
+ }
+
+ assert_eq!(validator_context.events, Vec::new());
+ }
+
+ // rejects bad queue.
+ {
+ let res = validator.inner.validate(
+ &mut validator_context,
+ &peer_a,
+ ¬_queue_messages[..],
+ );
+
+ match res {
+ GossipValidationResult::Discard => {},
+ _ => panic!("wrong result"),
+ }
+
+ assert_eq!(validator_context.events, Vec::new());
+ }
+
+ // accepts the right queue.
+ {
+ let res = validator.inner.validate(
+ &mut validator_context,
+ &peer_a,
+ &queue_messages.data[..],
+ );
+
+ match res {
+ GossipValidationResult::ProcessAndKeep(topic) if topic == root_a_topic => {},
+ _ => panic!("wrong result"),
+ }
+
+ assert_eq!(validator_context.events, vec![
+ ContextEvent::BroadcastMessage(root_a_topic, queue_messages.data.clone(), false),
+ ]);
+ }
+ }
}
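
To make the acceptance rule concrete: before an ICMP packet is kept and repropagated, the root it claims must match the root recomputed from the message bodies, mirroring `queue_root_is_correct` and the `cost` schedule above. A sketch; `check_queue` is a hypothetical free function, not part of this diff.

fn check_queue(packet: &GossipParachainMessages) -> Result<(), i32> {
	// Recompute the queue root from the message bodies.
	let computed_root = polkadot_validation::message_queue_root(
		packet.messages.iter().map(|m| &m.0),
	);

	if computed_root == packet.queue_root {
		Ok(())
	} else {
		// A mismatch is punished in proportion to the number of messages
		// the peer made us hash.
		Err(cost::icmp_messages_root_mismatch(packet.messages.len()))
	}
}
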
diff --git a/network/src/gossip/attestation.rs b/network/src/gossip/attestation.rs
new file mode 100644
index 000000000000..07bfefe71b37
--- /dev/null
+++ b/network/src/gossip/attestation.rs
@@ -0,0 +1,264 @@
+// Copyright 2019 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Gossip messages and structures for dealing with attestations (statements of
+//! validity or invalidity on parachain candidates).
+//!
+//! This follows the same principles as other gossip modules (see parent
+//! documentation for more details) by being aware of our current chain
+//! heads and accepting only information relative to them. Attestations are
+//! localized to a relay chain head, so this is easily doable.
+//!
+//! This module also provides a filter, so we can only broadcast messages to
+//! peers that are relevant to chain heads they have advertised.
+//!
+//! Furthermore, since attestations are bottlenecked by the `Candidate` statement,
+//! we only accept attestations which are themselves `Candidate` messages, or reference
+//! a `Candidate` we are aware of. Otherwise, it is possible we could be forced to
+//! consider an unbounded number of attestations produced by a misbehaving validator.
+
+use substrate_network::consensus_gossip::{ValidationResult as GossipValidationResult};
+use polkadot_validation::GenericStatement;
+use polkadot_primitives::Hash;
+
+use std::collections::{HashMap, HashSet};
+
+use log::warn;
+use crate::router::attestation_topic;
+
+use super::{cost, benefit, MAX_CHAIN_HEADS, LeavesVec, ChainContext, Known, MessageValidationData, GossipStatement};
+
+// knowledge about attestations on a single parent-hash.
+#[derive(Default)]
+pub(super) struct Knowledge {
+	candidates: HashSet<Hash>,
+}
+
+impl Knowledge {
+ // whether the peer is aware of a candidate with given hash.
+ fn is_aware_of(&self, candidate_hash: &Hash) -> bool {
+ self.candidates.contains(candidate_hash)
+ }
+
+ // note that the peer is aware of a candidate with given hash. this should
+ // be done after observing an incoming candidate message via gossip.
+ fn note_aware(&mut self, candidate_hash: Hash) {
+ self.candidates.insert(candidate_hash);
+ }
+}
+
+#[derive(Default)]
+pub(super) struct PeerData {
+	live: HashMap<Hash, Knowledge>,
+}
+
+impl PeerData {
+ /// Update leaves, returning a list of which leaves are new.
+ pub(super) fn update_leaves(&mut self, leaves: &LeavesVec) -> LeavesVec {
+ let mut new = LeavesVec::new();
+ self.live.retain(|k, _| leaves.contains(k));
+ for &leaf in leaves {
+ self.live.entry(leaf).or_insert_with(|| {
+ new.push(leaf);
+ Default::default()
+ });
+ }
+
+ new
+ }
+
+ #[cfg(test)]
+ pub(super) fn note_aware_under_leaf(&mut self, relay_chain_leaf: &Hash, candidate_hash: Hash) {
+ if let Some(knowledge) = self.live.get_mut(relay_chain_leaf) {
+ knowledge.note_aware(candidate_hash);
+ }
+ }
+
+ pub(super) fn knowledge_at_mut(&mut self, parent_hash: &Hash) -> Option<&mut Knowledge> {
+ self.live.get_mut(parent_hash)
+ }
+
+ /// Get an iterator over all live leaves of this peer.
+ pub(super) fn leaves(&self) -> impl Iterator<Item = &Hash> {
+ self.live.keys()
+ }
+}
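+
+// Illustrative usage only (hypothetical bindings): after a peer advertises a
+// new leaf-set via a neighbor packet, `update_leaves` drops knowledge for
+// leaves no longer advertised and returns the leaves that are new, e.g.:
+//
+// let new_leaves = peer_data.update_leaves(&advertised_leaves);
+// // send the peer any relevant data under each leaf in `new_leaves`.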
+
+/// An impartial view of what topics and data are valid based on attestation session data.
+pub(super) struct View {
+ leaf_work: Vec<(Hash, LeafView)>, // hashes of the best DAG-leaves paired with validation data.
+ topics: HashMap<Hash, Hash>, // maps topic hashes to block hashes.
+}
+
+impl Default for View {
+ fn default() -> Self {
+ View {
+ leaf_work: Vec::with_capacity(MAX_CHAIN_HEADS),
+ topics: Default::default(),
+ }
+ }
+}
+
+impl View {
+ fn leaf_view(&self, relay_chain_leaf: &Hash) -> Option<&LeafView> {
+ self.leaf_work.iter()
+ .find_map(|&(ref h, ref leaf)| if h == relay_chain_leaf { Some(leaf) } else { None } )
+ }
+
+ fn leaf_view_mut(&mut self, relay_chain_leaf: &Hash) -> Option<&mut LeafView> {
+ self.leaf_work.iter_mut()
+ .find_map(|&mut (ref h, ref mut leaf)| if h == relay_chain_leaf { Some(leaf) } else { None } )
+ }
+
+ /// Get our leaves-set. Guaranteed to have length <= MAX_CHAIN_HEADS.
+ pub(super) fn neighbor_info<'a>(&'a self) -> impl Iterator<Item = Hash> + 'a + Clone {
+ self.leaf_work.iter().take(MAX_CHAIN_HEADS).map(|(p, _)| p.clone())
+ }
+
+ /// Note new leaf in our local view and validation data necessary to check signatures
+ /// of statements issued under this leaf.
+ ///
+ /// This will be pruned later on a call to `prune_old_leaves`, when this leaf
+ /// is not a leaf anymore.
+ pub(super) fn new_local_leaf(&mut self, relay_chain_leaf: Hash, validation_data: MessageValidationData) {
+ self.leaf_work.push((
+ relay_chain_leaf,
+ LeafView {
+ validation_data,
+ knowledge: Default::default(),
+ },
+ ));
+ self.topics.insert(attestation_topic(relay_chain_leaf), relay_chain_leaf);
+ }
+
+ /// Prune old leaf-work that fails the leaf predicate.
+ pub(super) fn prune_old_leaves<F: Fn(&Hash) -> bool>(&mut self, is_leaf: F) {
+ let leaf_work = &mut self.leaf_work;
+ leaf_work.retain(|&(ref relay_chain_leaf, _)| is_leaf(relay_chain_leaf));
+ self.topics.retain(|_, v| leaf_work.iter().find(|(p, _)| p == v).is_some());
+ }
+
+ /// Whether a message topic is considered live relative to our view. Non-live
+ /// topics do not pertain to our perceived leaves and are uninteresting to us.
+ pub(super) fn is_topic_live(&self, topic: &Hash) -> bool {
+ self.topics.contains_key(topic)
+ }
+
+ /// The relay-chain block hash corresponding to a topic.
+ pub(super) fn topic_block(&self, topic: &Hash) -> Option<&Hash> {
+ self.topics.get(topic)
+ }
+
+ /// Validate the signature on an attestation statement of some kind. Should be done before
+ /// any repropagation of that statement.
+ pub(super) fn validate_statement_signature<C: ChainContext + ?Sized>(
+ &mut self,
+ message: GossipStatement,
+ chain: &C,
+ )
+ -> (GossipValidationResult<Hash>, i32)
+ {
+ // message must reference one of our chain heads and
+ // if message is not a `Candidate` we should have the candidate available
+ // in `attestation_view`.
+ match self.leaf_view(&message.relay_chain_leaf) {
+ None => {
+ let cost = match chain.is_known(&message.relay_chain_leaf) {
+ Some(Known::Leaf) => {
+ warn!(
+ target: "network",
+ "Leaf block {} not considered live for attestation",
+ message.relay_chain_leaf,
+ );
+
+ 0
+ }
+ Some(Known::Old) => cost::PAST_MESSAGE,
+ _ => cost::FUTURE_MESSAGE,
+ };
+
+ (GossipValidationResult::Discard, cost)
+ }
+ Some(view) => {
+ // first check that we are capable of receiving this message
+ // in a DoS-proof manner.
+ let benefit = match message.signed_statement.statement {
+ GenericStatement::Candidate(_) => benefit::NEW_CANDIDATE,
+ GenericStatement::Valid(ref h) | GenericStatement::Invalid(ref h) => {
+ if !view.knowledge.is_aware_of(h) {
+ let cost = cost::ATTESTATION_NO_CANDIDATE;
+ return (GossipValidationResult::Discard, cost);
+ }
+
+ benefit::NEW_ATTESTATION
+ }
+ };
+
+ // validate signature.
+ let res = view.validation_data.check_statement(
+ &message.relay_chain_leaf,
+ &message.signed_statement,
+ );
+
+ match res {
+ Ok(()) => {
+ let topic = attestation_topic(message.relay_chain_leaf);
+ (GossipValidationResult::ProcessAndKeep(topic), benefit)
+ }
+ Err(()) => (GossipValidationResult::Discard, cost::BAD_SIGNATURE),
+ }
+ }
+ }
+ }
+
+ /// Whether it's allowed to send a statement to a peer with given knowledge
+ /// about the relay parent the statement refers to.
+ pub(super) fn statement_allowed(
+ &mut self,
+ statement: &GossipStatement,
+ relay_chain_leaf: &Hash,
+ peer_knowledge: &mut Knowledge,
+ ) -> bool {
+ let signed = &statement.signed_statement;
+
+ match signed.statement {
+ GenericStatement::Valid(ref h) | GenericStatement::Invalid(ref h) => {
+ // `valid` and `invalid` statements can only be propagated after
+ // a candidate message is known by that peer.
+ peer_knowledge.is_aware_of(h)
+ }
+ GenericStatement::Candidate(ref c) => {
+ // if we are sending a `Candidate` message we should make sure that
+ // attestation_view and their_view reflects that we know about the candidate.
+ let hash = c.hash();
+ peer_knowledge.note_aware(hash);
+ if let Some(attestation_view) = self.leaf_view_mut(&relay_chain_leaf) {
+ attestation_view.knowledge.note_aware(hash);
+ }
+
+ // at this point, the peer hasn't seen the message or the candidate
+ // and has knowledge of the relevant relay-chain parent.
+ true
+ }
+ }
+ }
+}
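+
+// Illustrative sequence only (hypothetical bindings): a `Candidate` statement
+// must be sent to a peer before `Valid`/`Invalid` statements for the same
+// candidate hash are allowed to that peer:
+//
+// assert!(view.statement_allowed(&candidate_statement, &leaf, &mut knowledge));
+// assert!(view.statement_allowed(&valid_statement, &leaf, &mut knowledge));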
+
+struct LeafView {
+ validation_data: MessageValidationData,
+ knowledge: Knowledge,
+}
diff --git a/network/src/gossip/message_routing.rs b/network/src/gossip/message_routing.rs
new file mode 100644
index 000000000000..01482e4671f8
--- /dev/null
+++ b/network/src/gossip/message_routing.rs
@@ -0,0 +1,339 @@
+// Copyright 2019 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Data structures and synchronous logic for ICMP message gossip.
+//!
+//! The parent-module documentation describes some rationale of the general
+//! gossip protocol design.
+//!
+//! The ICMP message-routing gossip works according to the same rationale.
+//!
+//! In this protocol, we perform work under four conditions:
+//! ### 1. Upon observation of a new leaf in the block-DAG.
+//!
+//! We first communicate the best leaves to our neighbors in the gossip graph
+//! by the means of a neighbor packet. Then, we query to discover the trie roots
+//! of all un-routed message queues from the perspective of each of those leaves.
+//!
+//! For any trie root in the unrouted set for the new leaf, if we have the corresponding
+//! queue, we send it to any peers with the new leaf in their latest advertised set.
+//!
+//! Which parachain those messages go to and from is unimportant, because this is
+//! an everybody-sees-everything style protocol. The only important property is "liveness":
+//! that the queue root is un-routed at one of the leaves we perceive to be at the head
+//! of the block-DAG.
+//!
+//! In Substrate gossip, every message is associated with a topic. Typically,
+//! many messages are grouped under a single topic. In this gossip system, each queue
+//! gets its own topic, which is based on the root hash of the queue. This is because
+//! many different chain leaves may have the same queue un-routed, so grouping by queue
+//! root is better than attempting to group message packets by the leaf under which
+//! they appear un-routed.
+//!
+//! ### 2. Upon a neighbor packet from a peer.
+//!
+//! The neighbor packet from a peer should contain perceived chain heads of that peer.
+//! If there is any overlap between our perceived chain heads and theirs, we send
+//! them any known, un-routed message queue from either set.
+//!
+//! ### 3. Upon receiving a message queue from a peer.
+//!
+//! If the message queue is in the un-routed set of one of the latest leaves we've updated to,
+//! we accept it and relay to any peers who need that queue as well.
+//!
+//! If not, we report the peer to the peer-set manager for sending us bad data.
+//!
+//! ### 4. Periodic Pruning
+//!
+//! We prune messages that are not un-routed from the view of any leaf and cease
+//! to attempt to send them to any peer.
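+//!
+//! A rough sketch of condition 3, using this module's `View` (the surrounding
+//! handler names are hypothetical):
+//!
+//! ```ignore
+//! let (result, reputation_change) = view.validate_queue_and_note_known(&incoming);
+//! report_peer(sender, reputation_change);
+//! if let GossipValidationResult::ProcessAndKeep(topic) = result {
+//!     repropagate(topic, incoming);
+//! }
+//! ```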
+
+use sr_primitives::traits::{BlakeTwo256, Hash as HashT};
+use polkadot_primitives::Hash;
+use std::collections::{HashMap, HashSet};
+use substrate_client::error::Error as ClientError;
+use super::{MAX_CHAIN_HEADS, GossipValidationResult, LeavesVec, ChainContext};
+
+/// Construct a topic for a message queue root deterministically.
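+///
+/// Illustrative only: the topic is the Blake2-256 hash of the queue root's bytes
+/// followed by the literal bytes `b"message_queue"`, so distinct roots map to
+/// distinct topics:
+///
+/// ```ignore
+/// assert_ne!(queue_topic([1u8; 32].into()), queue_topic([2u8; 32].into()));
+/// ```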
+pub fn queue_topic(queue_root: Hash) -> Hash {
+ let mut v = queue_root.as_ref().to_vec();
+ v.extend(b"message_queue");
+
+ BlakeTwo256::hash(&v[..])
+}
+
+/// A view of which queue roots are current for a given set of leaves.
+#[derive(Default)]
+pub struct View {
+ leaves: LeavesVec,
+ leaf_topics: HashMap<Hash, HashSet<Hash>>, // leaf_hash -> { topics }
+ expected_queues: HashMap<Hash, (Hash, bool)>, // topic -> (queue-root, known)
+}
+
+impl View {
+ /// Update the set of current leaves. This is called when we perceive a new best leaf-set.
+ pub fn update_leaves<T: ChainContext + ?Sized, I>(&mut self, context: &T, new_leaves: I)
+ -> Result<(), ClientError>
+ where I: Iterator<Item = Hash>
+ {
+ let new_leaves = new_leaves.take(MAX_CHAIN_HEADS);
+ let old_leaves = std::mem::replace(&mut self.leaves, new_leaves.collect());
+
+ let expected_queues = &mut self.expected_queues;
+ let leaves = &self.leaves;
+ self.leaf_topics.retain(|l, topics| {
+ if leaves.contains(l) { return true }
+
+ // prune out all data about old leaves we don't follow anymore.
+ for topic in topics.iter() {
+ expected_queues.remove(topic);
+ }
+ false
+ });
+
+ let mut res = Ok(());
+
+ // add in new data about fresh leaves.
+ for new_leaf in &self.leaves {
+ if old_leaves.contains(new_leaf) { continue }
+
+ let mut this_leaf_topics = HashSet::new();
+
+ let r = context.leaf_unrouted_roots(new_leaf, &mut |&queue_root| {
+ let topic = queue_topic(queue_root);
+ this_leaf_topics.insert(topic);
+ expected_queues.entry(topic).or_insert((queue_root, false));
+ });
+
+ if r.is_err() {
+ if let Err(e) = res {
+ log::debug!(target: "message_routing", "Ignored duplicate error {}", e)
+ };
+ res = r;
+ }
+
+ self.leaf_topics.insert(*new_leaf, this_leaf_topics);
+ }
+
+ res
+ }
+
+ /// Validate an incoming message queue against this view. If it is accepted
+ /// by our view of un-routed message queues, we will keep and re-propagate.
+ pub fn validate_queue_and_note_known(&mut self, messages: &super::GossipParachainMessages)
+ -> (GossipValidationResult<Hash>, i32)
+ {
+ let ostensible_topic = queue_topic(messages.queue_root);
+ match self.expected_queues.get_mut(&ostensible_topic) {
+ None => (GossipValidationResult::Discard, super::cost::UNNEEDED_ICMP_MESSAGES),
+ Some(&mut (_, ref mut known)) => {
+ if !messages.queue_root_is_correct() {
+ (
+ GossipValidationResult::Discard,
+ super::cost::icmp_messages_root_mismatch(messages.messages.len()),
+ )
+ } else {
+ *known = true;
+ (
+ GossipValidationResult::ProcessAndKeep(ostensible_topic),
+ super::benefit::NEW_ICMP_MESSAGES,
+ )
+ }
+ }
+ }
+ }
+
+ /// Whether a message with given topic is live.
+ pub fn is_topic_live(&self, topic: &Hash) -> bool {
+ self.expected_queues.get(topic).is_some()
+ }
+
+ /// Whether a message is allowed under the intersection of the given leaf-set
+ /// and our own.
+ pub fn allowed_intersecting(&self, other_leaves: &LeavesVec, topic: &Hash) -> bool {
+ for i in other_leaves {
+ for j in &self.leaves {
+ if i == j {
+ let leaf_topics = self.leaf_topics.get(i)
+ .expect("leaf_topics are mutated only in update_leaves; \
+ we have an entry for each item in self.leaves; \
+ i is in self.leaves; qed");
+
+ if leaf_topics.contains(topic) {
+ return true;
+ }
+ }
+ }
+ }
+
+ false
+ }
+
+ /// Get topics of all message queues a peer is interested in - this is useful
+ /// when a peer has informed us of their new best leaves.
+ pub fn intersection_topics(&self, other_leaves: &LeavesVec) -> impl Iterator<Item = Hash> {
+ let deduplicated = other_leaves.iter()
+ .filter_map(|l| self.leaf_topics.get(l))
+ .flat_map(|topics| topics.iter().cloned())
+ .collect::<HashSet<_>>();
+
+ deduplicated.into_iter()
+ }
+
+ /// Iterate over all live message queues whose data is not yet marked as locally known,
+ /// calling a closure with `(topic, root)`. The closure returns whether the queue data is
+ /// now known locally; the known-flag is updated with the result.
+ ///
+ /// This is called when we should send un-routed message queues that we are
+ /// newly aware of to peers - as in when we update our leaves.
+ pub fn sweep_unknown_queues(&mut self, mut check_known: impl FnMut(&Hash, &Hash) -> bool) {
+ for (topic, &mut (ref queue_root, ref mut known)) in self.expected_queues.iter_mut() {
+ if !*known {
+ *known = check_known(topic, queue_root)
+ }
+ }
+ }
+}
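+
+// Illustrative usage only (`local_store` is a hypothetical handle to locally
+// persisted queue data): mark queues whose data we already hold, so that
+// `sweep_unknown_queues` stops yielding them on later sweeps.
+//
+// view.sweep_unknown_queues(|_topic, root| local_store.queue_by_root(root).is_some());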
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::tests::TestChainContext;
+ use crate::gossip::{Known, GossipParachainMessages};
+ use polkadot_primitives::parachain::Message as ParachainMessage;
+
+ fn hash(x: u8) -> Hash {
+ [x; 32].into()
+ }
+
+ fn message_queue(from: u8, to: u8) -> Option<[[u8; 2]; 1]> {
+ if from == to {
+ None
+ } else {
+ Some([[from, to]])
+ }
+ }
+
+ fn message_queue_root(from: u8, to: u8) -> Option<Hash> {
+ message_queue(from, to).map(
+ |q| polkadot_validation::message_queue_root(q.iter())
+ )
+ }
+
+ // check that our view has all of the roots of the message queues
+ // emitted in the heads identified in `our_heads`, and none of the others.
+ fn check_roots(view: &mut View, our_heads: &[u8], n_heads: u8) -> bool {
+ for i in 0..n_heads {
+ for j in 0..n_heads {
+ if let Some(messages) = message_queue(i, j) {
+ let queue_root = message_queue_root(i, j).unwrap();
+ let messages = GossipParachainMessages {
+ queue_root,
+ messages: messages.iter().map(|m| ParachainMessage(m.to_vec())).collect(),
+ };
+
+ let had_queue = match view.validate_queue_and_note_known(&messages).0 {
+ GossipValidationResult::ProcessAndKeep(topic) => topic == queue_topic(queue_root),
+ _ => false,
+ };
+
+ if our_heads.contains(&i) != had_queue {
+ return false
+ }
+ }
+ }
+ }
+
+ true
+ }
+
+ #[test]
+ fn update_leaves_none_in_common() {
+ let mut ctx = TestChainContext::default();
+ let n_heads = 5;
+
+ for i in 0..n_heads {
+ ctx.known_map.insert(hash(i as u8), Known::Leaf);
+
+ let messages_out: Vec<_> = (0..n_heads).filter_map(|j| message_queue_root(i, j)).collect();
+
+ if !messages_out.is_empty() {
+ ctx.ingress_roots.insert(hash(i as u8), messages_out);
+ }
+ }
+
+ // initialize the view with 2 leaves.
+
+ let mut view = View::default();
+ view.update_leaves(
+ &ctx,
+ [hash(0), hash(1)].iter().cloned(),
+ ).unwrap();
+
+ // we should have all queue roots that were
+ // un-routed from the perspective of those 2
+ // leaves and no others.
+
+ assert!(check_roots(&mut view, &[0, 1], n_heads));
+
+ // after updating to a disjoint set,
+ // the property that we are aware of all un-routed
+ // from the perspective of our known leaves should
+ // remain the same.
+
+ view.update_leaves(
+ &ctx,
+ [hash(2), hash(3), hash(4)].iter().cloned(),
+ ).unwrap();
+
+ assert!(check_roots(&mut view, &[2, 3, 4], n_heads));
+ }
+
+ #[test]
+ fn update_leaves_overlapping() {
+ let mut ctx = TestChainContext::default();
+ let n_heads = 5;
+
+ for i in 0..n_heads {
+ ctx.known_map.insert(hash(i as u8), Known::Leaf);
+
+ let messages_out: Vec<_> = (0..n_heads).filter_map(|j| message_queue_root(i, j)).collect();
+
+ if !messages_out.is_empty() {
+ ctx.ingress_roots.insert(hash(i as u8), messages_out);
+ }
+ }
+
+ let mut view = View::default();
+ view.update_leaves(
+ &ctx,
+ [hash(0), hash(1), hash(2)].iter().cloned(),
+ ).unwrap();
+
+ assert!(check_roots(&mut view, &[0, 1, 2], n_heads));
+
+ view.update_leaves(
+ &ctx,
+ [hash(2), hash(3), hash(4)].iter().cloned(),
+ ).unwrap();
+
+ // after updating to a leaf-set overlapping with the prior,
+ // the property that we are aware of all un-routed
+ // from the perspective of our known leaves should
+ // remain the same.
+
+ assert!(check_roots(&mut view, &[2, 3, 4], n_heads));
+ }
+}
diff --git a/network/src/lib.rs b/network/src/lib.rs
index 40337a8170eb..4eaca9af6678 100644
--- a/network/src/lib.rs
+++ b/network/src/lib.rs
@@ -16,8 +16,8 @@
//! Polkadot-specific network implementation.
//!
-//! This manages routing for parachain statements, parachain block and extrinsic data fetching,
-//! communication between collators and validators, and more.
+//! This manages routing for parachain statements, parachain block and outgoing message
+//! data fetching, communication between collators and validators, and more.
mod collator_pool;
mod local_collations;
@@ -26,23 +26,29 @@ pub mod validation;
pub mod gossip;
use codec::{Decode, Encode};
-use futures::sync::oneshot;
+use futures::sync::{oneshot, mpsc};
+use futures::prelude::*;
use polkadot_primitives::{Block, Hash, Header};
use polkadot_primitives::parachain::{
Id as ParaId, BlockData, CollatorId, CandidateReceipt, Collation, PoVBlock,
- StructuredUnroutedIngress, ValidatorId
+ StructuredUnroutedIngress, ValidatorId, OutgoingMessages,
};
use substrate_network::{
PeerId, RequestId, Context, StatusMessage as GenericFullStatus,
specialization::{Event, NetworkSpecialization as Specialization},
};
-use self::validation::{LiveValidationSessions, RecentValidatorIds, InsertedRecentKey};
+use substrate_network::consensus_gossip::{
+ self, TopicNotification, MessageRecipient as GossipMessageRecipient, ConsensusMessage,
+};
+use self::validation::{LiveValidationLeaves, RecentValidatorIds, InsertedRecentKey};
use self::collator_pool::{CollatorPool, Role, Action};
use self::local_collations::LocalCollations;
use log::{trace, debug, warn};
use std::collections::{HashMap, HashSet};
+use crate::gossip::{POLKADOT_ENGINE_ID, GossipMessage};
+
#[cfg(test)]
mod tests;
@@ -69,7 +75,112 @@ mod benefit {
type FullStatus = GenericFullStatus;
/// Specialization of the network service for the polkadot protocol.
-pub type NetworkService = substrate_network::NetworkService;
+pub type PolkadotNetworkService = substrate_network::NetworkService;
+
+/// Basic functionality that a network has to fulfill.
+pub trait NetworkService: Send + Sync + 'static {
+ /// Get a stream of gossip messages for a given hash.
+ fn gossip_messages_for(&self, topic: Hash) -> GossipMessageStream;
+
+ /// Gossip a message on given topic.
+ fn gossip_message(&self, topic: Hash, message: GossipMessage);
+
+ /// Execute a closure with the gossip service.
+ fn with_gossip<F>(&self, with: F)
+ where F: FnOnce(&mut dyn GossipService, &mut dyn Context);
+
+ /// Execute a closure with the polkadot protocol.
+ fn with_spec<F>(&self, with: F)
+ where F: FnOnce(&mut PolkadotProtocol, &mut dyn Context);
+}
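+
+// Illustrative usage only (hypothetical `service` and `topic` bindings): the
+// trait lets callers consume gossip as a stream without naming the concrete
+// network implementation.
+//
+// let stream = service.gossip_messages_for(topic);
+// let work = stream.for_each(|(message, _sender)| { /* handle message */ Ok(()) });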
+
+impl NetworkService for PolkadotNetworkService {
+ fn gossip_messages_for(&self, topic: Hash) -> GossipMessageStream {
+ let (tx, rx) = std::sync::mpsc::channel();
+
+ PolkadotNetworkService::with_gossip(self, move |gossip, _| {
+ let inner_rx = gossip.messages_for(POLKADOT_ENGINE_ID, topic);
+ let _ = tx.send(inner_rx);
+ });
+
+ let topic_stream = match rx.recv() {
+ Ok(rx) => rx,
+ Err(_) => mpsc::unbounded().1, // return empty channel.
+ };
+
+ GossipMessageStream::new(topic_stream)
+ }
+
+ fn gossip_message(&self, topic: Hash, message: GossipMessage) {
+ self.gossip_consensus_message(
+ topic,
+ POLKADOT_ENGINE_ID,
+ message.encode(),
+ GossipMessageRecipient::BroadcastToAll,
+ );
+ }
+
+ fn with_gossip<F>(&self, with: F)
+ where F: FnOnce(&mut dyn GossipService, &mut dyn Context)
+ {
+ PolkadotNetworkService::with_gossip(self, move |gossip, ctx| with(gossip, ctx))
+ }
+
+ fn with_spec<F>(&self, with: F)
+ where F: FnOnce(&mut PolkadotProtocol, &mut dyn Context)
+ {
+ PolkadotNetworkService::with_spec(self, with)
+ }
+}
+
+/// A gossip network subservice.
+pub trait GossipService {
+ fn send_message(&mut self, ctx: &mut dyn Context, who: &PeerId, message: ConsensusMessage);
+ fn multicast(&mut self, ctx: &mut dyn Context, topic: &Hash, message: ConsensusMessage);
+}
+
+impl GossipService for consensus_gossip::ConsensusGossip<Block> {
+ fn send_message(&mut self, ctx: &mut dyn Context, who: &PeerId, message: ConsensusMessage) {
+ consensus_gossip::ConsensusGossip::send_message(self, ctx, who, message)
+ }
+
+ fn multicast(&mut self, ctx: &mut dyn Context, topic: &Hash, message: ConsensusMessage) {
+ consensus_gossip::ConsensusGossip::multicast(self, ctx, *topic, message, false)
+ }
+}
+
+/// A stream of gossip messages and an optional sender for a topic.
+pub struct GossipMessageStream {
+ topic_stream: mpsc::UnboundedReceiver<TopicNotification>,
+}
+
+impl GossipMessageStream {
+ /// Create a new instance with the given topic stream.
+ pub fn new(topic_stream: mpsc::UnboundedReceiver<TopicNotification>) -> Self {
+ Self {
+ topic_stream
+ }
+ }
+}
+
+impl Stream for GossipMessageStream {
+ type Item = (GossipMessage, Option<PeerId>);
+ type Error = ();
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error>