diff --git a/Cargo.lock b/Cargo.lock
index 5b84808ad444..d3f1d83495b4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -705,6 +705,15 @@ dependencies = [
  "once_cell",
 ]
 
+[[package]]
+name = "bounded-vec"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "afdd1dffefe5fc66262a524b91087c43b16e478b2e3dc49eb11b0e2fd6b6ec90"
+dependencies = [
+ "thiserror",
+]
+
 [[package]]
 name = "bp-header-chain"
 version = "0.1.0"
@@ -6319,6 +6328,7 @@ dependencies = [
 name = "polkadot-node-primitives"
 version = "0.9.9"
 dependencies = [
+ "bounded-vec",
  "futures 0.3.16",
  "parity-scale-codec",
  "polkadot-parachain",
diff --git a/erasure-coding/src/lib.rs b/erasure-coding/src/lib.rs
index 3c8dd35077b1..3c5e7f10e73a 100644
--- a/erasure-coding/src/lib.rs
+++ b/erasure-coding/src/lib.rs
@@ -24,8 +24,10 @@
 //! f is the maximum number of faulty validators in the system.
 //! The data is coded so any f+1 chunks can be used to reconstruct the full data.
 
+use std::convert::TryFrom;
+
 use parity_scale_codec::{Decode, Encode};
-use polkadot_node_primitives::AvailableData;
+use polkadot_node_primitives::{AvailableData, Proof};
 use polkadot_primitives::v0::{self, BlakeTwo256, Hash as H256, HashT};
 use sp_core::Blake2Hasher;
 use thiserror::Error;
@@ -245,7 +247,7 @@ impl<'a, I: AsRef<[u8]>> Branches<'a, I> {
 }
 
 impl<'a, I: AsRef<[u8]>> Iterator for Branches<'a, I> {
-	type Item = (Vec<Vec<u8>>, &'a [u8]);
+	type Item = (Proof, &'a [u8]);
 
 	fn next(&mut self) -> Option<Self::Item> {
 		use trie::Recorder;
@@ -258,13 +260,12 @@ impl<'a, I: AsRef<[u8]>> Iterator for Branches<'a, I> {
 
 		match res.expect("all nodes in trie present; qed") {
 			Some(_) => {
-				let nodes = recorder.drain().into_iter().map(|r| r.data).collect();
+				let nodes: Vec<Vec<u8>> = recorder.drain().into_iter().map(|r| r.data).collect();
 				let chunk = self.chunks.get(self.current_pos).expect(
 					"there is a one-to-one mapping of chunks to valid merkle branches; qed",
 				);
-
 				self.current_pos += 1;
-				Some((nodes, chunk.as_ref()))
+				Proof::try_from(nodes).ok().map(|proof| (proof, chunk.as_ref()))
 			},
 			None => None,
 		}
@@ -421,7 +422,10 @@ mod tests {
 		assert_eq!(proofs.len(), 10);
 
 		for (i, proof) in proofs.into_iter().enumerate() {
-			assert_eq!(branch_hash(&root, &proof, i).unwrap(), BlakeTwo256::hash(&chunks[i]));
+			assert_eq!(
+				branch_hash(&root, &proof.as_vec(), i).unwrap(),
+				BlakeTwo256::hash(&chunks[i])
+			);
 		}
 	}
 }
diff --git a/node/core/av-store/src/tests.rs b/node/core/av-store/src/tests.rs
index c96b2dc4f6c6..1101edde928f 100644
--- a/node/core/av-store/src/tests.rs
+++ b/node/core/av-store/src/tests.rs
@@ -16,11 +16,13 @@
 
 use super::*;
 
+use std::convert::TryFrom;
+
 use assert_matches::assert_matches;
 use futures::{channel::oneshot, executor, future, Future};
 use parking_lot::Mutex;
-use polkadot_node_primitives::{AvailableData, BlockData, PoV};
+use polkadot_node_primitives::{AvailableData, BlockData, PoV, Proof};
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_node_subsystem_util::TimeoutExt;
 use polkadot_primitives::v1::{
@@ -287,7 +289,7 @@ fn store_chunk_works() {
 		let chunk = ErasureChunk {
 			chunk: vec![1, 2, 3],
 			index: validator_index,
-			proof: vec![vec![3, 4, 5]],
+			proof: Proof::try_from(vec![vec![3, 4, 5]]).unwrap(),
 		};
 
 		// Ensure an entry already exists. In reality this would come from watching
@@ -333,7 +335,7 @@ fn store_chunk_does_nothing_if_no_entry_already() {
 		let chunk = ErasureChunk {
 			chunk: vec![1, 2, 3],
 			index: validator_index,
-			proof: vec![vec![3, 4, 5]],
+			proof: Proof::try_from(vec![vec![3, 4, 5]]).unwrap(),
 		};
 
 		let (tx, rx) = oneshot::channel();
@@ -441,8 +443,11 @@ fn store_block_works() {
 
 		let mut branches = erasure::branches(chunks.as_ref());
 		let branch = branches.nth(5).unwrap();
-		let expected_chunk =
-			ErasureChunk { chunk: branch.1.to_vec(), index: ValidatorIndex(5), proof: branch.0 };
+		let expected_chunk = ErasureChunk {
+			chunk: branch.1.to_vec(),
+			index: ValidatorIndex(5),
+			proof: Proof::try_from(branch.0).unwrap(),
+		};
 
 		assert_eq!(chunk, expected_chunk);
 		virtual_overseer
@@ -545,7 +550,7 @@ fn query_all_chunks_works() {
 			let chunk = ErasureChunk {
 				chunk: vec![1, 2, 3],
 				index: ValidatorIndex(1),
-				proof: vec![vec![3, 4, 5]],
+				proof: Proof::try_from(vec![vec![3, 4, 5]]).unwrap(),
 			};
 
 			let (tx, rx) = oneshot::channel();
diff --git a/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/node/network/availability-distribution/src/requester/fetch_task/mod.rs
index 4800de26d523..4eed9440952a 100644
--- a/node/network/availability-distribution/src/requester/fetch_task/mod.rs
+++ b/node/network/availability-distribution/src/requester/fetch_task/mod.rs
@@ -363,7 +363,7 @@ impl RunningTask {
 
 	fn validate_chunk(&self, validator: &AuthorityDiscoveryId, chunk: &ErasureChunk) -> bool {
 		let anticipated_hash =
-			match branch_hash(&self.erasure_root, &chunk.proof, chunk.index.0 as usize) {
+			match branch_hash(&self.erasure_root, &chunk.proof_as_vec(), chunk.index.0 as usize) {
 				Ok(hash) => hash,
 				Err(e) => {
 					tracing::warn!(
diff --git a/node/network/availability-distribution/src/requester/fetch_task/tests.rs b/node/network/availability-distribution/src/requester/fetch_task/tests.rs
index 48616e336ae5..e39a9d5ef164 100644
--- a/node/network/availability-distribution/src/requester/fetch_task/tests.rs
+++ b/node/network/availability-distribution/src/requester/fetch_task/tests.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
 
-use std::collections::HashMap;
+use std::{collections::HashMap, convert::TryFrom};
 
 use parity_scale_codec::Encode;
 
@@ -29,7 +29,7 @@ use sc_network as network;
 use sp_keyring::Sr25519Keyring;
 
 use polkadot_node_network_protocol::request_response::{v1, Recipient};
-use polkadot_node_primitives::{BlockData, PoV};
+use polkadot_node_primitives::{BlockData, PoV, Proof};
 use polkadot_primitives::v1::{CandidateHash, ValidatorIndex};
 
 use super::*;
@@ -60,7 +60,7 @@ fn task_does_not_accept_invalid_chunk() {
 			Recipient::Authority(Sr25519Keyring::Alice.public().into()),
 			ChunkFetchingResponse::Chunk(v1::ChunkResponse {
 				chunk: vec![1, 2, 3],
-				proof: vec![vec![9, 8, 2], vec![2, 3, 4]],
+				proof: Proof::try_from(vec![vec![9, 8, 2], vec![2, 3, 4]]).unwrap(),
 			}),
 		);
 		m
@@ -170,7 +170,7 @@ fn task_stores_valid_chunk_if_there_is_one() {
 			Recipient::Authority(Sr25519Keyring::Charlie.public().into()),
 			ChunkFetchingResponse::Chunk(v1::ChunkResponse {
 				chunk: vec![1, 2, 3],
-				proof: vec![vec![9, 8, 2], vec![2, 3, 4]],
+				proof: Proof::try_from(vec![vec![9, 8, 2], vec![2, 3, 4]]).unwrap(),
 			}),
 		);
diff --git a/node/network/availability-distribution/src/tests/mock.rs b/node/network/availability-distribution/src/tests/mock.rs
index 0b11b22bf3be..fb01e0b2f9c7 100644
--- a/node/network/availability-distribution/src/tests/mock.rs
+++ b/node/network/availability-distribution/src/tests/mock.rs
@@ -16,12 +16,12 @@
 
 //! Helper functions and tools to generate mock data useful for testing this subsystem.
 
-use std::sync::Arc;
+use std::{convert::TryFrom, sync::Arc};
 
 use sp_keyring::Sr25519Keyring;
 
 use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks};
-use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV};
+use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV, Proof};
 use polkadot_primitives::v1::{
 	CandidateCommitments, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt,
 	GroupIndex, Hash, HeadData, Id as ParaId, OccupiedCore, PersistedValidationData, SessionInfo,
@@ -139,7 +139,7 @@ pub fn get_valid_chunk_data(pov: PoV) -> (Hash, ErasureChunk) {
 		.map(|(index, (proof, chunk))| ErasureChunk {
 			chunk: chunk.to_vec(),
 			index: ValidatorIndex(index as _),
-			proof,
+			proof: Proof::try_from(proof).unwrap(),
 		})
 		.next()
 		.expect("There really should be 10 chunks.");
diff --git a/node/network/availability-recovery/src/lib.rs b/node/network/availability-recovery/src/lib.rs
index ea5822e896a9..732bb373f1f4 100644
--- a/node/network/availability-recovery/src/lib.rs
+++ b/node/network/availability-recovery/src/lib.rs
@@ -301,9 +301,11 @@ impl RequestChunksPhase {
 
 			let validator_index = chunk.index;
 
-			if let Ok(anticipated_hash) =
-				branch_hash(&params.erasure_root, &chunk.proof, chunk.index.0 as usize)
-			{
+			if let Ok(anticipated_hash) = branch_hash(
+				&params.erasure_root,
+				&chunk.proof_as_vec(),
+				chunk.index.0 as usize,
+			) {
 				let erasure_chunk_hash = BlakeTwo256::hash(&chunk.chunk);
 
 				if erasure_chunk_hash != anticipated_hash {
diff --git a/node/network/availability-recovery/src/tests.rs b/node/network/availability-recovery/src/tests.rs
index fcd2575026e1..ed9b5b7ebaa1 100644
--- a/node/network/availability-recovery/src/tests.rs
+++ b/node/network/availability-recovery/src/tests.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
 
-use std::{sync::Arc, time::Duration};
+use std::{convert::TryFrom, sync::Arc, time::Duration};
 
 use assert_matches::assert_matches;
 use futures::{executor, future};
@@ -28,7 +28,7 @@ use super::*;
 
 use sc_network::config::RequestResponseConfig;
 
 use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks};
-use polkadot_node_primitives::{BlockData, PoV};
+use polkadot_node_primitives::{BlockData, PoV, Proof};
 use polkadot_node_subsystem_util::TimeoutExt;
 use polkadot_primitives::v1::{AuthorityDiscoveryId, HeadData, PersistedValidationData};
 use polkadot_subsystem::{
@@ -371,7 +371,7 @@ fn derive_erasure_chunks_with_proofs_and_root(
 		.map(|(index, (proof, chunk))| ErasureChunk {
 			chunk: chunk.to_vec(),
 			index: ValidatorIndex(index as _),
-			proof,
+			proof: Proof::try_from(proof).unwrap(),
 		})
 		.collect::<Vec<ErasureChunk>>();
 
diff --git a/node/network/protocol/src/request_response/v1.rs b/node/network/protocol/src/request_response/v1.rs
index 184bcf5f030e..8a63d653e8c7 100644
--- a/node/network/protocol/src/request_response/v1.rs
+++ b/node/network/protocol/src/request_response/v1.rs
@@ -19,7 +19,7 @@
 use parity_scale_codec::{Decode, Encode};
 
 use polkadot_node_primitives::{
-	AvailableData, DisputeMessage, ErasureChunk, PoV, UncheckedDisputeMessage,
+	AvailableData, DisputeMessage, ErasureChunk, PoV, Proof, UncheckedDisputeMessage,
 };
 use polkadot_primitives::v1::{
 	CandidateHash, CandidateReceipt, CommittedCandidateReceipt, Hash, Id as ParaId, ValidatorIndex,
@@ -67,7 +67,7 @@ pub struct ChunkResponse {
 	/// The erasure-encoded chunk of data belonging to the candidate block.
 	pub chunk: Vec<u8>,
 	/// Proof for this chunk's branch in the Merkle tree.
-	pub proof: Vec<Vec<u8>>,
+	pub proof: Proof,
 }
 
 impl From<ErasureChunk> for ChunkResponse {
diff --git a/node/primitives/Cargo.toml b/node/primitives/Cargo.toml
index e1aa4ebf3de0..5f7146eddd82 100644
--- a/node/primitives/Cargo.toml
+++ b/node/primitives/Cargo.toml
@@ -6,6 +6,7 @@ edition = "2018"
 description = "Primitives types for the Node-side"
 
 [dependencies]
+bounded-vec = "0.4"
 futures = "0.3.15"
 polkadot-primitives = { path = "../../primitives" }
 polkadot-statement-table = { path = "../../statement-table" }
diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs
index 19ee14f05acf..ae982708c8cb 100644
--- a/node/primitives/src/lib.rs
+++ b/node/primitives/src/lib.rs
@@ -22,11 +22,12 @@
 
 #![deny(missing_docs)]
 
-use std::pin::Pin;
+use std::{convert::TryFrom, pin::Pin};
 
+use bounded_vec::BoundedVec;
 use futures::Future;
-use parity_scale_codec::{Decode, Encode};
-use serde::{Deserialize, Serialize};
+use parity_scale_codec::{Decode, Encode, Error as CodecError, Input};
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
 
 pub use sp_consensus_babe::{
 	AllowedSlots as BabeAllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch,
@@ -51,6 +52,11 @@ pub use disputes::{
 	SignedDisputeStatement, UncheckedDisputeMessage, ValidDisputeVote,
 };
 
+// For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node.
+const MERKLE_NODE_MAX_SIZE: usize = 512;
+// A 16-ary Merkle Prefix Trie over a 32-bit ValidatorIndex has depth at most 8.
+const MERKLE_PROOF_MAX_DEPTH: usize = 8;
+
 /// The bomb limit for decompressing code blobs.
 pub const VALIDATION_CODE_BOMB_LIMIT: usize = (MAX_CODE_SIZE * 4u32) as usize;
 
@@ -287,6 +293,95 @@ pub struct AvailableData {
 	pub validation_data: PersistedValidationData,
 }
 
+/// A convenience type that lets an erasure chunk proof decode into a nested `BoundedVec`.
+#[derive(PartialEq, Eq, Clone, Debug, Hash)]
+pub struct Proof(BoundedVec<BoundedVec<u8, 1, MERKLE_NODE_MAX_SIZE>, 1, MERKLE_PROOF_MAX_DEPTH>);
+
+impl Proof {
+	/// Converts the proof back to the standard nested `Vec` format.
+	pub fn as_vec(&self) -> Vec<Vec<u8>> {
+		self.0.as_vec().iter().map(|v| v.as_vec().clone()).collect()
+	}
+}
+
+#[derive(thiserror::Error, Debug)]
+/// Errors that can occur when converting a nested `Vec` into a [`Proof`].
+pub enum MerkleProofError {
+	#[error("Merkle max proof depth exceeded {0} > {}.", MERKLE_PROOF_MAX_DEPTH)]
+	/// This error signifies that the proof length exceeds the trie's maximum depth.
+	MerkleProofDepthExceeded(usize),
+
+	#[error("Merkle node max size exceeded {0} > {}.", MERKLE_NODE_MAX_SIZE)]
+	/// This error signifies that a proof node exceeds the 16-ary maximum node size.
+	MerkleProofNodeSizeExceeded(usize),
+}
+
+impl TryFrom<Vec<Vec<u8>>> for Proof {
+	type Error = MerkleProofError;
+
+	fn try_from(input: Vec<Vec<u8>>) -> Result<Self, Self::Error> {
+		if input.len() > MERKLE_PROOF_MAX_DEPTH {
+			return Err(Self::Error::MerkleProofDepthExceeded(input.len()))
+		}
+		let mut out = Vec::new();
+		for element in input.into_iter() {
+			let length = element.len();
+			let data: BoundedVec<u8, 1, MERKLE_NODE_MAX_SIZE> = BoundedVec::from_vec(element)
+				.map_err(|_| Self::Error::MerkleProofNodeSizeExceeded(length))?;
+			out.push(data);
+		}
+		Ok(Proof(BoundedVec::from_vec(out).expect("Buffer size is determined above. qed")))
+	}
+}
+
+impl Decode for Proof {
+	fn decode<I: Input>(value: &mut I) -> Result<Self, CodecError> {
+		let temp: Vec<Vec<u8>> = Decode::decode(value)?;
+		let mut out = Vec::new();
+		for element in temp.into_iter() {
+			let bounded_temp: Result<BoundedVec<u8, 1, MERKLE_NODE_MAX_SIZE>, CodecError> =
+				BoundedVec::from_vec(element)
+					.map_err(|_| "Inner node exceeds maximum node size.".into());
+			out.push(bounded_temp?);
+		}
+		BoundedVec::from_vec(out)
+			.map(Self)
+			.map_err(|_| "Merkle proof depth exceeds maximum trie depth".into())
+	}
+}
+
+impl Encode for Proof {
+	fn size_hint(&self) -> usize {
+		MERKLE_NODE_MAX_SIZE * MERKLE_PROOF_MAX_DEPTH
+	}
+
+	fn using_encoded<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {
+		let temp = self.as_vec();
+		temp.using_encoded(f)
+	}
+}
+
+impl Serialize for Proof {
+	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+	where
+		S: Serializer,
+	{
+		serializer.serialize_bytes(&self.encode())
+	}
+}
+
+impl<'de> Deserialize<'de> for Proof {
+	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+	where
+		D: Deserializer<'de>,
+	{
+		// Deserialize the raw bytes, then SCALE-decode them into a bounded `Proof`.
+		let s = Vec::<u8>::deserialize(deserializer)?;
+		let mut slice = s.as_slice();
+		Decode::decode(&mut slice).map_err(de::Error::custom)
+	}
+}
+
 /// A chunk of erasure-encoded block data.
 #[derive(PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Debug, Hash)]
 pub struct ErasureChunk {
@@ -295,7 +390,14 @@ pub struct ErasureChunk {
 	/// The index of this erasure-encoded chunk of data.
 	pub index: ValidatorIndex,
 	/// Proof for this chunk's branch in the Merkle tree.
-	pub proof: Vec<Vec<u8>>,
+	pub proof: Proof,
+}
+
+impl ErasureChunk {
+	/// Convert the bounded `Proof` into a regular nested `Vec<Vec<u8>>`.
+	pub fn proof_as_vec(&self) -> Vec<Vec<u8>> {
+		self.proof.as_vec()
+	}
 }
 
 /// Compress a PoV, unless it exceeds the [`POV_BOMB_LIMIT`].
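For reviewers, here is a small standalone sketch (not part of the patch) of how the new `Proof` type behaves. It uses only the `Proof` API added in `node/primitives/src/lib.rs` above; the `main` wrapper and the concrete byte values are illustrative, not taken from the tests in this diff.

```rust
use std::convert::TryFrom;

use parity_scale_codec::{Decode, Encode};
use polkadot_node_primitives::Proof;

fn main() {
	// A well-formed proof: at most MERKLE_PROOF_MAX_DEPTH (8) nodes,
	// each between 1 and MERKLE_NODE_MAX_SIZE (512) bytes.
	let nodes = vec![vec![1u8; 32], vec![2u8; 64]];
	let proof = Proof::try_from(nodes.clone()).expect("within bounds; qed");

	// `as_vec` recovers the plain nested format used before this change.
	assert_eq!(proof.as_vec(), nodes);

	// `Encode` delegates to the `Vec<Vec<u8>>` encoding, so the wire format
	// is unchanged; `Decode` now re-validates both bounds on the way in.
	let encoded = proof.encode();
	assert_eq!(encoded, nodes.encode());
	assert_eq!(Proof::decode(&mut &encoded[..]).expect("round trips"), proof);

	// Oversized inputs are rejected instead of being accepted unchecked:
	// nine nodes exceed the maximum trie depth of eight...
	assert!(Proof::try_from(vec![vec![0u8; 1]; 9]).is_err());
	// ...and a 513-byte node exceeds the 512-byte node size limit.
	assert!(Proof::try_from(vec![vec![0u8; 513]]).is_err());
}
```

Because `Encode` stays byte-compatible with the old `Vec<Vec<u8>>` layout, the network format does not change; the gain is that decoders now reject proofs that could not have come from an 8-level, 16-ary trie instead of allocating unbounded data from untrusted chunk responses.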