diff --git a/.github/workflows/rustsec-audit.yml b/.github/workflows/rustsec-audit.yml index 19eb1e81e4..ad93828343 100644 --- a/.github/workflows/rustsec-audit.yml +++ b/.github/workflows/rustsec-audit.yml @@ -20,5 +20,6 @@ jobs: - uses: rustsec/audit-check@69366f33c96575abad1ee0dba8212993eecbe998 #v2.0.0 with: token: ${{ secrets.GITHUB_TOKEN }} - # TODO: Remove once Substrate upgrades litep2p and we no longer have rustls 0.20.9 in our dependencies - ignore: RUSTSEC-2024-0336 + # TODO: Remove first once Substrate upgrades litep2p and we no longer have rustls 0.20.9 in our dependencies + # TODO: Remove second once Substrate upgrades libp2p and we no longer have old idna in our dependencies + ignore: RUSTSEC-2024-0336,RUSTSEC-2024-0421 diff --git a/crates/pallet-domains/src/block_tree.rs b/crates/pallet-domains/src/block_tree.rs index 02dbe764d3..6c0294c21f 100644 --- a/crates/pallet-domains/src/block_tree.rs +++ b/crates/pallet-domains/src/block_tree.rs @@ -598,7 +598,7 @@ pub(crate) fn prune_receipt( // If the pruned ER is the operator's `latest_submitted_er` for this domain, it means either: // // - All the ER the operator submitted for this domain are confirmed and pruned, so the operator - // can't be targetted by fraud proof later unless it submit other new ERs. + // can't be targeted by fraud proof later unless it submits other new ERs. // // - All the bad ER the operator submitted for this domain are pruned and the operator is already // slashed, so wwe don't need `LatestSubmittedER` to determine if the operator is pending slash. 
diff --git a/crates/pallet-domains/src/tests.rs b/crates/pallet-domains/src/tests.rs index 1a691b70f4..2459685bd7 100644 --- a/crates/pallet-domains/src/tests.rs +++ b/crates/pallet-domains/src/tests.rs @@ -777,7 +777,7 @@ fn test_basic_fraud_proof_processing() { for block_number in bad_receipt_at..=head_domain_number { if block_number == bad_receipt_at { - // The targetted ER should be removed from the block tree + // The targeted ER should be removed from the block tree assert!(BlockTree::::get(domain_id, block_number).is_none()); } else { // All the bad ER's descendants should be marked as pending to prune and the submitter diff --git a/crates/sp-domains-fraud-proof/src/fraud_proof.rs b/crates/sp-domains-fraud-proof/src/fraud_proof.rs index eb7a397e14..157b4c3280 100644 --- a/crates/sp-domains-fraud-proof/src/fraud_proof.rs +++ b/crates/sp-domains-fraud-proof/src/fraud_proof.rs @@ -320,13 +320,13 @@ pub struct FraudProof { pub domain_id: DomainId, /// Hash of the bad receipt this fraud proof targeted pub bad_receipt_hash: HeaderHashFor, - /// The MMR proof for the consensus state root that used to verify the storage proof + /// The MMR proof for the consensus state root that is used to verify the storage proof /// - /// It is set `None` if the specific fraud proof variant doesn't contains storage proof + /// It is set `None` if the specific fraud proof variant doesn't contain a storage proof pub maybe_mmr_proof: Option>, /// The domain runtime code storage proof /// - /// It is set `None` if the specific fraud proof variant doesn't required domain runtime code + /// It is set `None` if the specific fraud proof variant doesn't require domain runtime code /// or the required domain runtime code is available from the current runtime state. 
pub maybe_domain_runtime_code_proof: Option>, /// The specific fraud proof variant @@ -485,7 +485,7 @@ pub struct InvalidStateTransitionProof { /// Fraud proof for the valid bundles in `ExecutionReceipt::inboxed_bundles` #[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)] pub struct ValidBundleProof { - /// The targetted bundle with proof + /// The targeted bundle with proof pub bundle_with_proof: OpaqueBundleWithProof, } @@ -493,10 +493,12 @@ pub struct ValidBundleProof { pub struct InvalidExtrinsicsRootProof { /// Valid Bundle digests pub valid_bundle_digests: Vec, - /// Block randomness storage proof - pub block_randomness_proof: BlockRandomnessProof, + /// The storage proof used during verification - pub domain_inherent_extrinsic_data_proof: DomainInherentExtrinsicDataProof, + pub invalid_inherent_extrinsic_proof: InvalidInherentExtrinsicProof, + + /// Optional sudo extrinsic call storage proof + pub domain_sudo_call_proof: DomainSudoCallStorageProof, } #[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)] diff --git a/crates/sp-domains-fraud-proof/src/host_functions.rs b/crates/sp-domains-fraud-proof/src/host_functions.rs index eede593383..21add775b0 100644 --- a/crates/sp-domains-fraud-proof/src/host_functions.rs +++ b/crates/sp-domains-fraud-proof/src/host_functions.rs @@ -262,6 +262,8 @@ where domain_inherent_extrinsic_data: DomainInherentExtrinsicData, ) -> Option { let DomainInherentExtrinsicData { + // Used by caller + block_randomness: _, timestamp, maybe_domain_runtime_upgrade, consensus_transaction_byte_fee, diff --git a/crates/sp-domains-fraud-proof/src/lib.rs b/crates/sp-domains-fraud-proof/src/lib.rs index f061c28796..24f30cfa7c 100644 --- a/crates/sp-domains-fraud-proof/src/lib.rs +++ b/crates/sp-domains-fraud-proof/src/lib.rs @@ -54,7 +54,7 @@ use sp_runtime::transaction_validity::{InvalidTransaction, TransactionValidity}; use sp_runtime::OpaqueExtrinsic; use sp_runtime_interface::pass_by; use 
sp_runtime_interface::pass_by::PassBy; -use subspace_core_primitives::U256; +use subspace_core_primitives::{Randomness, U256}; use subspace_runtime_primitives::{Balance, Moment}; /// Custom invalid validity code for the extrinsics in pallet-domains. @@ -108,6 +108,7 @@ pub enum DomainChainAllowlistUpdateExtrinsic { #[derive(Debug, Decode, Encode, TypeInfo, PartialEq, Eq, Clone)] pub struct DomainInherentExtrinsicData { + pub block_randomness: Randomness, pub timestamp: Moment, pub maybe_domain_runtime_upgrade: Option>, pub consensus_transaction_byte_fee: Balance, diff --git a/crates/sp-domains-fraud-proof/src/storage_proof.rs b/crates/sp-domains-fraud-proof/src/storage_proof.rs index cda26cfd54..e0c15e3db7 100644 --- a/crates/sp-domains-fraud-proof/src/storage_proof.rs +++ b/crates/sp-domains-fraud-proof/src/storage_proof.rs @@ -45,7 +45,7 @@ pub enum VerificationError { RuntimeRegistryStorageProof(StorageProofVerificationError), DynamicCostOfStorageStorageProof(StorageProofVerificationError), DigestStorageProof(StorageProofVerificationError), - BlockFessStorageProof(StorageProofVerificationError), + BlockFeesStorageProof(StorageProofVerificationError), TransfersStorageProof(StorageProofVerificationError), ExtrinsicStorageProof(StorageProofVerificationError), DomainSudoCallStorageProof(StorageProofVerificationError), @@ -414,16 +414,27 @@ impl MaybeDomainRuntimeUpgradedProof { } #[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)] -pub struct DomainInherentExtrinsicDataProof { +pub struct InvalidInherentExtrinsicProof { + /// Block randomness storage proof + pub block_randomness_proof: BlockRandomnessProof, + + /// Block timestamp storage proof pub timestamp_proof: TimestampStorageProof, + + /// Optional domain runtime code upgrade storage proof pub maybe_domain_runtime_upgrade_proof: MaybeDomainRuntimeUpgradedProof, + + /// Boolean indicating if dynamic cost of storage was used (but as a storage proof) pub dynamic_cost_of_storage_proof: 
DynamicCostOfStorageProof, + + /// Transaction fee storage proof pub consensus_chain_byte_fee_proof: ConsensusTransactionByteFeeProof, + + /// Change in the allowed chains storage proof pub domain_chain_allowlist_proof: DomainChainsAllowlistUpdateStorageProof, - pub domain_sudo_call_proof: DomainSudoCallStorageProof, } -impl DomainInherentExtrinsicDataProof { +impl InvalidInherentExtrinsicProof { #[cfg(feature = "std")] #[allow(clippy::let_and_return)] pub fn generate< @@ -437,6 +448,8 @@ impl DomainInherentExtrinsicDataProof { block_hash: Block::Hash, maybe_runtime_id: Option, ) -> Result { + let block_randomness_proof = + BlockRandomnessProof::generate(proof_provider, block_hash, (), storage_key_provider)?; let timestamp_proof = TimestampStorageProof::generate(proof_provider, block_hash, (), storage_key_provider)?; let maybe_domain_runtime_upgrade_proof = MaybeDomainRuntimeUpgradedProof::generate( @@ -464,20 +477,13 @@ impl DomainInherentExtrinsicDataProof { storage_key_provider, )?; - let domain_sudo_call_proof = DomainSudoCallStorageProof::generate( - proof_provider, - block_hash, - domain_id, - storage_key_provider, - )?; - Ok(Self { + block_randomness_proof, timestamp_proof, maybe_domain_runtime_upgrade_proof, dynamic_cost_of_storage_proof, consensus_chain_byte_fee_proof, domain_chain_allowlist_proof, - domain_sudo_call_proof, }) } @@ -487,6 +493,12 @@ impl DomainInherentExtrinsicDataProof { runtime_id: RuntimeId, state_root: &Block::Hash, ) -> Result { + let block_randomness = >::verify::( + self.block_randomness_proof.clone(), + (), + state_root, + )?; + let timestamp = >::verify::( self.timestamp_proof.clone(), (), @@ -523,18 +535,14 @@ impl DomainInherentExtrinsicDataProof { state_root, )?; - let domain_sudo_call = >::verify::< - SKP, - >( - self.domain_sudo_call_proof.clone(), domain_id, state_root - )?; - Ok(DomainInherentExtrinsicData { + block_randomness, timestamp, maybe_domain_runtime_upgrade, consensus_transaction_byte_fee, domain_chain_allowlist, 
- maybe_sudo_runtime_call: domain_sudo_call.maybe_call, + // Populated by caller + maybe_sudo_runtime_call: None, }) } } diff --git a/crates/sp-domains-fraud-proof/src/verification.rs b/crates/sp-domains-fraud-proof/src/verification.rs index 4baaffef68..af8715cadb 100644 --- a/crates/sp-domains-fraud-proof/src/verification.rs +++ b/crates/sp-domains-fraud-proof/src/verification.rs @@ -65,19 +65,24 @@ where { let InvalidExtrinsicsRootProof { valid_bundle_digests, - block_randomness_proof, - domain_inherent_extrinsic_data_proof, - .. + invalid_inherent_extrinsic_proof, + domain_sudo_call_proof, } = fraud_proof; - let domain_inherent_extrinsic_data = domain_inherent_extrinsic_data_proof + let mut domain_inherent_extrinsic_data = invalid_inherent_extrinsic_proof .verify::(domain_id, runtime_id, &state_root)?; - let block_randomness = >::verify::( - block_randomness_proof.clone(), - (), + let domain_sudo_call = >::verify::( + domain_sudo_call_proof.clone(), + domain_id, &state_root, )?; + domain_inherent_extrinsic_data.maybe_sudo_runtime_call = domain_sudo_call.maybe_call; + + let shuffling_seed = H256::from_slice( + extrinsics_shuffling_seed::(domain_inherent_extrinsic_data.block_randomness) + .as_ref(), + ); let DomainInherentExtrinsic { domain_timestamp_extrinsic, @@ -110,9 +115,6 @@ where bundle_extrinsics_digests.extend(bundle_digest.bundle_digest.clone()); } - let shuffling_seed = - H256::from_slice(extrinsics_shuffling_seed::(block_randomness).as_ref()); - let mut ordered_extrinsics = deduplicate_and_shuffle_extrinsics( bundle_extrinsics_digests, Randomness::from(shuffling_seed.to_fixed_bytes()), @@ -379,7 +381,7 @@ where ) .map_err(|err| { VerificationError::StorageProof( - storage_proof::VerificationError::BlockFessStorageProof(err), + storage_proof::VerificationError::BlockFeesStorageProof(err), ) })?; diff --git a/crates/subspace-core-primitives/src/pieces.rs b/crates/subspace-core-primitives/src/pieces.rs index e22a9349d2..556ff0b0e0 100644 --- 
a/crates/subspace-core-primitives/src/pieces.rs +++ b/crates/subspace-core-primitives/src/pieces.rs @@ -100,6 +100,12 @@ impl PieceIndex { /// Piece index 1. pub const ONE: PieceIndex = PieceIndex(1); + /// Create new instance + #[inline] + pub const fn new(n: u64) -> Self { + Self(n) + } + /// Create piece index from bytes. #[inline] pub const fn from_bytes(bytes: [u8; Self::SIZE]) -> Self { @@ -114,8 +120,8 @@ impl PieceIndex { /// Segment index piece index corresponds to #[inline] - pub fn segment_index(&self) -> SegmentIndex { - SegmentIndex::from(self.0 / ArchivedHistorySegment::NUM_PIECES as u64) + pub const fn segment_index(&self) -> SegmentIndex { + SegmentIndex::new(self.0 / ArchivedHistorySegment::NUM_PIECES as u64) } /// Position of a piece in a segment @@ -130,28 +136,42 @@ impl PieceIndex { #[inline] pub const fn source_position(&self) -> u32 { assert!(self.is_source()); - self.position() / (Self::source_ratio() as u32) + + let source_start = self.position() / RecordedHistorySegment::ERASURE_CODING_RATE.1 as u32 + * RecordedHistorySegment::ERASURE_CODING_RATE.0 as u32; + let source_offset = self.position() % RecordedHistorySegment::ERASURE_CODING_RATE.1 as u32; + + source_start + source_offset + } + + /// Returns the piece index for a source position and segment index. + /// Overflows to the next segment if the position is greater than the last source position. + #[inline] + pub const fn from_source_position( + source_position: u32, + segment_index: SegmentIndex, + ) -> PieceIndex { + let source_position = source_position as u64; + let start = source_position / RecordedHistorySegment::ERASURE_CODING_RATE.0 as u64 + * RecordedHistorySegment::ERASURE_CODING_RATE.1 as u64; + let offset = source_position % RecordedHistorySegment::ERASURE_CODING_RATE.0 as u64; + + PieceIndex(segment_index.first_piece_index().0 + start + offset) } /// Is this piece index a source piece? 
#[inline] pub const fn is_source(&self) -> bool { // Source pieces are interleaved with parity pieces, source first - self.0 % Self::source_ratio() == 0 + self.0 % (RecordedHistorySegment::ERASURE_CODING_RATE.1 as u64) + < (RecordedHistorySegment::ERASURE_CODING_RATE.0 as u64) } - /// Returns the next source piece index + /// Returns the next source piece index. + /// Panics if the piece is not a source piece. #[inline] pub const fn next_source_index(&self) -> PieceIndex { - PieceIndex(self.0.next_multiple_of(Self::source_ratio())) - } - - /// The ratio of source pieces to all pieces - #[inline] - const fn source_ratio() -> u64 { - // Assumes the result is an integer - (RecordedHistorySegment::ERASURE_CODING_RATE.1 - / RecordedHistorySegment::ERASURE_CODING_RATE.0) as u64 + PieceIndex::from_source_position(self.source_position() + 1, self.segment_index()) } } diff --git a/crates/subspace-core-primitives/src/segments.rs b/crates/subspace-core-primitives/src/segments.rs index 274a4d587c..bb4957695c 100644 --- a/crates/subspace-core-primitives/src/segments.rs +++ b/crates/subspace-core-primitives/src/segments.rs @@ -86,13 +86,15 @@ impl SegmentIndex { } /// Get the first piece index in this segment. - pub fn first_piece_index(&self) -> PieceIndex { - PieceIndex::from(self.0 * ArchivedHistorySegment::NUM_PIECES as u64) + #[inline] + pub const fn first_piece_index(&self) -> PieceIndex { + PieceIndex::new(self.0 * ArchivedHistorySegment::NUM_PIECES as u64) } /// Get the last piece index in this segment. - pub fn last_piece_index(&self) -> PieceIndex { - PieceIndex::from((self.0 + 1) * ArchivedHistorySegment::NUM_PIECES as u64 - 1) + #[inline] + pub const fn last_piece_index(&self) -> PieceIndex { + PieceIndex::new((self.0 + 1) * ArchivedHistorySegment::NUM_PIECES as u64 - 1) } /// List of piece indexes that belong to this segment. 
diff --git a/crates/subspace-core-primitives/src/tests.rs b/crates/subspace-core-primitives/src/tests.rs index 5e86aae379..c8a7e4433e 100644 --- a/crates/subspace-core-primitives/src/tests.rs +++ b/crates/subspace-core-primitives/src/tests.rs @@ -1,6 +1,159 @@ +use crate::pieces::PieceIndex; +use crate::segments::{ArchivedHistorySegment, RecordedHistorySegment, SegmentIndex}; use crate::U256; #[test] fn piece_distance_middle() { assert_eq!(U256::MIDDLE, U256::MAX / 2); } + +/// piece index, piece position, source position, segment index, next source index +const SOURCE_PIECE_INDEX_TEST_CASES: &[(u64, u32, u32, u64, u64)] = &[ + (0, 0, 0, 0, 2), + (2, 2, 1, 0, 4), + (126, 126, 63, 0, 128), + (128, 128, 64, 0, 130), + (252, 252, 126, 0, 254), + (254, 254, 127, 0, 256), + (256, 0, 0, 1, 258), + (510, 254, 127, 1, 512), + (512, 0, 0, 2, 514), + // Extreme values + ( + u64::MAX / 4 - 1, + 254, + 127, + u64::MAX / 1024, + u64::MAX / 4 + 1, + ), + // Overflows + //(u64::MAX - 3, 252, 126, u64::MAX / 256, u64::MAX - 1), + //(u64::MAX - 1, 254, 127, u64::MAX/256, overflows), +]; + +/// piece index, piece position, segment index +const PARITY_PIECE_INDEX_TEST_CASES: &[(u64, u32, u64)] = &[ + (1, 1, 0), + (3, 3, 0), + (127, 127, 0), + (129, 129, 0), + (253, 253, 0), + (255, 255, 0), + (257, 1, 1), + (511, 255, 1), + (513, 1, 2), + // Extreme values + (u64::MAX / 4, 255, u64::MAX / 1024), + // Overflows + //(u64::MAX - 2, 253, u64::MAX / 256), + //(u64::MAX, 255, u64::MAX / 256), +]; + +#[test] +fn source_piece_index_conversion() { + for &(piece_index, piece_position, source_position, segment_index, next_source_piece_index) in + SOURCE_PIECE_INDEX_TEST_CASES + { + let piece_index = PieceIndex::new(piece_index); + let segment_index = SegmentIndex::new(segment_index); + let next_source_piece_index = PieceIndex::new(next_source_piece_index); + + println!( + "{:?} {:?} {:?} {:?} {:?}", + piece_index, piece_position, source_position, segment_index, next_source_piece_index + ); + + 
assert_eq!(piece_index.position(), piece_position); + + assert_eq!(piece_index.source_position(), source_position); + assert_eq!( + PieceIndex::from_source_position(source_position, segment_index), + piece_index + ); + + assert_eq!(piece_index.segment_index(), segment_index); + assert_eq!(piece_index.next_source_index(), next_source_piece_index); + assert!(piece_index.is_source(), "{:?}", piece_index); + + if piece_position == 0 { + assert_eq!(segment_index.first_piece_index(), piece_index); + } + + // Is at piece_position index in SegmentIndex::segment_piece_indexes() + assert_eq!( + segment_index + .segment_piece_indexes() + .get(piece_position as usize), + Some(&piece_index) + ); + + // Is at source_position index in SegmentIndex::segment_piece_indexes_source_first() + assert_eq!( + segment_index + .segment_piece_indexes_source_first() + .get(source_position as usize), + Some(&piece_index) + ); + } +} + +#[test] +fn parity_piece_index_conversion() { + for &(piece_index, piece_position, segment_index) in PARITY_PIECE_INDEX_TEST_CASES { + let piece_index = PieceIndex::new(piece_index); + let segment_index = SegmentIndex::new(segment_index); + + println!("{:?} {:?} {:?}", piece_index, piece_position, segment_index,); + + assert_eq!(piece_index.position(), piece_position); + + assert_eq!(piece_index.segment_index(), segment_index); + assert!(!piece_index.is_source(), "{:?}", piece_index); + + if piece_position as usize == ArchivedHistorySegment::NUM_PIECES - 1 { + assert_eq!(segment_index.last_piece_index(), piece_index); + } + + // Is at piece_position index in SegmentIndex::segment_piece_indexes() + assert_eq!( + segment_index + .segment_piece_indexes() + .get(piece_position as usize), + Some(&piece_index) + ); + + // Is at the corresponding index in the second half of SegmentIndex::segment_piece_indexes_source_first() + assert_eq!( + segment_index + .segment_piece_indexes_source_first() + .get(piece_position as usize / 2 + 
RecordedHistorySegment::NUM_RAW_RECORDS), + Some(&piece_index) + ); + } +} + +#[test] +#[should_panic] +fn parity_piece_index_position_panic() { + for &(piece_index, piece_position, segment_index) in PARITY_PIECE_INDEX_TEST_CASES { + let piece_index = PieceIndex::new(piece_index); + + println!("{:?} {:?} {:?}", piece_index, piece_position, segment_index); + + // Always panics + piece_index.source_position(); + } +} + +#[test] +#[should_panic] +fn parity_piece_index_next_source_panic() { + for &(piece_index, piece_position, segment_index) in PARITY_PIECE_INDEX_TEST_CASES { + let piece_index = PieceIndex::new(piece_index); + + println!("{:?} {:?} {:?}", piece_index, piece_position, segment_index); + + // Always panics + piece_index.next_source_index(); + } +} diff --git a/crates/subspace-farmer/src/cluster/plotter.rs b/crates/subspace-farmer/src/cluster/plotter.rs index 4d67819ed5..cc11200e15 100644 --- a/crates/subspace-farmer/src/cluster/plotter.rs +++ b/crates/subspace-farmer/src/cluster/plotter.rs @@ -10,6 +10,7 @@ use crate::cluster::nats_client::{GenericRequest, GenericStreamRequest, NatsClie use crate::plotter::{Plotter, SectorPlottingProgress}; use crate::utils::AsyncJoinOnDrop; use anyhow::anyhow; +use async_nats::RequestErrorKind; use async_trait::async_trait; use backoff::backoff::Backoff; use backoff::ExponentialBackoff; @@ -494,19 +495,37 @@ where return None; } } - // TODO: Handle different kinds of errors differently, not all of them are - // fatal - Err(error) => { - progress_updater - .update_progress_and_events( - progress_sender, - SectorPlottingProgress::Error { - error: format!("Failed to get free plotter instance: {error}"), - }, - ) - .await; - return None; - } + Err(error) => match error.kind() { + RequestErrorKind::NoResponders => { + if let Some(delay) = retry_backoff_policy.next_backoff() { + debug!("No plotters, retrying"); + + tokio::time::sleep(delay).await; + continue; + } else { + progress_updater + .update_progress_and_events( + 
progress_sender, + SectorPlottingProgress::Error { + error: "No plotters, exiting".to_string(), + }, + ) + .await; + return None; + } + } + RequestErrorKind::TimedOut | RequestErrorKind::Other => { + progress_updater + .update_progress_and_events( + progress_sender, + SectorPlottingProgress::Error { + error: format!("Failed to get free plotter instance: {error}"), + }, + ) + .await; + return None; + } + }, }; } } diff --git a/crates/subspace-gateway/src/commands/http.rs b/crates/subspace-gateway/src/commands/http.rs index 43ef3d2e74..fceac97021 100644 --- a/crates/subspace-gateway/src/commands/http.rs +++ b/crates/subspace-gateway/src/commands/http.rs @@ -1,5 +1,5 @@ //! Gateway http command. -//! This command start an HTTP server to serve object requests. +//! This command starts an HTTP server to serve object requests. pub(crate) mod server; @@ -15,14 +15,14 @@ pub(crate) struct HttpCommandOptions { #[clap(flatten)] gateway_options: GatewayOptions, - #[arg(long, default_value = "127.0.0.1:3000")] + #[arg(long, default_value = "http://127.0.0.1:3000")] indexer_endpoint: String, #[arg(long, default_value = "127.0.0.1:8080")] http_listen_on: String, } -/// Runs an HTTP server +/// Runs an HTTP server which fetches DSN objects based on object hashes. pub async fn run(run_options: HttpCommandOptions) -> anyhow::Result<()> { let signal = shutdown_signal(); diff --git a/crates/subspace-gateway/src/commands/http/server.rs b/crates/subspace-gateway/src/commands/http/server.rs index e9d23c33cd..d222509e63 100644 --- a/crates/subspace-gateway/src/commands/http/server.rs +++ b/crates/subspace-gateway/src/commands/http/server.rs @@ -1,3 +1,5 @@ +//! HTTP server which fetches objects from the DSN based on a hash, using a mapping indexer service. 
+ use actix_web::{web, App, HttpResponse, HttpServer, Responder}; use serde::{Deserialize, Deserializer, Serialize}; use std::default::Default; @@ -9,6 +11,7 @@ use subspace_data_retrieval::object_fetcher::ObjectFetcher; use subspace_data_retrieval::piece_getter::PieceGetter; use tracing::{debug, error, trace}; +/// Parameters for the DSN object HTTP server. pub(crate) struct ServerParameters where PG: PieceGetter + Send + Sync + 'static, @@ -18,6 +21,7 @@ where pub(crate) http_endpoint: String, } +/// Object mapping format from the indexer service. #[derive(Serialize, Deserialize, Debug, Default)] #[serde(rename_all = "camelCase")] struct ObjectMapping { @@ -28,6 +32,7 @@ struct ObjectMapping { block_number: BlockNumber, } +/// Utility function to deserialize a JSON string into a u32. fn string_to_u32<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, @@ -36,68 +41,52 @@ where s.parse::().map_err(serde::de::Error::custom) } -async fn request_object_mappings(endpoint: String, key: String) -> anyhow::Result { +/// Requests an object mapping with `hash` from the indexer service. +async fn request_object_mapping(endpoint: &str, hash: Blake3Hash) -> anyhow::Result { let client = reqwest::Client::new(); - let object_mappings_url = format!("http://{}/objects/{}", endpoint, key,); + let object_mappings_url = format!("{}/objects/{}", endpoint, hex::encode(hash)); - debug!(?key, ?object_mappings_url, "Requesting object mapping..."); + debug!(?hash, ?object_mappings_url, "Requesting object mapping..."); let response = client - .get(object_mappings_url.clone()) + .get(&object_mappings_url) .send() .await? 
.json::() .await; match &response { Ok(json) => { - trace!(?key, ?json, "Requested object mapping."); + trace!(?hash, ?json, "Received object mapping"); } Err(err) => { - error!(?key, ?err, ?object_mappings_url, "Request failed"); + error!(?hash, ?err, ?object_mappings_url, "Request failed"); } } response.map_err(|err| err.into()) } +/// Fetches a DSN object with `hash`, using the mapping indexer service. async fn serve_object( - key: web::Path, + hash: web::Path, additional_data: web::Data>>, ) -> impl Responder where PG: PieceGetter + Send + Sync + 'static, { let server_params = additional_data.into_inner(); - let key = key.into_inner(); - - // Validate object hash - let decode_result = hex::decode(key.clone()); - let object_hash = match decode_result { - Ok(hash) => { - if hash.len() != Blake3Hash::SIZE { - error!(?key, ?hash, "Invalid hash provided."); - return HttpResponse::BadRequest().finish(); - } - - Blake3Hash::try_from(hash.as_slice()).expect("Hash size was confirmed.") - } - Err(err) => { - error!(?key, ?err, "Invalid hash provided."); - return HttpResponse::BadRequest().finish(); - } - }; + let hash = hash.into_inner(); - let Ok(object_mapping) = - request_object_mappings(server_params.indexer_endpoint.clone(), key.clone()).await + let Ok(object_mapping) = request_object_mapping(&server_params.indexer_endpoint, hash).await else { return HttpResponse::BadRequest().finish(); }; - if object_mapping.hash != object_hash { + if object_mapping.hash != hash { error!( - ?key, - object_mapping_hash=?object_mapping.hash, - "Requested hash doesn't match object mapping." 
+ ?object_mapping, + ?hash, + "Returned object mapping doesn't match requested hash" ); return HttpResponse::ServiceUnavailable().finish(); } @@ -109,22 +98,24 @@ where let object = match object_fetcher_result { Ok(object) => { - trace!(?key, size=%object.len(), "Object fetched successfully"); + trace!(?hash, size = %object.len(), "Object fetched successfully"); let data_hash = blake3_hash(&object); - if data_hash != object_hash { + if data_hash != hash { error!( ?data_hash, - ?object_hash, - "Retrieved data did not match mapping hash" + data_size = %object.len(), + ?hash, + "Retrieved data doesn't match requested mapping hash" ); + trace!(data = %hex::encode(object), "Retrieved data"); return HttpResponse::ServiceUnavailable().finish(); } object } Err(err) => { - error!(?key, ?err, "Failed to fetch object."); + error!(?hash, ?err, "Failed to fetch object"); return HttpResponse::ServiceUnavailable().finish(); } }; @@ -134,6 +125,7 @@ where .body(object) } +/// Starts the DSN object HTTP server. pub async fn start_server(server_params: ServerParameters) -> std::io::Result<()> where PG: PieceGetter + Send + Sync + 'static, diff --git a/crates/subspace-gateway/src/commands/rpc.rs b/crates/subspace-gateway/src/commands/rpc.rs index 0edd2c1a3d..9c5de1cad5 100644 --- a/crates/subspace-gateway/src/commands/rpc.rs +++ b/crates/subspace-gateway/src/commands/rpc.rs @@ -1,5 +1,5 @@ //! Gateway rpc command. -//! This command start an RPC server to serve object requests. +//! This command starts an RPC server to serve object requests from the DSN. pub(crate) mod server; use crate::commands::rpc::server::{launch_rpc_server, RpcOptions, RPC_DEFAULT_PORT}; @@ -21,7 +21,7 @@ pub(crate) struct RpcCommandOptions { rpc_options: RpcOptions, } -/// Runs an RPC server +/// Runs an RPC server which fetches DSN objects based on mappings. 
pub async fn run(run_options: RpcCommandOptions) -> anyhow::Result<()> { let signal = shutdown_signal(); diff --git a/crates/subspace-runtime/src/lib.rs b/crates/subspace-runtime/src/lib.rs index 79392ad9e4..88ec5a160c 100644 --- a/crates/subspace-runtime/src/lib.rs +++ b/crates/subspace-runtime/src/lib.rs @@ -322,9 +322,9 @@ parameter_types! { // Price per byte = Min Number of validators * Storage duration (years) * Storage cost per year // Account data size (80 bytes) // Min Number of redundant validators (100) - For a stable and redundant blockchain we need at least a certain number of full nodes/collators. - // Storage duration (20 years) - It is theoretically unlimited, accounts will stay around while the chain is alive. + // Storage duration (1 year) - It is theoretically unlimited, accounts will stay around while the chain is alive. // Storage cost per year of (12 * 1e-9 * 0.1 ) - SSD storage on cloud hosting costs about 0.1 USD per Gb per month - pub const ExistentialDeposit: Balance = 200_000_000_000_000 * SHANNON; + pub const ExistentialDeposit: Balance = 10_000_000_000_000 * SHANNON; } #[derive( diff --git a/docker/.dockerignore b/docker/.dockerignore index 983f1aefff..8c1b3a993a 100644 --- a/docker/.dockerignore +++ b/docker/.dockerignore @@ -1,7 +1,6 @@ * !/crates !/domains -!/orml !/shared !/test !/Cargo.lock diff --git a/docker/bootstrap-node.Dockerfile b/docker/bootstrap-node.Dockerfile index 3ff6bb7caf..331a8f9420 100644 --- a/docker/bootstrap-node.Dockerfile +++ b/docker/bootstrap-node.Dockerfile @@ -55,17 +55,10 @@ RUN \ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain $RUSTC_VERSION && \ /root/.cargo/bin/rustup target add wasm32-unknown-unknown -COPY Cargo.lock /code/Cargo.lock -COPY Cargo.toml /code/Cargo.toml -COPY rust-toolchain.toml /code/rust-toolchain.toml - -COPY crates /code/crates -COPY domains /code/domains -COPY shared /code/shared -COPY test /code/test - # Up until this line all Rust images 
in this repo should be the same to share the same layers +COPY . /code + ARG TARGETVARIANT RUN \ diff --git a/docker/farmer.Dockerfile b/docker/farmer.Dockerfile index 7c637abfaa..0c88b2e6b8 100644 --- a/docker/farmer.Dockerfile +++ b/docker/farmer.Dockerfile @@ -55,15 +55,6 @@ RUN \ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain $RUSTC_VERSION && \ /root/.cargo/bin/rustup target add wasm32-unknown-unknown -COPY Cargo.lock /code/Cargo.lock -COPY Cargo.toml /code/Cargo.toml -COPY rust-toolchain.toml /code/rust-toolchain.toml - -COPY crates /code/crates -COPY domains /code/domains -COPY shared /code/shared -COPY test /code/test - # Up until this line all Rust images in this repo should be the same to share the same layers # CUDA toolchain, including support for cross-compilation from x86-64 to aarch64, but NOT from aarch64 to x86-64 @@ -104,6 +95,8 @@ RUN \ ldconfig \ ; fi +COPY . /code + ARG TARGETVARIANT # ROCm is only used on x86-64 since they don't have other packages diff --git a/docker/gateway.Dockerfile b/docker/gateway.Dockerfile index 85f4c62f1c..f9a24df5fe 100644 --- a/docker/gateway.Dockerfile +++ b/docker/gateway.Dockerfile @@ -55,17 +55,10 @@ RUN \ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain $RUSTC_VERSION && \ /root/.cargo/bin/rustup target add wasm32-unknown-unknown -COPY Cargo.lock /code/Cargo.lock -COPY Cargo.toml /code/Cargo.toml -COPY rust-toolchain.toml /code/rust-toolchain.toml - -COPY crates /code/crates -COPY domains /code/domains -COPY shared /code/shared -COPY test /code/test - # Up until this line all Rust images in this repo should be the same to share the same layers +COPY . 
/code + ARG TARGETVARIANT RUN \ diff --git a/docker/node.Dockerfile b/docker/node.Dockerfile index 1342f9a3bc..395ebf0989 100644 --- a/docker/node.Dockerfile +++ b/docker/node.Dockerfile @@ -55,17 +55,10 @@ RUN \ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain $RUSTC_VERSION && \ /root/.cargo/bin/rustup target add wasm32-unknown-unknown -COPY Cargo.lock /code/Cargo.lock -COPY Cargo.toml /code/Cargo.toml -COPY rust-toolchain.toml /code/rust-toolchain.toml - -COPY crates /code/crates -COPY domains /code/domains -COPY shared /code/shared -COPY test /code/test - # Up until this line all Rust images in this repo should be the same to share the same layers +COPY . /code + ARG SUBSTRATE_CLI_GIT_COMMIT_HASH ARG TARGETVARIANT diff --git a/docker/runtime.Dockerfile b/docker/runtime.Dockerfile index cdd02c4d44..7c56113a29 100644 --- a/docker/runtime.Dockerfile +++ b/docker/runtime.Dockerfile @@ -55,17 +55,10 @@ RUN \ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain $RUSTC_VERSION && \ /root/.cargo/bin/rustup target add wasm32-unknown-unknown -COPY Cargo.lock /code/Cargo.lock -COPY Cargo.toml /code/Cargo.toml -COPY rust-toolchain.toml /code/rust-toolchain.toml - -COPY crates /code/crates -COPY domains /code/domains -COPY shared /code/shared -COPY test /code/test - # Up until this line all Rust images in this repo should be the same to share the same layers +COPY . /code + ARG TARGETVARIANT RUN \ diff --git a/domains/client/cross-domain-message-gossip/src/aux_schema.rs b/domains/client/cross-domain-message-gossip/src/aux_schema.rs index b2fa4ae0de..60d9d72001 100644 --- a/domains/client/cross-domain-message-gossip/src/aux_schema.rs +++ b/domains/client/cross-domain-message-gossip/src/aux_schema.rs @@ -1,9 +1,10 @@ //! Schema for channel update storage. 
-use crate::message_listener::LOG_TARGET; +use crate::RELAYER_PREFIX; use parity_scale_codec::{Decode, Encode}; use sc_client_api::backend::AuxStore; use sp_blockchain::{Error as ClientError, Info, Result as ClientResult}; +use sp_core::bytes::to_hex; use sp_core::H256; use sp_messenger::messages::{ChainId, ChannelId, ChannelState, Nonce}; use sp_messenger::{ChannelNonce, XdmId}; @@ -11,6 +12,7 @@ use sp_runtime::traits::{Block as BlockT, NumberFor}; use subspace_runtime_primitives::BlockNumber; const CHANNEL_DETAIL: &[u8] = b"channel_detail"; +const LOG_TARGET: &str = "gossip_aux_schema"; fn channel_detail_key( src_chain_id: ChainId, @@ -58,8 +60,8 @@ pub struct ChannelDetail { /// Load the channel state of self_chain_id on chain_id. pub fn get_channel_state( backend: &Backend, - self_chain_id: ChainId, - chain_id: ChainId, + dst_chain_id: ChainId, + src_chain_id: ChainId, channel_id: ChannelId, ) -> ClientResult> where @@ -67,15 +69,15 @@ where { load_decode( backend, - channel_detail_key(chain_id, self_chain_id, channel_id).as_slice(), + channel_detail_key(src_chain_id, dst_chain_id, channel_id).as_slice(), ) } /// Set the channel state of self_chain_id on chain_id. 
pub fn set_channel_state( backend: &Backend, - self_chain_id: ChainId, - chain_id: ChainId, + dst_chain_id: ChainId, + src_chain_id: ChainId, channel_detail: ChannelDetail, ) -> ClientResult<()> where @@ -83,10 +85,23 @@ where { backend.insert_aux( &[( - channel_detail_key(chain_id, self_chain_id, channel_detail.channel_id).as_slice(), + channel_detail_key(src_chain_id, dst_chain_id, channel_detail.channel_id).as_slice(), channel_detail.encode().as_slice(), )], vec![], + )?; + + let channel_nonce = ChannelNonce { + relay_msg_nonce: Some(channel_detail.next_inbox_nonce), + relay_response_msg_nonce: channel_detail.latest_response_received_message_nonce, + }; + let prefix = (RELAYER_PREFIX, src_chain_id).encode(); + cleanup_chain_channel_storages( + backend, + &prefix, + src_chain_id, + channel_detail.channel_id, + channel_nonce, ) } @@ -101,25 +116,36 @@ mod xdm_keys { const XDM_RELAY_RESPONSE: &[u8] = b"relay_msg_response"; const XDM_LAST_CLEANUP_NONCE: &[u8] = b"xdm_last_cleanup_nonce"; - pub(super) fn get_key_for_xdm_id(xdm_id: XdmId) -> Vec { + pub(super) fn get_key_for_xdm_id(prefix: &[u8], xdm_id: XdmId) -> Vec { match xdm_id { - XdmId::RelayMessage(id) => get_key_for_xdm_relay(id), - XdmId::RelayResponseMessage(id) => get_key_for_xdm_relay_response(id), + XdmId::RelayMessage(id) => get_key_for_xdm_relay(prefix, id), + XdmId::RelayResponseMessage(id) => get_key_for_xdm_relay_response(prefix, id), } } pub(super) fn get_key_for_last_cleanup_relay_nonce( + prefix: &[u8], chain_id: ChainId, channel_id: ChannelId, ) -> Vec { - (XDM, XDM_RELAY, XDM_LAST_CLEANUP_NONCE, chain_id, channel_id).encode() + ( + prefix, + XDM, + XDM_RELAY, + XDM_LAST_CLEANUP_NONCE, + chain_id, + channel_id, + ) + .encode() } pub(super) fn get_key_for_last_cleanup_relay_response_nonce( + prefix: &[u8], chain_id: ChainId, channel_id: ChannelId, ) -> Vec { ( + prefix, XDM, XDM_RELAY_RESPONSE, XDM_LAST_CLEANUP_NONCE, @@ -129,19 +155,19 @@ mod xdm_keys { .encode() } - pub(super) fn 
get_key_for_xdm_relay(id: MessageKey) -> Vec { - (XDM, XDM_RELAY, id).encode() + pub(super) fn get_key_for_xdm_relay(prefix: &[u8], id: MessageKey) -> Vec { + (prefix, XDM, XDM_RELAY, id).encode() } - pub(super) fn get_key_for_xdm_relay_response(id: MessageKey) -> Vec { - (XDM, XDM_RELAY_RESPONSE, id).encode() + pub(super) fn get_key_for_xdm_relay_response(prefix: &[u8], id: MessageKey) -> Vec { + (prefix, XDM, XDM_RELAY_RESPONSE, id).encode() } } #[derive(Debug, Encode, Decode, Clone)] -pub(super) struct BlockId { - pub(super) number: NumberFor, - pub(super) hash: Block::Hash, +pub struct BlockId { + pub number: NumberFor, + pub hash: Block::Hash, } impl From> for BlockId { @@ -156,6 +182,7 @@ impl From> for BlockId { /// Store the given XDM ID as processed at given block. pub fn set_xdm_message_processed_at( backend: &Backend, + prefix: &[u8], xdm_id: XdmId, block_id: BlockId, ) -> ClientResult<()> @@ -163,25 +190,30 @@ where Backend: AuxStore, Block: BlockT, { - let key = xdm_keys::get_key_for_xdm_id(xdm_id); + let key = xdm_keys::get_key_for_xdm_id(prefix, xdm_id); backend.insert_aux(&[(key.as_slice(), block_id.encode().as_slice())], vec![]) } /// Returns the maybe last processed block number for given xdm. pub fn get_xdm_processed_block_number( backend: &Backend, + prefix: &[u8], xdm_id: XdmId, ) -> ClientResult>> where Backend: AuxStore, Block: BlockT, { - load_decode(backend, xdm_keys::get_key_for_xdm_id(xdm_id).as_slice()) + load_decode( + backend, + xdm_keys::get_key_for_xdm_id(prefix, xdm_id).as_slice(), + ) } /// Cleans up all the xdm storages until the latest nonces. 
pub fn cleanup_chain_channel_storages( backend: &Backend, + prefix: &[u8], chain_id: ChainId, channel_id: ChannelId, channel_nonce: ChannelNonce, @@ -193,7 +225,7 @@ where let mut to_delete = vec![]; if let Some(latest_relay_nonce) = channel_nonce.relay_msg_nonce { let last_cleanup_relay_nonce_key = - xdm_keys::get_key_for_last_cleanup_relay_nonce(chain_id, channel_id); + xdm_keys::get_key_for_last_cleanup_relay_nonce(prefix, chain_id, channel_id); let last_cleaned_up_nonce = load_decode::<_, Nonce>(backend, last_cleanup_relay_nonce_key.as_slice())?; @@ -204,7 +236,8 @@ where tracing::debug!( target: LOG_TARGET, - "Cleaning Relay xdm keys for {:?} channel: {:?} from: {:?} to: {:?}", + "[{:?}]Cleaning Relay xdm keys for {:?} channel: {:?} from: {:?} to: {:?}", + to_hex(prefix, false), chain_id, channel_id, from_nonce, @@ -212,9 +245,10 @@ where ); while from_nonce <= latest_relay_nonce { - to_delete.push(xdm_keys::get_key_for_xdm_relay(( - chain_id, channel_id, from_nonce, - ))); + to_delete.push(xdm_keys::get_key_for_xdm_relay( + prefix, + (chain_id, channel_id, from_nonce), + )); from_nonce = from_nonce.saturating_add(Nonce::one()); } @@ -223,7 +257,7 @@ where if let Some(latest_relay_response_nonce) = channel_nonce.relay_response_msg_nonce { let last_cleanup_relay_response_nonce_key = - xdm_keys::get_key_for_last_cleanup_relay_response_nonce(chain_id, channel_id); + xdm_keys::get_key_for_last_cleanup_relay_response_nonce(prefix, chain_id, channel_id); let last_cleaned_up_nonce = load_decode::<_, Nonce>(backend, last_cleanup_relay_response_nonce_key.as_slice())?; @@ -234,7 +268,8 @@ where tracing::debug!( target: LOG_TARGET, - "Cleaning Relay response xdm keys for {:?} channel: {:?} from: {:?} to: {:?}", + "[{:?}]Cleaning Relay response xdm keys for {:?} channel: {:?} from: {:?} to: {:?}", + to_hex(prefix, false), chain_id, channel_id, from_nonce, @@ -242,9 +277,10 @@ where ); while from_nonce <= latest_relay_response_nonce { - 
to_delete.push(xdm_keys::get_key_for_xdm_relay_response(( - chain_id, channel_id, from_nonce, - ))); + to_delete.push(xdm_keys::get_key_for_xdm_relay_response( + prefix, + (chain_id, channel_id, from_nonce), + )); from_nonce = from_nonce.saturating_add(Nonce::one()); } diff --git a/domains/client/cross-domain-message-gossip/src/lib.rs b/domains/client/cross-domain-message-gossip/src/lib.rs index 2f2feb7e07..541345727f 100644 --- a/domains/client/cross-domain-message-gossip/src/lib.rs +++ b/domains/client/cross-domain-message-gossip/src/lib.rs @@ -5,9 +5,14 @@ mod aux_schema; mod gossip_worker; mod message_listener; -pub use aux_schema::{get_channel_state, set_channel_state, ChannelDetail}; +pub use aux_schema::{ + get_channel_state, get_xdm_processed_block_number, set_channel_state, + set_xdm_message_processed_at, BlockId, ChannelDetail, +}; pub use gossip_worker::{ xdm_gossip_peers_set_config, ChainMsg, ChainSink, ChannelUpdate, GossipWorker, GossipWorkerBuilder, Message, MessageData, }; -pub use message_listener::start_cross_chain_message_listener; +pub use message_listener::{ + can_allow_xdm_submission, start_cross_chain_message_listener, RELAYER_PREFIX, +}; diff --git a/domains/client/cross-domain-message-gossip/src/message_listener.rs b/domains/client/cross-domain-message-gossip/src/message_listener.rs index 2243990b25..11359d9f0a 100644 --- a/domains/client/cross-domain-message-gossip/src/message_listener.rs +++ b/domains/client/cross-domain-message-gossip/src/message_listener.rs @@ -31,6 +31,9 @@ use subspace_runtime_primitives::{Balance, BlockNumber}; use thiserror::Error; pub(crate) const LOG_TARGET: &str = "domain_message_listener"; +const TX_POOL_PREFIX: &[u8] = b"xdm_tx_pool_listener"; +pub const RELAYER_PREFIX: &[u8] = b"xdm_relayer"; + /// Number of blocks an already submitted XDM is not accepted since last submission. 
const XDM_ACCEPT_BLOCK_LIMIT: u32 = 5; @@ -476,7 +479,7 @@ where Ok(()) } -fn can_allow_xdm_submission( +pub fn can_allow_xdm_submission( client: &Arc, xdm_id: XdmId, submitted_block_id: BlockId, @@ -564,7 +567,7 @@ where runtime_api.channel_nonce(block_id.hash, src_chain_id, channel_id)?; if let Some(submitted_block_id) = - get_xdm_processed_block_number::<_, BlockOf>(&**client, xdm_id)? + get_xdm_processed_block_number::<_, BlockOf>(&**client, TX_POOL_PREFIX, xdm_id)? && !can_allow_xdm_submission( client, xdm_id, @@ -614,11 +617,17 @@ where block_id ); - set_xdm_message_processed_at(&**client, xdm_id, block_id)?; + set_xdm_message_processed_at(&**client, TX_POOL_PREFIX, xdm_id, block_id)?; } if let Some(channel_nonce) = maybe_channel_nonce { - cleanup_chain_channel_storages(&**client, src_chain_id, channel_id, channel_nonce)?; + cleanup_chain_channel_storages( + &**client, + TX_POOL_PREFIX, + src_chain_id, + channel_id, + channel_nonce, + )?; } Ok(true) diff --git a/domains/client/domain-operator/src/fraud_proof.rs b/domains/client/domain-operator/src/fraud_proof.rs index 3402e69735..7236273d35 100644 --- a/domains/client/domain-operator/src/fraud_proof.rs +++ b/domains/client/domain-operator/src/fraud_proof.rs @@ -379,17 +379,10 @@ where let maybe_domain_runtime_code_proof = self.maybe_generate_domain_runtime_code_proof_for_receipt(domain_id, local_receipt)?; - let block_randomness_proof = BlockRandomnessProof::generate( - self.consensus_client.as_ref(), - consensus_block_hash, - (), - &self.storage_key_provider, - )?; - let maybe_runtime_id = - self.is_domain_runtime_updraded_at(domain_id, consensus_block_hash)?; + self.is_domain_runtime_upgraded_at(domain_id, consensus_block_hash)?; - let domain_inherent_extrinsic_data_proof = DomainInherentExtrinsicDataProof::generate( + let invalid_inherent_extrinsic_proof = InvalidInherentExtrinsicProof::generate( &self.storage_key_provider, self.consensus_client.as_ref(), domain_id, @@ -397,6 +390,13 @@ where 
maybe_runtime_id, )?; + let domain_sudo_call_proof = DomainSudoCallStorageProof::generate( + self.consensus_client.as_ref(), + consensus_block_hash, + domain_id, + &self.storage_key_provider, + )?; + let invalid_domain_extrinsics_root_proof = FraudProof { domain_id, bad_receipt_hash, @@ -404,15 +404,15 @@ where maybe_domain_runtime_code_proof, proof: FraudProofVariant::InvalidExtrinsicsRoot(InvalidExtrinsicsRootProof { valid_bundle_digests, - block_randomness_proof, - domain_inherent_extrinsic_data_proof, + invalid_inherent_extrinsic_proof, + domain_sudo_call_proof, }), }; Ok(invalid_domain_extrinsics_root_proof) } - pub fn is_domain_runtime_updraded_at( + pub fn is_domain_runtime_upgraded_at( &self, domain_id: DomainId, at: CBlock::Hash, diff --git a/domains/client/relayer/src/lib.rs b/domains/client/relayer/src/lib.rs index 59682c3d52..664bf113d6 100644 --- a/domains/client/relayer/src/lib.rs +++ b/domains/client/relayer/src/lib.rs @@ -7,18 +7,20 @@ pub mod worker; use async_channel::TrySendError; use cross_domain_message_gossip::{ - get_channel_state, Message as GossipMessage, MessageData as GossipMessageData, + can_allow_xdm_submission, get_channel_state, get_xdm_processed_block_number, + set_xdm_message_processed_at, BlockId, Message as GossipMessage, + MessageData as GossipMessageData, RELAYER_PREFIX, }; use parity_scale_codec::{Codec, Encode}; use sc_client_api::{AuxStore, HeaderBackend, ProofProvider, StorageProof}; use sc_utils::mpsc::TracingUnboundedSender; -use sp_api::ProvideRuntimeApi; +use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_core::H256; use sp_domains::DomainsApi; use sp_messenger::messages::{ BlockMessageWithStorageKey, BlockMessagesWithStorageKey, ChainId, CrossDomainMessage, Proof, }; -use sp_messenger::{MessengerApi, RelayerApi}; +use sp_messenger::{MessengerApi, RelayerApi, XdmId}; use sp_mmr_primitives::MmrApi; use sp_runtime::traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, One}; use sp_runtime::ArithmeticError; @@ 
-229,19 +231,176 @@ where .map_err(Error::UnableToSubmitCrossDomainMessage) } +fn check_and_update_recent_xdm_submission( + consensus_client: &Arc, + xdm_id: XdmId, + msg: &BlockMessageWithStorageKey, +) -> bool +where + CBlock: BlockT, + CClient: AuxStore + HeaderBackend, +{ + let prefix = (RELAYER_PREFIX, msg.src_chain_id).encode(); + let current_block_id: BlockId = consensus_client.info().into(); + if let Ok(Some(submitted_block_id)) = + get_xdm_processed_block_number::<_, CBlock>(&**consensus_client, &prefix, xdm_id) + { + if !can_allow_xdm_submission( + consensus_client, + xdm_id, + submitted_block_id, + current_block_id.clone(), + None, + ) { + log::debug!( + target: LOG_TARGET, + "Skipping already submitted message relay from {:?}: {:?}", + msg.src_chain_id, + xdm_id + ); + return false; + } + } + + if let Err(err) = + set_xdm_message_processed_at(&**consensus_client, &prefix, xdm_id, current_block_id) + { + log::error!( + target: LOG_TARGET, + "Failed to store submitted message from {:?} to {:?}: {:?}", + msg.src_chain_id, + xdm_id, + err + ); + } + + true +} + +fn should_relay_outbox_message( + consensus_client: &Arc, + api: &ApiRef<'_, Client::Api>, + best_hash: Block::Hash, + msg: &BlockMessageWithStorageKey, +) -> bool +where + Block: BlockT, + CBlock: BlockT, + Client: ProvideRuntimeApi, + CClient: AuxStore + HeaderBackend, + Client::Api: RelayerApi, NumberFor, CBlock::Hash>, +{ + let id = msg.id(); + match api.should_relay_outbox_message(best_hash, msg.dst_chain_id, id) { + Ok(true) => (), + Ok(false) => return false, + Err(err) => { + tracing::error!( + target: LOG_TARGET, + ?err, + "Failed to fetch validity of outbox message {id:?} for domain {0:?}", + msg.dst_chain_id + ); + return false; + } + }; + + if let Some(dst_channel_state) = get_channel_state( + &**consensus_client, + msg.dst_chain_id, + msg.src_chain_id, + msg.channel_id, + ) + .ok() + .flatten() + { + // if this message should relay, + // check if the dst_chain inbox nonce is more than 
message nonce, + // if so, skip relaying since message is already executed on dst_chain + let relay_message = msg.nonce >= dst_channel_state.next_inbox_nonce; + if !relay_message { + log::debug!( + target: LOG_TARGET, + "Skipping message relay from {:?} to {:?}", + msg.src_chain_id, + msg.dst_chain_id, + ); + return false; + } + } + + let xdm_id = XdmId::RelayMessage((msg.dst_chain_id, msg.channel_id, msg.nonce)); + check_and_update_recent_xdm_submission(consensus_client, xdm_id, msg) +} + +fn should_relay_inbox_responses_message( + consensus_client: &Arc, + api: &ApiRef<'_, Client::Api>, + best_hash: Block::Hash, + msg: &BlockMessageWithStorageKey, +) -> bool +where + Block: BlockT, + CBlock: BlockT, + Client: ProvideRuntimeApi, + CClient: AuxStore + HeaderBackend, + Client::Api: RelayerApi, NumberFor, CBlock::Hash>, +{ + let id = msg.id(); + match api.should_relay_inbox_message_response(best_hash, msg.dst_chain_id, id) { + Ok(true) => (), + Ok(false) => return false, + Err(err) => { + tracing::error!( + target: LOG_TARGET, + ?err, + "Failed to fetch validity of inbox message response {id:?} for domain {0:?}", + msg.dst_chain_id + ); + return false; + } + }; + + if let Some(dst_channel_state) = get_channel_state( + &**consensus_client, + msg.dst_chain_id, + msg.src_chain_id, + msg.channel_id, + ) + .ok() + .flatten() + && let Some(dst_chain_outbox_response_nonce) = + dst_channel_state.latest_response_received_message_nonce + { + // relay inbox response if the dst_chain did not execute is already + let relay_message = msg.nonce > dst_chain_outbox_response_nonce; + if !relay_message { + log::debug!( + target: LOG_TARGET, + "Skipping message relay from {:?} to {:?}", + msg.src_chain_id, + msg.dst_chain_id, + ); + return false; + } + } + + let xdm_id = XdmId::RelayResponseMessage((msg.dst_chain_id, msg.channel_id, msg.nonce)); + check_and_update_recent_xdm_submission(consensus_client, xdm_id, msg) +} + // Fetch the XDM at the a given block and filter any already 
relayed XDM according to the best block -fn fetch_and_filter_messages( +fn fetch_and_filter_messages( client: &Arc, fetch_message_at: Block::Hash, consensus_client: &Arc, ) -> Result where - CNumber: Codec, - CHash: Codec, - CClient: AuxStore, + CBlock: BlockT, + CClient: AuxStore + HeaderBackend, Block: BlockT, Client: ProvideRuntimeApi + HeaderBackend, - Client::Api: RelayerApi, CNumber, CHash>, + Client::Api: RelayerApi, NumberFor, CBlock::Hash>, { let mut msgs = client .runtime_api() @@ -251,91 +410,16 @@ where let api = client.runtime_api(); let best_hash = client.info().best_hash; msgs.outbox.retain(|msg| { - let id = msg.id(); - let should_relay = match api.should_relay_outbox_message(best_hash, msg.dst_chain_id, id) { - Ok(valid) => valid, - Err(err) => { - tracing::error!( - target: LOG_TARGET, - ?err, - "Failed to fetch validity of outbox message {id:?} for domain {0:?}", - msg.dst_chain_id - ); - false - } - }; - - if should_relay - && let Some(dst_channel_state) = get_channel_state( - &**consensus_client, - msg.dst_chain_id, - msg.src_chain_id, - msg.channel_id, - ) - .ok() - .flatten() - { - // if this message should relay, - // check if the dst_chain inbox nonce is more than message nonce, - // if so, skip relaying since message is already executed on dst_chain - let relay_message = msg.nonce >= dst_channel_state.next_inbox_nonce; - if !relay_message { - log::debug!( - "Skipping message relay from {:?} to {:?}", - msg.src_chain_id, - msg.dst_chain_id, - ); - } - relay_message - } else { - should_relay - } + should_relay_outbox_message::(consensus_client, &api, best_hash, msg) }); msgs.inbox_responses.retain(|msg| { - let id = msg.id(); - let should_relay = match api.should_relay_inbox_message_response( + should_relay_inbox_responses_message::( + consensus_client, + &api, best_hash, - msg.dst_chain_id, - id, - ) { - Ok(valid) => valid, - Err(err) => { - tracing::error!( - target: LOG_TARGET, - ?err, - "Failed to fetch validity of inbox message response 
{id:?} for domain {0:?}", - msg.dst_chain_id - ); - false - } - }; - - if should_relay - && let Some(dst_channel_state) = get_channel_state( - &**consensus_client, - msg.dst_chain_id, - msg.src_chain_id, - msg.channel_id, - ) - .ok() - .flatten() - && let Some(dst_chain_outbox_response_nonce) = - dst_channel_state.latest_response_received_message_nonce - { - // relay inbox response if the dst_chain did not execute is already - let relay_message = msg.nonce > dst_chain_outbox_response_nonce; - if !relay_message { - log::debug!( - "Skipping message relay from {:?} to {:?}", - msg.src_chain_id, - msg.dst_chain_id, - ); - } - relay_message - } else { - should_relay - } + msg, + ) }); Ok(msgs) @@ -397,11 +481,17 @@ where "Checking messages to be submitted from chain: {chain_id:?} at block: ({to_process_consensus_number:?}, {to_process_consensus_hash:?})", ); - let xdm_proof_data = match chain_id { - ChainId::Consensus => XDMProofData::Consensus(to_process_consensus_hash), + let consensus_chain_api = consensus_chain_client.runtime_api(); + let (block_messages, maybe_domain_data) = match chain_id { + ChainId::Consensus => ( + fetch_and_filter_messages::<_, _, _, CBlock>( + consensus_chain_client, + to_process_consensus_hash, + consensus_chain_client, + )?, + None, + ), ChainId::Domain(domain_id) => { - let consensus_chain_api = consensus_chain_client.runtime_api(); - let confirmed_domain_block_hash = { match consensus_chain_api .latest_confirmed_domain_block(to_process_consensus_hash, domain_id)? 
@@ -411,7 +501,25 @@ where None => return Ok(()), } }; + ( + fetch_and_filter_messages::<_, _, _, CBlock>( + domain_client, + confirmed_domain_block_hash, + consensus_chain_client, + )?, + Some((domain_id, confirmed_domain_block_hash)), + ) + } + }; + // short circuit if the there are no messages to relay + if block_messages.is_empty() { + return Ok(()); + } + + let xdm_proof_data = match maybe_domain_data { + None => XDMProofData::Consensus(to_process_consensus_hash), + Some((domain_id, confirmed_domain_block_hash)) => { let storage_key = consensus_chain_api .confirmed_domain_block_storage_key(to_process_consensus_hash, domain_id)?; @@ -427,28 +535,6 @@ where } }; - // Fetch messages to be relayed and filter out already relayed messages - let block_messages = match &xdm_proof_data { - XDMProofData::Consensus(consensus_hash) => fetch_and_filter_messages( - consensus_chain_client, - *consensus_hash, - consensus_chain_client, - )?, - XDMProofData::Domain { - confirmed_domain_block_hash, - .. - } => fetch_and_filter_messages( - domain_client, - *confirmed_domain_block_hash, - consensus_chain_client, - )?, - }; - - // short circuit if the there are no messages to relay - if block_messages.is_empty() { - return Ok(()); - } - construct_cross_chain_message_and_submit::, CBlock::Hash, _, _>( block_messages.outbox, |key, dst_chain_id| { diff --git a/domains/primitives/runtime/src/lib.rs b/domains/primitives/runtime/src/lib.rs index e91b79cfe3..e8989b499f 100644 --- a/domains/primitives/runtime/src/lib.rs +++ b/domains/primitives/runtime/src/lib.rs @@ -93,8 +93,13 @@ pub fn maximum_block_length() -> BlockLength { BlockLength::max_with_normal_ratio(MAX_BLOCK_LENGTH, NORMAL_DISPATCH_RATIO) } -/// The existential deposit. Same with the one on primary chain. 
-pub const EXISTENTIAL_DEPOSIT: Balance = 500 * SHANNON; +/// Computed as ED = Account data size * Price per byte, where +/// Price per byte = Min Number of validators * Storage duration (years) * Storage cost per year +/// Account data size (80 bytes) +/// Min Number of redundant validators (10) - For a stable and redundant blockchain we need at least a certain number of full nodes/collators. +/// Storage duration (1 year) - It is theoretically unlimited, accounts will stay around while the chain is alive. +/// Storage cost per year of (12 * 1e-9 * 0.1 ) - SSD storage on cloud hosting costs about 0.1 USD per Gb per month +pub const EXISTENTIAL_DEPOSIT: Balance = 1_000_000_000_000 * SHANNON; /// We assume that ~5% of the block weight is consumed by `on_initialize` handlers. This is /// used to limit the maximal weight of a single extrinsic. diff --git a/shared/subspace-data-retrieval/src/object_fetcher.rs b/shared/subspace-data-retrieval/src/object_fetcher.rs index b89ad8c007..ed47611346 100644 --- a/shared/subspace-data-retrieval/src/object_fetcher.rs +++ b/shared/subspace-data-retrieval/src/object_fetcher.rs @@ -278,18 +278,22 @@ where .read_piece(next_source_piece_index, piece_index, piece_offset) .await?; next_source_piece_index = next_source_piece_index.next_source_index(); - read_records_data.extend(piece.record().to_raw_record_chunks().flatten().copied()); + // Discard piece data before the offset + read_records_data.extend( + piece + .record() + .to_raw_record_chunks() + .flatten() + .skip(piece_offset as usize) + .copied(), + ); if last_data_piece_in_segment { // The last 2 bytes might contain segment padding, so we can't use them for object length or object data. 
- read_records_data.truncate(RawRecord::SIZE - 2); + read_records_data.truncate(read_records_data.len() - 2); } - let data_length = self.decode_data_length( - &read_records_data[piece_offset as usize..], - piece_index, - piece_offset, - )?; + let data_length = self.decode_data_length(&read_records_data, piece_index, piece_offset)?; let data_length = if let Some(data_length) = data_length { data_length @@ -311,12 +315,8 @@ where next_source_piece_index = next_source_piece_index.next_source_index(); read_records_data.extend(piece.record().to_raw_record_chunks().flatten().copied()); - self.decode_data_length( - &read_records_data[piece_offset as usize..], - piece_index, - piece_offset, - )? - .expect("Extra RawRecord is larger than the length encoding; qed") + self.decode_data_length(&read_records_data, piece_index, piece_offset)? + .expect("Extra RawRecord is larger than the length encoding; qed") } else { trace!( piece_position_in_segment, @@ -347,14 +347,10 @@ where return Ok(None); } - // Discard piece data before the offset - let mut data = read_records_data[piece_offset as usize..].to_vec(); - drop(read_records_data); - // Read more pieces until we have enough data - if data_length as usize > data.len() { + if data_length as usize > read_records_data.len() { let remaining_piece_count = - (data_length as usize - data.len()).div_ceil(RawRecord::SIZE); + (data_length as usize - read_records_data.len()).div_ceil(RawRecord::SIZE); let remaining_piece_indexes = (next_source_piece_index..) .filter(|i| i.is_source()) .take(remaining_piece_count) @@ -363,14 +359,15 @@ where .await? 
.into_iter() .for_each(|piece| { - data.extend(piece.record().to_raw_record_chunks().flatten().copied()) + read_records_data + .extend(piece.record().to_raw_record_chunks().flatten().copied()) }); } // Decode the data, and return it if it's valid - let data = Vec::::decode(&mut data.as_slice())?; + let read_records_data = Vec::::decode(&mut read_records_data.as_slice())?; - Ok(Some(data)) + Ok(Some(read_records_data)) } /// Fetch and assemble an object that can cross segment boundaries, which requires assembling diff --git a/test/subspace-test-client/src/chain_spec.rs b/test/subspace-test-client/src/chain_spec.rs index 704a7d9a18..2dd67b93d3 100644 --- a/test/subspace-test-client/src/chain_spec.rs +++ b/test/subspace-test-client/src/chain_spec.rs @@ -5,10 +5,10 @@ use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::{IdentifyAccount, Verify}; use std::marker::PhantomData; use std::num::NonZeroU32; -use subspace_runtime_primitives::{AccountId, Balance, Signature}; +use subspace_runtime_primitives::{AccountId, Balance, Signature, SSC}; use subspace_test_runtime::{ AllowAuthoringBy, BalancesConfig, DomainsConfig, EnableRewardsAt, RewardsConfig, - RuntimeGenesisConfig, SubspaceConfig, SudoConfig, SystemConfig, SSC, WASM_BINARY, + RuntimeGenesisConfig, SubspaceConfig, SudoConfig, SystemConfig, WASM_BINARY, }; /// Generate a crypto pair from seed. diff --git a/test/subspace-test-runtime/src/lib.rs b/test/subspace-test-runtime/src/lib.rs index 89289f6101..4ca1fd72e7 100644 --- a/test/subspace-test-runtime/src/lib.rs +++ b/test/subspace-test-runtime/src/lib.rs @@ -100,7 +100,7 @@ use subspace_core_primitives::solutions::SolutionRange; use subspace_core_primitives::{hashes, PublicKey, Randomness, SlotNumber, U256}; use subspace_runtime_primitives::{ AccountId, Balance, BlockNumber, FindBlockRewardAddress, Hash, HoldIdentifier, Moment, Nonce, - Signature, MIN_REPLICATION_FACTOR, + Signature, MIN_REPLICATION_FACTOR, SHANNON, SSC, }; sp_runtime::impl_opaque_keys! 
{ @@ -130,13 +130,6 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { system_version: 2, }; -/// The smallest unit of the token is called Shannon. -pub const SHANNON: Balance = 1; -/// Subspace Credits have 18 decimal places. -pub const DECIMAL_PLACES: u8 = 18; -/// One Subspace Credit. -pub const SSC: Balance = (10 * SHANNON).pow(DECIMAL_PLACES as u32); - // TODO: Many of below constants should probably be updatable but currently they are not /// Expected block time in milliseconds. @@ -360,8 +353,7 @@ impl pallet_balances::Config for Runtime { /// The ubiquitous event type. type RuntimeEvent = RuntimeEvent; type DustRemoval = (); - // TODO: Correct value - type ExistentialDeposit = ConstU128<{ 500 * SHANNON }>; + type ExistentialDeposit = ConstU128<{ 10_000_000_000_000 * SHANNON }>; type AccountStore = System; type WeightInfo = pallet_balances::weights::SubstrateWeight; type FreezeIdentifier = ();