From dd0dcf48b849d2a6906959067609b484679a836b Mon Sep 17 00:00:00 2001 From: Yeou Date: Tue, 19 Sep 2023 17:04:22 +0800 Subject: [PATCH 1/3] remove unused code and comments --- node/Cargo.toml | 8 ----- runtime/src/constants.rs | 59 -------------------------------- runtime/src/lib.rs | 73 ++++++++++++++++++++++------------------ 3 files changed, 41 insertions(+), 99 deletions(-) delete mode 100755 runtime/src/constants.rs diff --git a/node/Cargo.toml b/node/Cargo.toml index 027842bb..e60f60f8 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -38,9 +38,7 @@ sp-inherents = { version = "4.0.0-dev", git = "https://github.com/CESSProject/su sp-keyring = { version = "7.0.0", git = "https://github.com/CESSProject/substrate.git", branch = "cess-polkadot-v0.9.36" } sp-keystore = { version = "0.13.0", git = "https://github.com/CESSProject/substrate.git", branch = "cess-polkadot-v0.9.36" } sp-consensus = { version = "0.10.0-dev", git = "https://github.com/CESSProject/substrate.git", branch = "cess-polkadot-v0.9.36" } -# sp-transaction-pool sp-transaction-storage-proof = { version = "4.0.0-dev", git = "https://github.com/CESSProject/substrate.git", branch = "cess-polkadot-v0.9.36" } -# sp-io # client dependencies sc-client-api = { version = "4.0.0-dev", git = "https://github.com/CESSProject/substrate.git", branch = "cess-polkadot-v0.9.36" } @@ -65,21 +63,15 @@ sc-sysinfo = { version = "6.0.0-dev", git = "https://github.com/CESSProject/subs # frame dependencies frame-system = { version = "4.0.0-dev", git = "https://github.com/CESSProject/substrate.git", branch = "cess-polkadot-v0.9.36" } -# frame-system-rpc-runtime-api = { version = "4.0.0-dev", git = "https://github.com/CESSProject/substrate.git", branch = "cess-polkadot-v0.9.36" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, git = "https://github.com/CESSProject/substrate.git", branch = "cess-polkadot-v0.9.36" } -# pallet-assets -# pallet-asset-tx-payment pallet-im-online = { version = "4.0.0-dev", git = "https://github.com/CESSProject/substrate.git", branch = "cess-polkadot-v0.9.36" } # node-specific dependencies cess-node-runtime = { path = "../runtime" } -# node-rpc -# node-primitives # CLI-specific dependencies sc-cli = { version = "0.10.0-dev", git = "https://github.com/CESSProject/substrate.git", branch = "cess-polkadot-v0.9.36" } frame-benchmarking-cli = { version = "4.0.0-dev", git = "https://github.com/CESSProject/substrate.git", branch = "cess-polkadot-v0.9.36" } -# node-inspect try-runtime-cli = { version = "0.10.0-dev", optional = true, git = "https://github.com/CESSProject/substrate.git", branch = "cess-polkadot-v0.9.36" } serde_json = "1.0.85" diff --git a/runtime/src/constants.rs b/runtime/src/constants.rs deleted file mode 100755 index 42ee46d1..00000000 --- a/runtime/src/constants.rs +++ /dev/null @@ -1,59 +0,0 @@ -pub mod currency { - /// Balance of an account. - pub type Balance = u128; - - pub const MILLICENTS: Balance = 1_000_000_000; - pub const CENTS: Balance = 1_000 * MILLICENTS; // assume this is worth about a cent. - pub const DOLLARS: Balance = 100 * CENTS; - - pub const fn deposit(items: u32, bytes: u32) -> Balance { - items as Balance * 15 * CENTS + (bytes as Balance) * 6 * CENTS - } -} - -/// Time. -pub mod time { - pub type BlockNumber = u32; - pub type Moment = u64; - - /// Since BABE is probabilistic this is the average expected block time that - /// we are targeting. 
Blocks will be produced at a minimum duration defined - /// by `SLOT_DURATION`, but some slots will not be allocated to any - /// authority and hence no block will be produced. We expect to have this - /// block time on average following the defined slot duration and the value - /// of `c` configured for BABE (where `1 - c` represents the probability of - /// a slot being empty). - /// This value is only used indirectly to define the unit constants below - /// that are expressed in blocks. The rest of the code should use - /// `SLOT_DURATION` instead (like the Timestamp pallet for calculating the - /// minimum period). - /// - /// If using BABE with secondary slots (default) then all of the slots will - /// always be assigned, in which case `MILLISECS_PER_BLOCK` and - /// `SLOT_DURATION` should have the same value. - /// - /// - pub const MILLISECS_PER_BLOCK: Moment = 3000; - pub const SECS_PER_BLOCK: Moment = MILLISECS_PER_BLOCK / 1000; - - // NOTE: Currently it is not possible to change the slot duration after the chain has started. - // Attempting to do so will brick block production. - pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; - - // 1 in 4 blocks (on average, not counting collisions) will be primary BABE blocks. - pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); - - // NOTE: Currently it is not possible to change the epoch duration after the chain has started. - // Attempting to do so will brick block production. - pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 10 * MINUTES; - pub const EPOCH_DURATION_IN_SLOTS: u64 = { - const SLOT_FILL_RATE: f64 = MILLISECS_PER_BLOCK as f64 / SLOT_DURATION as f64; - - (EPOCH_DURATION_IN_BLOCKS as f64 * SLOT_FILL_RATE) as u64 - }; - - // These time units are defined in number of blocks. - pub const MINUTES: BlockNumber = 60 / (SECS_PER_BLOCK as BlockNumber); - pub const HOURS: BlockNumber = MINUTES * 60; - pub const DAYS: BlockNumber = HOURS * 24; -} diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 88a16728..63ac3db6 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -16,18 +16,18 @@ pub fn wasm_binary_unwrap() -> &'static [u8] { ) } -use codec::{Decode, Encode, /*MaxEncodedLen*/}; -use frame_election_provider_support::{ - onchain, ExtendedBalance, ElectionDataProvider, VoteWeight -}; +use codec::{Decode, Encode}; +use cp_cess_common::FRAGMENT_COUNT; +use frame_election_provider_support::{onchain, ElectionDataProvider, ExtendedBalance, VoteWeight}; +pub use frame_system::Call as SystemCall; pub use pallet_file_bank; -pub use pallet_storage_handler; -pub use pallet_oss; use pallet_grandpa::{ fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, }; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +pub use pallet_oss; use pallet_session::historical as pallet_session_historical; +pub use pallet_storage_handler; pub use pallet_transaction_payment::{CurrencyAdapter, Multiplier, TargetedFeeAdjustment}; use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; use sp_api::impl_runtime_apis; @@ -39,8 +39,9 @@ use sp_runtime::{ generic::Era, impl_opaque_keys, traits::{ - BlakeTwo256, Block as BlockT, Bounded, ConvertInto, Dispatchable, DispatchInfoOf, IdentifyAccount, NumberFor, - OpaqueKeys, PostDispatchInfoOf, SaturatedConversion, StaticLookup, Verify, + BlakeTwo256, Block as BlockT, Bounded, ConvertInto, DispatchInfoOf, Dispatchable, + IdentifyAccount, NumberFor, OpaqueKeys, PostDispatchInfoOf, SaturatedConversion, + StaticLookup, Verify, }, transaction_validity::{ 
TransactionPriority, TransactionSource, TransactionValidity, TransactionValidityError, @@ -48,9 +49,7 @@ use sp_runtime::{ ApplyExtrinsicResult, FixedPointNumber, MultiSignature, Perbill, Percent, Permill, Perquintill, RuntimeAppPublic, }; -use cp_cess_common::{FRAGMENT_COUNT}; use sp_std::{marker::PhantomData, prelude::*}; -pub use frame_system::Call as SystemCall; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -58,17 +57,19 @@ use sp_version::RuntimeVersion; // A few exports that help ease life for downstream crates. pub use frame_support::{ construct_runtime, + dispatch::DispatchClass, pallet_prelude::Get, parameter_types, - dispatch::DispatchClass, traits::{ AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, ConstU8, Currency, CurrencyToVote, EitherOfDiverse, EqualPrivilegeOnly, Everything, FindAuthor, Imbalance, - InstanceFilter, KeyOwnerProofSystem, Nothing, - OnUnbalanced, Randomness, StorageInfo, U128CurrencyToVote, + InstanceFilter, KeyOwnerProofSystem, Nothing, OnUnbalanced, Randomness, StorageInfo, + U128CurrencyToVote, }, weights::{ - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND}, + constants::{ + BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, + }, ConstantMultiplier, IdentityFee, Weight, }, ConsensusEngineId, PalletId, StorageValue, @@ -81,11 +82,7 @@ use frame_system::{ pub mod impls; use impls::{Author, CreditToBlockAuthor, SchedulerStashAccountFinder}; -// use frame_support::traits::OnRuntimeUpgrade; -// pub use pallet_file_bank::migrations::TestMigrationFileBank; -// pub use pallet_audit::migrations::MigrationSegmentBook; -pub mod constants; use fp_rpc::TransactionStatus; pub use pallet_balances::Call as BalancesCall; use pallet_ethereum::{Call::transact, Transaction as EthereumTransaction}; @@ -274,7 +271,8 @@ parameter_types! { const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow for 2 seconds of compute with a 6 second average block time. -const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), u64::MAX); +const MAXIMUM_BLOCK_WEIGHT: Weight = + Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), u64::MAX); parameter_types! { pub RuntimeBlockLength: BlockLength = @@ -459,8 +457,6 @@ impl pallet_scheduler::Config for Runtime { type WeightInfo = pallet_scheduler::weights::SubstrateWeight; type OriginPrivilegeCmp = EqualPrivilegeOnly; type Preimages = Preimage; - // type PreimageProvider = Preimage; - // type NoPreimagePostponement = NoPreimagePostponement; } parameter_types! 
{ @@ -770,7 +766,10 @@ pub struct OnChainVrfSloverConfig; impl pallet_rrsc::VrfSloverConfig for OnChainVrfSloverConfig { fn min_electable_weight() -> VoteWeight { let total_issuance = ::Currency::total_issuance(); - ::CurrencyToVote::to_vote(MIN_ELECTABLE_STAKE, total_issuance) + ::CurrencyToVote::to_vote( + MIN_ELECTABLE_STAKE, + total_issuance, + ) } } @@ -926,8 +925,13 @@ impl pallet_transaction_payment::Config for Runtime { type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = IdentityFee; type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = - TargetedFeeAdjustment; + type FeeMultiplierUpdate = TargetedFeeAdjustment< + Self, + TargetBlockFullness, + AdjustmentVariable, + MinimumMultiplier, + MaximumMultiplier, + >; } impl pallet_sudo::Config for Runtime { @@ -1125,7 +1129,10 @@ where public: ::Signer, account: AccountId, nonce: Index, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { + ) -> Option<( + RuntimeCall, + ::SignaturePayload, + )> { let tip = 0; // take the biggest period possible. let period = @@ -1409,7 +1416,7 @@ impl fp_self_contained::SelfContainedCall for RuntimeCall { } fn validate_self_contained( - &self, + &self, info: &Self::SignedInfo, dispatch_info: &DispatchInfoOf, len: usize, @@ -1420,7 +1427,6 @@ impl fp_self_contained::SelfContainedCall for RuntimeCall { } } - fn pre_dispatch_self_contained( &self, info: &Self::SignedInfo, @@ -1428,7 +1434,8 @@ impl fp_self_contained::SelfContainedCall for RuntimeCall { len: usize, ) -> Option> { match self { - RuntimeCall::Ethereum(call) => call.pre_dispatch_self_contained(info, dispatch_info, len), + RuntimeCall::Ethereum(call) => + call.pre_dispatch_self_contained(info, dispatch_info, len), _ => None, } } @@ -1438,9 +1445,10 @@ impl fp_self_contained::SelfContainedCall for RuntimeCall { info: Self::SignedInfo, ) -> Option>> { match self { - call @ RuntimeCall::Ethereum(pallet_ethereum::Call::transact { .. }) => Some( - call.dispatch(RuntimeOrigin::from(pallet_ethereum::RawOrigin::EthereumTransaction(info))), - ), + call @ RuntimeCall::Ethereum(pallet_ethereum::Call::transact { .. }) => + Some(call.dispatch(RuntimeOrigin::from( + pallet_ethereum::RawOrigin::EthereumTransaction(info), + ))), _ => None, } } @@ -1565,7 +1573,8 @@ pub type SignedExtra = ( pub type UncheckedExtrinsic = fp_self_contained::UncheckedExtrinsic; /// Extrinsic type that has already been checked. -pub type CheckedExtrinsic = fp_self_contained::CheckedExtrinsic; +pub type CheckedExtrinsic = + fp_self_contained::CheckedExtrinsic; /// The payload being signed in transactions. pub type SignedPayload = generic::SignedPayload; // Executive: handles dispatch to the various modules. 
From ce5f12daa51c5c7e4823d59409956e0299e8aba6 Mon Sep 17 00:00:00 2001 From: Yeou Date: Mon, 9 Oct 2023 15:54:44 +0800 Subject: [PATCH 2/3] run cargo fmt on audit pallet --- c-pallets/audit/src/benchmarking.rs | 53 ++- c-pallets/audit/src/constants.rs | 2 +- c-pallets/audit/src/lib.rs | 565 +++++++++++++++++----------- c-pallets/audit/src/types.rs | 7 +- 4 files changed, 390 insertions(+), 237 deletions(-) diff --git a/c-pallets/audit/src/benchmarking.rs b/c-pallets/audit/src/benchmarking.rs index 6fd062fd..2cf812f6 100644 --- a/c-pallets/audit/src/benchmarking.rs +++ b/c-pallets/audit/src/benchmarking.rs @@ -14,8 +14,8 @@ use frame_system::RawOrigin; // use pallet_cess_staking::{ // testing_utils, Config as StakingConfig, Pallet as Staking, RewardDestination, // }; -// use pallet_tee_worker::{Config as TeeWorkerConfig, testing_utils::add_scheduler, Pallet as TeeWorker}; -// use pallet_sminer::{Config as SminerConfig, Pallet as Sminer}; +// use pallet_tee_worker::{Config as TeeWorkerConfig, testing_utils::add_scheduler, Pallet as +// TeeWorker}; use pallet_sminer::{Config as SminerConfig, Pallet as Sminer}; // use sp_runtime::{ // traits::{Bounded, One, StaticLookup, TrailingZeroInput, Zero}, // Perbill, Percent, @@ -25,7 +25,11 @@ use frame_system::RawOrigin; // use frame_system::RawOrigin; pub struct Pallet(Audit); pub trait Config: - crate::Config + pallet_cess_staking::Config + pallet_tee_worker::benchmarking::Config + pallet_sminer::benchmarking::Config + pallet_file_bank::benchmarking::Config + crate::Config + + pallet_cess_staking::Config + + pallet_tee_worker::benchmarking::Config + + pallet_sminer::benchmarking::Config + + pallet_file_bank::benchmarking::Config { } @@ -34,24 +38,38 @@ const USER_SEED: u32 = 999666; const SEED: u32 = 2190502; const MINER_LIST: [&'static str; 30] = [ - "miner1", "miner2", "miner3", "miner4", "miner5", "miner6", "miner7", "miner8", "miner9", "miner10", - "miner11", "miner12", "miner13", "miner14", "miner15", "miner16", "miner17", "miner18", "miner19", "miner20", - "miner21", "miner22", "miner23", "miner24", "miner25", "miner26", "miner27", "miner28", "miner29", "miner30", + "miner1", "miner2", "miner3", "miner4", "miner5", "miner6", "miner7", "miner8", "miner9", + "miner10", "miner11", "miner12", "miner13", "miner14", "miner15", "miner16", "miner17", + "miner18", "miner19", "miner20", "miner21", "miner22", "miner23", "miner24", "miner25", + "miner26", "miner27", "miner28", "miner29", "miner30", ]; pub fn bench_generate_challenge() { - let space_challenge_param = [67_549_635, 67_864_236, 67_338_392, 67_130_229, 67_369_766, 67_193_409, 67_799_602, 67_425_292]; - let random_index_list = [691, 406, 838, 480, 996, 798, 362, 456, 144, 666, 1, 018, 568, 992, 650, 729, 808, 229, 623, 499, 671, 254, 24, 217, 698, 648, 781, 460, 298, 548, 742, 364, 183, 114, 309, 564, 127, 154, 815, 651, 397, 576, 697, 358, 880, 73, 629, 66]; + let space_challenge_param = [ + 67_549_635, 67_864_236, 67_338_392, 67_130_229, 67_369_766, 67_193_409, 67_799_602, + 67_425_292, + ]; + let random_index_list = [ + 691, 406, 838, 480, 996, 798, 362, 456, 144, 666, 1, 018, 568, 992, 650, 729, 808, 229, + 623, 499, 671, 254, 24, 217, 698, 648, 781, 460, 298, 548, 742, 364, 183, 114, 309, 564, + 127, 154, 815, 651, 397, 576, 697, 358, 880, 73, 629, 66, + ]; let random_list = [[55u8; 20]; 48]; - let mut miner_snapshot_list: BoundedVec, BlockNumberOf>, ::ChallengeMinerMax> = Default::default(); + let mut miner_snapshot_list: BoundedVec< + MinerSnapShot, BlockNumberOf>, + 
::ChallengeMinerMax, + > = Default::default(); let mut total_idle_space: u128 = u128::MIN; let mut total_service_space: u128 = u128::MIN; - let all_miner = ::MinerControl::get_all_miner().expect("get all miner error!"); + let all_miner = + ::MinerControl::get_all_miner().expect("get all miner error!"); for miner in all_miner.into_iter() { - let (idle_space, service_space, service_bloom_filter, space_proof_info, tee_signature) = ::MinerControl::get_miner_snapshot(&miner).expect("get miner snapshot failed"); + let (idle_space, service_space, service_bloom_filter, space_proof_info, tee_signature) = + ::MinerControl::get_miner_snapshot(&miner) + .expect("get miner snapshot failed"); if (idle_space == 0) && (service_space == 0) { - continue; + continue } total_idle_space = total_idle_space.checked_add(idle_space).expect("overflow"); total_service_space = total_service_space.checked_add(service_space).expect("overflow"); @@ -83,10 +101,7 @@ pub fn bench_generate_challenge() { space_challenge_param, }; - let challenge_info = ChallengeInfo:: { - net_snap_shot, - miner_snapshot_list, - }; + let challenge_info = ChallengeInfo:: { net_snap_shot, miner_snapshot_list }; >::put(challenge_info); let duration: BlockNumberOf = 5000u32.saturated_into(); >::put(duration); @@ -151,7 +166,7 @@ benchmarks! { let sigma: BoundedVec = [5u8; 2048].to_vec().try_into().unwrap(); Audit::::submit_idle_proof(RawOrigin::Signed(miner.clone()).into(), idle_total_hash.clone())?; Audit::::submit_service_proof(RawOrigin::Signed(miner.clone()).into(), sigma.clone())?; - + let random_index_list = [691, 406, 838, 480, 996, 798, 362, 456, 144, 666, 1, 018, 568, 992, 650, 729, 808, 229, 623, 499, 671, 254, 24, 217, 698, 648, 781, 460, 298, 548, 742, 364, 183, 114, 309, 564, 127, 154, 815, 651, 397, 576, 697, 358, 880, 73, 629, 66]; let random_list = [[55u8; 20]; 48]; let verify_service_result = VerifyServiceResultInfo::{ @@ -159,7 +174,7 @@ benchmarks! { tee_acc: controller_acc.clone(), miner_prove: sigma.clone(), result: true, - chal: QElement { + chal: QElement { random_index_list: random_index_list.to_vec().try_into().unwrap(), random_list: random_list.to_vec().try_into().unwrap(), }, @@ -220,7 +235,7 @@ benchmarks! 
{ tee_acc: controller_acc.clone(), miner_prove: sigma.clone(), result: true, - chal: QElement { + chal: QElement { random_index_list: random_index_list.to_vec().try_into().unwrap(), random_list: random_list.to_vec().try_into().unwrap(), }, diff --git a/c-pallets/audit/src/constants.rs b/c-pallets/audit/src/constants.rs index 7b26e422..b7a333b7 100644 --- a/c-pallets/audit/src/constants.rs +++ b/c-pallets/audit/src/constants.rs @@ -9,4 +9,4 @@ pub(super) const IDLE_FAULT_TOLERANT: u8 = 2; pub(super) const SERVICE_FAULT_TOLERANT: u8 = 2; -pub(super) type SpaceChallengeParam = [u64; 8]; \ No newline at end of file +pub(super) type SpaceChallengeParam = [u64; 8]; diff --git a/c-pallets/audit/src/lib.rs b/c-pallets/audit/src/lib.rs index 10384145..96c6a481 100644 --- a/c-pallets/audit/src/lib.rs +++ b/c-pallets/audit/src/lib.rs @@ -59,46 +59,40 @@ pub mod benchmarking; pub mod weights; use sp_runtime::{ + offchain::storage::{StorageRetrievalError, StorageValueRef}, traits::{CheckedAdd, SaturatedConversion}, - RuntimeDebug, Permill, - offchain::storage::{StorageValueRef, StorageRetrievalError}, + Permill, RuntimeDebug, }; - use codec::{Decode, Encode}; +use cp_bloom_filter::BloomFilter; +use cp_cess_common::*; +use cp_enclave_verify::verify_rsa; +use cp_scheduler_credit::SchedulerCreditCounter; use frame_support::{ - transactional, dispatch::DispatchResult, pallet_prelude::*, storage::bounded_vec::BoundedVec, traits::{ - FindAuthor, Randomness, ReservableCurrency, EstimateNextSessionRotation, - ValidatorSetWithIdentification, ValidatorSet, OneSessionHandler, StorageVersion, + EstimateNextSessionRotation, FindAuthor, OneSessionHandler, Randomness, ReservableCurrency, + StorageVersion, ValidatorSet, ValidatorSetWithIdentification, }, - PalletId, WeakBoundedVec, BoundedSlice, + transactional, BoundedSlice, PalletId, WeakBoundedVec, }; -use sp_core::{ - crypto::KeyTypeId, - offchain::OpaqueNetworkState, -}; -use sp_runtime::{Saturating, app_crypto::RuntimeAppPublic}; use frame_system::offchain::{CreateSignedTransaction, SubmitTransaction}; use pallet_file_bank::RandomFileList; -use pallet_tee_worker::TeeWorkerHandler; use pallet_sminer::MinerControl; use pallet_storage_handler::StorageHandle; +use pallet_tee_worker::TeeWorkerHandler; use scale_info::TypeInfo; -use sp_core::H256; -use sp_std::{ - convert:: { TryFrom, TryInto }, - prelude::*, - collections::btree_map::BTreeMap, - }; -use cp_enclave_verify::verify_rsa; -use cp_cess_common::*; +use sp_core::{crypto::KeyTypeId, offchain::OpaqueNetworkState, H256}; +use sp_runtime::{app_crypto::RuntimeAppPublic, Saturating}; +use sp_std::{ + collections::btree_map::BTreeMap, + convert::{TryFrom, TryInto}, + prelude::*, +}; pub use weights::WeightInfo; -use cp_bloom_filter::BloomFilter; -use cp_scheduler_credit::SchedulerCreditCounter; type AccountOf = ::AccountId; type BlockNumberOf = ::BlockNumber; @@ -152,8 +146,11 @@ impl sp_std::fmt::Debug for OffchainErr { pub mod pallet { use super::*; // use frame_benchmarking::baseline::Config; - use frame_support::{traits::Get}; - use frame_system::{ensure_signed, pallet_prelude::{*, OriginFor}}; + use frame_support::traits::Get; + use frame_system::{ + ensure_signed, + pallet_prelude::{OriginFor, *}, + }; ///18446744073709551615 pub const LIMIT: u64 = u64::MAX; @@ -246,7 +243,6 @@ pub mod pallet { SubmitServiceVerifyResult { tee: AccountOf, miner: AccountOf, result: bool }, VerifyProof { tee_worker: AccountOf, miner: AccountOf }, - } /// Error for the audit pallet. 
@@ -312,11 +308,13 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn keys)] - pub(super) type Keys = StorageValue<_, WeakBoundedVec, ValueQuery>; + pub(super) type Keys = + StorageValue<_, WeakBoundedVec, ValueQuery>; #[pallet::storage] #[pallet::getter(fn challenge_proposal)] - pub(super) type ChallengeProposal = CountedStorageMap<_, Blake2_128Concat, [u8; 32], (u32, ChallengeInfo)>; + pub(super) type ChallengeProposal = + CountedStorageMap<_, Blake2_128Concat, [u8; 32], (u32, ChallengeInfo)>; #[pallet::storage] #[pallet::getter(fn challenge_snap_shot)] @@ -324,15 +322,18 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn counted_idle_failed)] - pub(super) type CountedIdleFailed = StorageMap<_, Blake2_128Concat, AccountOf, u32, ValueQuery>; + pub(super) type CountedIdleFailed = + StorageMap<_, Blake2_128Concat, AccountOf, u32, ValueQuery>; #[pallet::storage] #[pallet::getter(fn counted_service_failed)] - pub(super) type CountedServiceFailed = StorageMap<_, Blake2_128Concat, AccountOf, u32, ValueQuery>; + pub(super) type CountedServiceFailed = + StorageMap<_, Blake2_128Concat, AccountOf, u32, ValueQuery>; #[pallet::storage] #[pallet::getter(fn counted_clear)] - pub(super) type CountedClear = StorageMap<_, Blake2_128Concat, AccountOf, u8, ValueQuery>; + pub(super) type CountedClear = + StorageMap<_, Blake2_128Concat, AccountOf, u8, ValueQuery>; #[pallet::storage] #[pallet::getter(fn challenge_era)] @@ -340,15 +341,28 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn unverify_idle_proof)] - pub(super) type UnverifyIdleProof = StorageMap<_, Blake2_128Concat, AccountOf, BoundedVec, T::VerifyMissionMax>, ValueQuery>; + pub(super) type UnverifyIdleProof = StorageMap< + _, + Blake2_128Concat, + AccountOf, + BoundedVec, T::VerifyMissionMax>, + ValueQuery, + >; #[pallet::storage] #[pallet::getter(fn unverify_service_proof)] - pub(super) type UnverifyServiceProof = StorageMap<_, Blake2_128Concat, AccountOf, BoundedVec, T::VerifyMissionMax>, ValueQuery>; + pub(super) type UnverifyServiceProof = StorageMap< + _, + Blake2_128Concat, + AccountOf, + BoundedVec, T::VerifyMissionMax>, + ValueQuery, + >; #[pallet::storage] #[pallet::getter(fn verify_result)] - pub(super) type VerifyResult = StorageMap<_, Blake2_128Concat, AccountOf, (Option, Option)>; + pub(super) type VerifyResult = + StorageMap<_, Blake2_128Concat, AccountOf, (Option, Option)>; #[pallet::storage] #[pallet::getter(fn verify_reassign_count)] @@ -390,8 +404,13 @@ pub mod pallet { log::info!("offchain worker random challenge start"); if let Err(e) = Self::offchain_work_start(now) { match e { - OffchainErr::Working => log::info!("offchain working, Unable to perform a new round of work."), - _ => log::info!("offchain worker generation challenge failed:{:?}", e), + OffchainErr::Working => log::info!( + "offchain working, Unable to perform a new round of work." + ), + _ => log::info!( + "offchain worker generation challenge failed:{:?}", + e + ), }; } log::info!("offchain worker random challenge end"); @@ -423,33 +442,44 @@ pub mod pallet { let count: u32 = Keys::::get().len() as u32; let limit = count - .checked_mul(2).ok_or(Error::::Overflow)? - .checked_div(3).ok_or(Error::::Overflow)?; + .checked_mul(2) + .ok_or(Error::::Overflow)? 
+ .checked_div(3) + .ok_or(Error::::Overflow)?; let now = >::block_number(); let cur_block = >::get(); if now <= cur_block { - return Ok(()); - } - + return Ok(()) + } + if ChallengeProposal::::contains_key(&hash) { let mut proposal = ChallengeProposal::::get(&hash).unwrap(); proposal.0 += 1; if proposal.0 >= limit { - let duration = now.checked_add(&proposal.1.net_snap_shot.life).ok_or(Error::::Overflow)?; + let duration = now + .checked_add(&proposal.1.net_snap_shot.life) + .ok_or(Error::::Overflow)?; >::put(duration); let idle_duration = duration; let one_hour = T::OneHours::get(); let tee_length = T::TeeWorkerHandler::get_controller_list().len(); - let duration: u32 = (proposal.1.net_snap_shot.total_idle_space - .checked_add(proposal.1.net_snap_shot.total_service_space).ok_or(Error::::Overflow)? - .checked_div(IDLE_VERIFY_RATE).ok_or(Error::::Overflow)? - .checked_div(tee_length as u128).ok_or(Error::::Overflow)? - ) as u32; + let duration: u32 = (proposal + .1 + .net_snap_shot + .total_idle_space + .checked_add(proposal.1.net_snap_shot.total_service_space) + .ok_or(Error::::Overflow)? + .checked_div(IDLE_VERIFY_RATE) + .ok_or(Error::::Overflow)? + .checked_div(tee_length as u128) + .ok_or(Error::::Overflow)?) as u32; let v_duration = idle_duration - .checked_add(&duration.saturated_into()).ok_or(Error::::Overflow)? - .checked_add(&one_hour).ok_or(Error::::Overflow)?; + .checked_add(&duration.saturated_into()) + .ok_or(Error::::Overflow)? + .checked_add(&one_hour) + .ok_or(Error::::Overflow)?; >::put(v_duration); >::put(proposal.1); let _ = ChallengeProposal::::clear(ChallengeProposal::::count(), None); @@ -462,10 +492,7 @@ pub mod pallet { // Proposal Generally Less let _ = ChallengeProposal::::clear(ChallengeProposal::::count(), None); } else { - ChallengeProposal::::insert( - &hash, - (1, challenge_info), - ); + ChallengeProposal::::insert(&hash, (1, challenge_info)); } } @@ -512,10 +539,7 @@ pub mod pallet { let index: u32 = index % (tee_list.len() as u32); let tee_acc = &tee_list[index as usize]; - let prove_info = IdleProveInfo:: { - snap_shot: miner_snapshot, - idle_prove, - }; + let prove_info = IdleProveInfo:: { snap_shot: miner_snapshot, idle_prove }; UnverifyIdleProof::::mutate(tee_acc, |unverify_list| -> DispatchResult { unverify_list.try_push(prove_info).map_err(|_| Error::::Overflow)?; @@ -552,7 +576,6 @@ pub mod pallet { >::insert(&sender, u8::MIN); challenge_info.miner_snapshot_list.remove(index); } - return Ok(temp_miner_snap); } } @@ -568,10 +591,7 @@ pub mod pallet { let index: u32 = index % (tee_list.len() as u32); let tee_acc = &tee_list[index as usize]; - let prove_info = ServiceProveInfo:: { - snap_shot: miner_snapshot, - service_prove, - }; + let prove_info = ServiceProveInfo:: { snap_shot: miner_snapshot, service_prove }; UnverifyServiceProof::::mutate(tee_acc, |unverify_list| -> DispatchResult { unverify_list.try_push(prove_info).map_err(|_| Error::::Overflow)?; @@ -602,9 +622,10 @@ pub mod pallet { >::try_mutate(&tee_acc, |unverify_list| -> DispatchResult { for (index, miner_info) in unverify_list.iter().enumerate() { if &miner_info.snap_shot.miner == &sender { - let snap_shot = >::try_get().map_err(|_| Error::::UnexpectedError)?; + let snap_shot = >::try_get() + .map_err(|_| Error::::UnexpectedError)?; - let verify_idle_result = VerifyIdleResultInfo::{ + let verify_idle_result = VerifyIdleResultInfo:: { miner: sender.clone(), miner_prove: total_prove_hash.clone(), front: miner_info.snap_shot.space_proof_info.front, @@ -618,12 +639,25 @@ pub mod pallet { let 
tee_puk = T::TeeWorkerHandler::get_tee_publickey()?; let encoding = verify_idle_result.encode(); let hashing = sp_io::hashing::sha2_256(&encoding); - ensure!(verify_rsa(&tee_puk, &hashing, &signature), Error::::VerifyTeeSigFailed); + ensure!( + verify_rsa(&tee_puk, &hashing, &signature), + Error::::VerifyTeeSigFailed + ); - let idle_result = Self::check_idle_verify_param(idle_result, front, rear, &total_prove_hash, &accumulator, &miner_info); + let idle_result = Self::check_idle_verify_param( + idle_result, + front, + rear, + &total_prove_hash, + &accumulator, + &miner_info, + ); - if let Ok((_, service_result_opt)) = >::try_get(&sender).map_err(|_| Error::::UnexpectedError) { - let service_result = service_result_opt.ok_or(Error::::UnexpectedError)?; + if let Ok((_, service_result_opt)) = >::try_get(&sender) + .map_err(|_| Error::::UnexpectedError) + { + let service_result = + service_result_opt.ok_or(Error::::UnexpectedError)?; if idle_result && service_result { T::MinerControl::calculate_miner_reward( &sender, @@ -646,19 +680,33 @@ pub mod pallet { if idle_result { >::insert(&sender, u32::MIN); } else { - let count = >::get(&sender).checked_add(1).unwrap_or(IDLE_FAULT_TOLERANT as u32); + let count = >::get(&sender) + .checked_add(1) + .unwrap_or(IDLE_FAULT_TOLERANT as u32); if count >= IDLE_FAULT_TOLERANT as u32 { - T::MinerControl::idle_punish(&sender, miner_info.snap_shot.idle_space, miner_info.snap_shot.service_space)?; + T::MinerControl::idle_punish( + &sender, + miner_info.snap_shot.idle_space, + miner_info.snap_shot.service_space, + )?; } >::insert(&sender, count); } - let count = miner_info.snap_shot.space_proof_info.rear - .checked_sub(miner_info.snap_shot.space_proof_info.front).ok_or(Error::::Overflow)?; + let count = miner_info + .snap_shot + .space_proof_info + .rear + .checked_sub(miner_info.snap_shot.space_proof_info.front) + .ok_or(Error::::Overflow)?; T::CreditCounter::record_proceed_block_size(&tee_acc, count)?; unverify_list.remove(index); - Self::deposit_event(Event::::SubmitIdleVerifyResult { tee: tee_acc.clone(), miner: sender, result: idle_result }); + Self::deposit_event(Event::::SubmitIdleVerifyResult { + tee: tee_acc.clone(), + miner: sender, + result: idle_result, + }); return Ok(()) } @@ -683,32 +731,38 @@ pub mod pallet { >::try_mutate(&tee_acc, |unverify_list| -> DispatchResult { for (index, miner_info) in unverify_list.iter().enumerate() { if &miner_info.snap_shot.miner == &sender { - let snap_shot = >::try_get().map_err(|_| Error::::UnexpectedError)?; + let snap_shot = >::try_get() + .map_err(|_| Error::::UnexpectedError)?; - let verify_service_result = VerifyServiceResultInfo::{ + let verify_service_result = VerifyServiceResultInfo:: { miner: sender.clone(), tee_acc: tee_acc.clone(), miner_prove: miner_info.service_prove.clone(), result: service_result, - chal: QElement { + chal: QElement { random_index_list: snap_shot.net_snap_shot.random_index_list, random_list: snap_shot.net_snap_shot.random_list, }, - service_bloom_filter: service_bloom_filter, + service_bloom_filter, }; let tee_puk = T::TeeWorkerHandler::get_tee_publickey()?; let encoding = verify_service_result.encode(); let hashing = sp_io::hashing::sha2_256(&encoding); - ensure!(verify_rsa(&tee_puk, &hashing, &signature), Error::::VerifyTeeSigFailed); + ensure!( + verify_rsa(&tee_puk, &hashing, &signature), + Error::::VerifyTeeSigFailed + ); ensure!( service_bloom_filter == miner_info.snap_shot.service_bloom_filter, Error::::BloomFilterError, - ); + ); // Determine whether both proofs have been 
verified. - if let Ok((idle_result_opt, _)) = >::try_get(&sender).map_err(|_| Error::::UnexpectedError) { + if let Ok((idle_result_opt, _)) = >::try_get(&sender) + .map_err(|_| Error::::UnexpectedError) + { let idle_result = idle_result_opt.ok_or(Error::::UnexpectedError)?; // Determine whether to distribute rewards to miners. if idle_result && service_result { @@ -732,20 +786,34 @@ pub mod pallet { if service_result { >::insert(&sender, u32::MIN); } else { - let count = >::get(&sender).checked_add(1).unwrap_or(SERVICE_FAULT_TOLERANT.into()); + let count = >::get(&sender) + .checked_add(1) + .unwrap_or(SERVICE_FAULT_TOLERANT.into()); if count >= SERVICE_FAULT_TOLERANT as u32 { - T::MinerControl::service_punish(&sender, miner_info.snap_shot.idle_space, miner_info.snap_shot.service_space)?; + T::MinerControl::service_punish( + &sender, + miner_info.snap_shot.idle_space, + miner_info.snap_shot.service_space, + )?; } >::insert(&sender, count); } - let count = miner_info.snap_shot.service_space - .checked_div(IDLE_SEG_SIZE).ok_or(Error::::Overflow)? - .checked_add(1).ok_or(Error::::Overflow)?; + let count = miner_info + .snap_shot + .service_space + .checked_div(IDLE_SEG_SIZE) + .ok_or(Error::::Overflow)? + .checked_add(1) + .ok_or(Error::::Overflow)?; T::CreditCounter::record_proceed_block_size(&tee_acc, count as u64)?; unverify_list.remove(index); - Self::deposit_event(Event::::SubmitServiceVerifyResult { tee: tee_acc.clone(), miner: sender, result: service_result }); + Self::deposit_event(Event::::SubmitServiceVerifyResult { + tee: tee_acc.clone(), + miner: sender, + result: service_result, + }); return Ok(()) } @@ -761,16 +829,18 @@ pub mod pallet { pub fn update_lock(origin: OriginFor) -> DispatchResult { let _ = ensure_root(origin)?; - Lock::::mutate(|lock| *lock = !*lock ); + Lock::::mutate(|lock| *lock = !*lock); Ok(()) - } // FOR TESTING #[pallet::call_index(6)] #[transactional] #[pallet::weight(100_000_000)] - pub fn update_verify_duration(origin: OriginFor, new: BlockNumberOf) -> DispatchResult { + pub fn update_verify_duration( + origin: OriginFor, + new: BlockNumberOf, + ) -> DispatchResult { let _ = ensure_root(origin)?; >::put(new); @@ -784,10 +854,7 @@ pub mod pallet { pub fn update_counted_clear(origin: OriginFor, miner: AccountOf) -> DispatchResult { let _ = ensure_root(origin)?; - >::insert( - &miner, - 0, - ); + >::insert(&miner, 0); Ok(()) } @@ -795,7 +862,10 @@ pub mod pallet { #[pallet::call_index(8)] #[transactional] #[pallet::weight(100_000_000)] - pub fn update_challenge_duration(origin: OriginFor, new: BlockNumberOf) -> DispatchResult { + pub fn update_challenge_duration( + origin: OriginFor, + new: BlockNumberOf, + ) -> DispatchResult { let _ = ensure_root(origin)?; >::put(new); @@ -820,12 +890,9 @@ pub mod pallet { type Call = Call; fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { - if let Call::save_challenge_info { - challenge_info: _, - key, - seg_digest, - signature, - } = call { + if let Call::save_challenge_info { challenge_info: _, key, seg_digest, signature } = + call + { Self::check_unsign(key.clone(), &seg_digest, &signature) } else { InvalidTransaction::Call.into() @@ -846,14 +913,15 @@ pub mod pallet { weight = weight.saturating_add(T::DbWeight::get().reads(1)); for miner_snapshot in snap_shot.miner_snapshot_list.iter() { // unwrap_or(3) 3 Need to match the maximum number of consecutive penalties. 
- let count = >::get(&miner_snapshot.miner).checked_add(1).unwrap_or(6); + let count = + >::get(&miner_snapshot.miner).checked_add(1).unwrap_or(6); weight = weight.saturating_add(T::DbWeight::get().reads(1)); let _ = T::MinerControl::clear_punish( - &miner_snapshot.miner, - count, - miner_snapshot.idle_space, - miner_snapshot.service_space + &miner_snapshot.miner, + count, + miner_snapshot.idle_space, + miner_snapshot.service_space, ); weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); //For Testing @@ -864,10 +932,7 @@ pub mod pallet { } >::remove(&miner_snapshot.miner); } else { - >::insert( - &miner_snapshot.miner, - count, - ); + >::insert(&miner_snapshot.miner, count); } } @@ -906,7 +971,7 @@ pub mod pallet { >::remove(miner); weight = weight.saturating_add(T::DbWeight::get().writes(1)); } - return weight; + return weight } else { reassign_count = reassign_count.checked_add(1).unwrap_or(ceiling); >::put(reassign_count); @@ -917,7 +982,10 @@ pub mod pallet { let mut mission_count: u32 = 0; let mut max_count = 0; let tee_list = T::TeeWorkerHandler::get_controller_list(); - let mut reassign_list: BTreeMap, BoundedVec, T::VerifyMissionMax>> = Default::default(); + let mut reassign_list: BTreeMap< + AccountOf, + BoundedVec, T::VerifyMissionMax>, + > = Default::default(); for (acc, unverify_list) in UnverifyIdleProof::::iter() { seed += 1; @@ -940,10 +1008,11 @@ pub mod pallet { let result = value.try_append(&mut unverify_list.to_vec()); if result.is_err() { - let new_block: BlockNumberOf = now.saturating_add(10u32.saturated_into()); + let new_block: BlockNumberOf = + now.saturating_add(10u32.saturated_into()); >::put(new_block); weight = weight.saturating_add(T::DbWeight::get().writes(1)); - return weight; + return weight } } else { reassign_list.insert(tee_acc.clone(), unverify_list); @@ -958,17 +1027,23 @@ pub mod pallet { if mission_count != 0 { max_count = mission_count; for (acc, unverify_list) in reassign_list { - let result = UnverifyIdleProof::::mutate(acc, |tar_unverify_list| -> DispatchResult { - tar_unverify_list.try_append(&mut unverify_list.to_vec()).map_err(|_| Error::::Overflow)?; - // tar_unverify_list.try_push(mission) - Ok(()) - }); + let result = UnverifyIdleProof::::mutate( + acc, + |tar_unverify_list| -> DispatchResult { + tar_unverify_list + .try_append(&mut unverify_list.to_vec()) + .map_err(|_| Error::::Overflow)?; + // tar_unverify_list.try_push(mission) + Ok(()) + }, + ); if result.is_err() { - let new_block: BlockNumberOf = now.saturating_add(5u32.saturated_into()); + let new_block: BlockNumberOf = + now.saturating_add(5u32.saturated_into()); >::put(new_block); weight = weight.saturating_add(T::DbWeight::get().writes(1)); - return weight; + return weight } weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); @@ -976,7 +1051,10 @@ pub mod pallet { } let mut mission_count: u32 = 0; - let mut reassign_list: BTreeMap, BoundedVec, T::VerifyMissionMax>> = Default::default(); + let mut reassign_list: BTreeMap< + AccountOf, + BoundedVec, T::VerifyMissionMax>, + > = Default::default(); for (acc, unverify_list) in UnverifyServiceProof::::iter() { seed += 1; @@ -999,10 +1077,11 @@ pub mod pallet { let result = value.try_append(&mut unverify_list.to_vec()); if result.is_err() { - let new_block: BlockNumberOf = now.saturating_add(10u32.saturated_into()); + let new_block: BlockNumberOf = + now.saturating_add(10u32.saturated_into()); >::put(new_block); weight = weight.saturating_add(T::DbWeight::get().writes(1)); - return weight; + return 
weight } } else { reassign_list.insert(tee_acc.clone(), unverify_list); @@ -1017,17 +1096,23 @@ pub mod pallet { if mission_count != 0 { max_count = mission_count; for (acc, unverify_list) in reassign_list { - let result = UnverifyServiceProof::::mutate(acc, |tar_unverify_list| -> DispatchResult { - tar_unverify_list.try_append(&mut unverify_list.to_vec()).map_err(|_| Error::::Overflow)?; - // tar_unverify_list.try_push(mission) - Ok(()) - }); + let result = UnverifyServiceProof::::mutate( + acc, + |tar_unverify_list| -> DispatchResult { + tar_unverify_list + .try_append(&mut unverify_list.to_vec()) + .map_err(|_| Error::::Overflow)?; + // tar_unverify_list.try_push(mission) + Ok(()) + }, + ); if result.is_err() { - let new_block: BlockNumberOf = now.saturating_add(5u32.saturated_into()); + let new_block: BlockNumberOf = + now.saturating_add(5u32.saturated_into()); >::put(new_block); weight = weight.saturating_add(T::DbWeight::get().writes(1)); - return weight; + return weight } weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); @@ -1045,7 +1130,8 @@ pub mod pallet { } } else { let new_block = max_count.checked_mul(50u32).unwrap_or(u32::MAX.into()); - let new_block = now.checked_add(&new_block.saturated_into()).unwrap_or(u32::MAX.into()); + let new_block = + now.checked_add(&new_block.saturated_into()).unwrap_or(u32::MAX.into()); >::put(new_block); } } @@ -1062,12 +1148,11 @@ pub mod pallet { let keys = Keys::::get(); if !keys.contains(&key) { - return InvalidTransaction::Stale.into(); - } + return InvalidTransaction::Stale.into() + } - let signature_valid = seg_digest.using_encoded(|encoded_seg_digest| { - key.verify(&encoded_seg_digest, &signature) - }); + let signature_valid = seg_digest + .using_encoded(|encoded_seg_digest| key.verify(&encoded_seg_digest, &signature)); if !signature_valid { log::error!("bad signature."); @@ -1118,13 +1203,14 @@ pub mod pallet { let range = LIMIT / probability as u64 * 10; if (time_point > 2190502) && (time_point < (range + 2190502)) { if let (Some(progress), _) = - T::NextSessionRotation::estimate_current_session_progress(now) { + T::NextSessionRotation::estimate_current_session_progress(now) + { if progress >= START_FINAL_PERIOD { log::error!("TooLate!"); - return false; + return false } } - return true; + return true } false } @@ -1135,7 +1221,7 @@ pub mod pallet { log::info!("get local authority success!"); if !Self::check_working(&now, &authority_id) { Self::unlock_offchain(&authority_id); - return Err(OffchainErr::Working); + return Err(OffchainErr::Working) } log::info!("get challenge data..."); let challenge_info = Self::generation_challenge(now).map_err(|e| { @@ -1156,23 +1242,30 @@ pub mod pallet { let key = &authority_id.encode(); let storage = StorageValueRef::persistent(key); - let res = storage.mutate(|status: Result>, StorageRetrievalError>| { - match status { - // we are still waiting for inclusion. - Ok(Some(last_block)) => { - let lock_time = T::LockTime::get(); - // Based on human time, there is no possibility of overflow here - if last_block + lock_time > *now { - log::info!("last_block: {:?}, lock_time: {:?}, now: {:?}", last_block, lock_time, now); - Err(OffchainErr::Working) - } else { - Ok(*now) - } - }, - // attempt to set new status - _ => Ok(*now), - } - }); + let res = storage.mutate( + |status: Result>, StorageRetrievalError>| { + match status { + // we are still waiting for inclusion. 
+ Ok(Some(last_block)) => { + let lock_time = T::LockTime::get(); + // Based on human time, there is no possibility of overflow here + if last_block + lock_time > *now { + log::info!( + "last_block: {:?}, lock_time: {:?}, now: {:?}", + last_block, + lock_time, + now + ); + Err(OffchainErr::Working) + } else { + Ok(*now) + } + }, + // attempt to set new status + _ => Ok(*now), + } + }, + ); if res.is_err() { log::error!("offchain work: {:?}", OffchainErr::Working); @@ -1196,7 +1289,7 @@ pub mod pallet { if local_keys.len() == 0 { log::info!("no local_keys"); - return Err(OffchainErr::Ineligible); + return Err(OffchainErr::Ineligible) } local_keys.sort(); @@ -1210,18 +1303,17 @@ pub mod pallet { }; if let Some(authority_id) = authority_id { - return Ok((authority_id.clone(), validators.len())); + return Ok((authority_id.clone(), validators.len())) } } Err(OffchainErr::Ineligible) } - fn generation_challenge(now: BlockNumberOf) - -> Result, OffchainErr> - { + fn generation_challenge(now: BlockNumberOf) -> Result, OffchainErr> { // let miner_count = T::MinerControl::get_miner_count(); - let allminer = T::MinerControl::get_all_miner().map_err(|_| OffchainErr::GenerateInfoError)?; + let allminer = + T::MinerControl::get_all_miner().map_err(|_| OffchainErr::GenerateInfoError)?; let miner_count = allminer.len() as u32; if miner_count == 0 { Err(OffchainErr::GenerateInfoError)?; @@ -1231,7 +1323,10 @@ pub mod pallet { let need_miner_count = miner_count; // let need_miner_count = miner_count / 10 + 1; - let mut miner_list: BoundedVec, BlockNumberOf>, T::ChallengeMinerMax> = Default::default(); + let mut miner_list: BoundedVec< + MinerSnapShot, BlockNumberOf>, + T::ChallengeMinerMax, + > = Default::default(); let mut valid_index_list: Vec = Default::default(); @@ -1242,44 +1337,67 @@ pub mod pallet { // TODO: need to set a maximum number of cycles let mut seed: u32 = 20230601; - while ((miner_list.len() as u32) < need_miner_count) && (valid_index_list.len() as u32 != miner_count) { - seed = seed.saturating_add(1); - let index_list = Self::random_select_miner(need_miner_count, miner_count, &valid_index_list, seed); + while ((miner_list.len() as u32) < need_miner_count) && + (valid_index_list.len() as u32 != miner_count) + { + seed = seed.saturating_add(1); + let index_list = Self::random_select_miner( + need_miner_count, + miner_count, + &valid_index_list, + seed, + ); for index in index_list { valid_index_list.push(index); let miner = allminer[index as usize].clone(); - let state = T::MinerControl::get_miner_state(&miner).map_err(|_| OffchainErr::GenerateInfoError)?; - if state == "lock".as_bytes().to_vec() || state == "offline".as_bytes().to_vec() || state == "exit".as_bytes().to_vec() { - continue; + let state = T::MinerControl::get_miner_state(&miner) + .map_err(|_| OffchainErr::GenerateInfoError)?; + if state == "lock".as_bytes().to_vec() || + state == "offline".as_bytes().to_vec() || + state == "exit".as_bytes().to_vec() + { + continue } - let (idle_space, service_space, service_bloom_filter, space_proof_info, tee_signature) = T::MinerControl::get_miner_snapshot(&miner).map_err(|_| OffchainErr::GenerateInfoError)?; + let ( + idle_space, + service_space, + service_bloom_filter, + space_proof_info, + tee_signature, + ) = T::MinerControl::get_miner_snapshot(&miner) + .map_err(|_| OffchainErr::GenerateInfoError)?; if (idle_space == 0) && (service_space == 0) { - continue; + continue } let idle_life: u32 = (idle_space - .checked_div(IDLE_PROVE_RATE).ok_or(OffchainErr::Overflow)? 
- .checked_add(50).ok_or(OffchainErr::Overflow)? - ) as u32; + .checked_div(IDLE_PROVE_RATE) + .ok_or(OffchainErr::Overflow)? + .checked_add(50) + .ok_or(OffchainErr::Overflow)?) as u32; if idle_life > max_life { max_life = idle_life; } let service_life: u32 = (service_space - .checked_div(SERVICE_PROVE_RATE).ok_or(OffchainErr::Overflow)? - .checked_add(50).ok_or(OffchainErr::Overflow)? - ) as u32; + .checked_div(SERVICE_PROVE_RATE) + .ok_or(OffchainErr::Overflow)? + .checked_add(50) + .ok_or(OffchainErr::Overflow)?) as u32; if service_life > max_life { max_life = service_life; } - total_idle_space = total_idle_space.checked_add(idle_space).ok_or(OffchainErr::Overflow)?; - total_service_space = total_service_space.checked_add(service_space).ok_or(OffchainErr::Overflow)?; + total_idle_space = + total_idle_space.checked_add(idle_space).ok_or(OffchainErr::Overflow)?; + total_service_space = total_service_space + .checked_add(service_space) + .ok_or(OffchainErr::Overflow)?; let miner_snapshot = MinerSnapShot::, BlockNumberOf> { miner, @@ -1295,11 +1413,11 @@ pub mod pallet { }; if let Err(_e) = miner_list.try_push(miner_snapshot) { - return Err(OffchainErr::GenerateInfoError)?; + return Err(OffchainErr::GenerateInfoError)? }; if (miner_list.len() as u32) >= need_miner_count { - break; + break } } } @@ -1327,55 +1445,70 @@ pub mod pallet { } // generate idle challenge param - let (_, n, d) = T::MinerControl::get_expenders().map_err(|_| OffchainErr::GenerateInfoError)?; + let (_, n, d) = + T::MinerControl::get_expenders().map_err(|_| OffchainErr::GenerateInfoError)?; let mut space_challenge_param: SpaceChallengeParam = Default::default(); let mut repeat_filter: Vec = Default::default(); let mut seed_multi: u32 = 10000; let mut seed: u32 = 1; for elem in &mut space_challenge_param { loop { - let random = Self::random_number(seed.checked_add(seed_multi).ok_or(OffchainErr::Overflow)?) % n; + let random = Self::random_number( + seed.checked_add(seed_multi).ok_or(OffchainErr::Overflow)?, + ) % n; let random = n - .checked_mul(d).ok_or(OffchainErr::Overflow)? - .checked_add(random).ok_or(OffchainErr::Overflow)?; + .checked_mul(d) + .ok_or(OffchainErr::Overflow)? 
+ .checked_add(random) + .ok_or(OffchainErr::Overflow)?; if repeat_filter.contains(&random) { - continue; + continue } repeat_filter.push(random); *elem = random; seed = seed.checked_add(1).ok_or(OffchainErr::Overflow)?; - break; + break } seed_multi = seed_multi.checked_add(10000).ok_or(OffchainErr::Overflow)?; } let total_reward: u128 = T::MinerControl::get_reward() / 6; - let snap_shot = NetSnapShot::>{ + let snap_shot = NetSnapShot::> { start: now, life: max_life.into(), total_reward, total_idle_space, total_service_space, - random_index_list: random_index_list.try_into().map_err(|_| OffchainErr::GenerateInfoError)?, + random_index_list: random_index_list + .try_into() + .map_err(|_| OffchainErr::GenerateInfoError)?, random_list: random_list.try_into().map_err(|_| OffchainErr::GenerateInfoError)?, space_challenge_param, }; - Ok( ChallengeInfo::{ net_snap_shot: snap_shot, miner_snapshot_list: miner_list } ) + Ok(ChallengeInfo:: { net_snap_shot: snap_shot, miner_snapshot_list: miner_list }) } // Ensure that the length is not 0 - fn random_select_miner(need: u32, length: u32, valid_index_list: &Vec, seed: u32) -> Vec { + fn random_select_miner( + need: u32, + length: u32, + valid_index_list: &Vec, + seed: u32, + ) -> Vec { let mut miner_index_list: Vec = Default::default(); let mut seed: u32 = seed.saturating_mul(5000); - // In theory, unless the number of registered miners reaches 400 million, there is no possibility of overflow. - while (miner_index_list.len() as u32) < need && ((valid_index_list.len() + miner_index_list.len()) as u32 != length) { + // In theory, unless the number of registered miners reaches 400 million, there is no + // possibility of overflow. + while (miner_index_list.len() as u32) < need && + ((valid_index_list.len() + miner_index_list.len()) as u32 != length) + { seed += 1; let index = Self::random_number(seed); let index: u32 = (index % length as u64) as u32; if valid_index_list.contains(&index) { - continue; + continue } if !miner_index_list.contains(&index) { @@ -1391,21 +1524,20 @@ pub mod pallet { authority_id: T::AuthorityId, challenge_info: ChallengeInfo, ) -> Result<(), OffchainErr> { - let (signature, digest) = Self::offchain_sign_digest(now, &authority_id)?; let call = Call::save_challenge_info { - challenge_info, - seg_digest: digest, - signature: signature, - key: authority_id, - }; - + challenge_info, + seg_digest: digest, + signature, + key: authority_id, + }; + let result = SubmitTransaction::>::submit_unsigned_transaction(call.into()); if let Err(e) = result { log::error!("{:?}", e); - return Err(OffchainErr::SubmitTransactionFailed); + return Err(OffchainErr::SubmitTransactionFailed) } Ok(()) @@ -1414,20 +1546,26 @@ pub mod pallet { fn offchain_sign_digest( now: BlockNumberOf, authority_id: &T::AuthorityId, - ) -> Result< (<::AuthorityId as sp_runtime::RuntimeAppPublic>::Signature, SegDigest::>), OffchainErr> { - + ) -> Result< + ( + <::AuthorityId as sp_runtime::RuntimeAppPublic>::Signature, + SegDigest>, + ), + OffchainErr, + > { let network_state = sp_io::offchain::network_state().map_err(|_| OffchainErr::NetworkState)?; let author_len = Keys::::get().len(); - let digest = SegDigest::>{ + let digest = SegDigest::> { validators_len: author_len as u32, block_num: now, network_state, }; - let signature = authority_id.sign(&digest.encode()).ok_or(OffchainErr::FailedSigning)?; + let signature = + authority_id.sign(&digest.encode()).ok_or(OffchainErr::FailedSigning)?; Ok((signature, digest)) } @@ -1468,20 +1606,19 @@ pub mod pallet { .expect("secure 
hashes should always be bigger than u32; qed");
			let random_vec = random_seed.as_bytes().to_vec();
			if random_vec.len() >= 20 {
-				return random_vec[0..20].try_into().unwrap();
+				return random_vec[0..20].try_into().unwrap()
			}
		}
	}

	fn check_idle_verify_param(
-		mut idle_result: bool, 
-		front: u64, 
-		rear: u64, 
+		mut idle_result: bool,
+		front: u64,
+		rear: u64,
 		total_prove_hash: &BoundedVec,
-		accumulator: &Accumulator, 
+		accumulator: &Accumulator,
 		miner_info: &IdleProveInfo,
 	) -> bool {
-
 		if accumulator != &miner_info.snap_shot.space_proof_info.accumulator {
 			idle_result = false
 		}
@@ -1545,4 +1682,4 @@ impl OneSessionHandler for Pallet {
 	fn on_disabled(_i: u32) {
 		// ignore
 	}
-}
\ No newline at end of file
+}
diff --git a/c-pallets/audit/src/types.rs b/c-pallets/audit/src/types.rs
index 934a20ec..afba3d5a 100644
--- a/c-pallets/audit/src/types.rs
+++ b/c-pallets/audit/src/types.rs
@@ -7,7 +7,8 @@ use super::*;
 #[codec(mel_bound())]
 pub struct ChallengeInfo {
 	pub(super) net_snap_shot: NetSnapShot>,
-	pub(super) miner_snapshot_list: BoundedVec, BlockNumberOf>, T::ChallengeMinerMax>,
+	pub(super) miner_snapshot_list:
+		BoundedVec, BlockNumberOf>, T::ChallengeMinerMax>,
 }
 
 #[derive(PartialEq, Eq, Encode, Decode, Clone, RuntimeDebug, MaxEncodedLen, TypeInfo)]
@@ -26,7 +27,7 @@ pub struct NetSnapShot {
 pub struct MinerSnapShot {
 	pub(super) miner: AccountId,
 	pub(super) idle_life: Block,
-	pub(super) service_life: Block, 
+	pub(super) service_life: Block,
 	pub(super) idle_space: u128,
 	pub(super) service_space: u128,
 	pub(super) idle_submitted: bool,
@@ -90,4 +91,4 @@ pub struct SegDigest {
 pub struct QElement {
 	pub(super) random_index_list: BoundedVec>,
 	pub(super) random_list: BoundedVec<[u8; 20], ConstU32<1024>>,
-}
\ No newline at end of file
+}

From 07e89a0eb57680b53a63d911e7d17e16692be9d1 Mon Sep 17 00:00:00 2001
From: Yeou
Date: Wed, 11 Oct 2023 11:11:36 +0800
Subject: [PATCH 3/3] fix module name in README and comments

---
 c-pallets/audit/README.md         |  4 ++--
 c-pallets/audit/src/lib.rs        |  4 ++--
 docs/designs-of-storage-mining.md | 18 +++++++++---------
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/c-pallets/audit/README.md b/c-pallets/audit/README.md
index 08d799d1..10ad2267 100644
--- a/c-pallets/audit/README.md
+++ b/c-pallets/audit/README.md
@@ -1,9 +1,9 @@
-# Segment Book Module
+# Audit Module
 
 This file is the exclusive pallet of cess and the proof of podr2 adaptation
 
 ## OverView
 
-The job of this segment Book pallet is to process the proof of miner's service file and filling file, and generate random challenges. Call some traits of Smith pallet to punish miners. Call the trail of file bank pallet to obtain random files or files with problems in handling challenges.
+The job of this audit pallet is to process the proof of miner's service file and filling file, and generate random challenges. Call some traits of Smith pallet to punish miners. Call the trail of file bank pallet to obtain random files or files with problems in handling challenges.
 
 ### Terminology
 
diff --git a/c-pallets/audit/src/lib.rs b/c-pallets/audit/src/lib.rs
index 96c6a481..c45eb8a9 100644
--- a/c-pallets/audit/src/lib.rs
+++ b/c-pallets/audit/src/lib.rs
@@ -1,10 +1,10 @@
-//! # Segemnt Book Module
+//! # Audit Module
 //!
 //! This file is the exclusive pallet of cess and the proof of podr2 adaptation
 //!
 //! ## OverView
 //!
-//! The job of this segment Book pallet is to process the proof of miner's service file and filling
+//! 
The job of this audit pallet is to process the proof of miner's service file and filling //! file, and generate random challenges. Call some traits of Smith pallet to punish miners. //! Call the trail of file bank pallet to obtain random files or files with problems in handling //! challenges. diff --git a/docs/designs-of-storage-mining.md b/docs/designs-of-storage-mining.md index 8dbb10d0..60c27db8 100644 --- a/docs/designs-of-storage-mining.md +++ b/docs/designs-of-storage-mining.md @@ -1,6 +1,6 @@ # Designs of Storage Mining -Segment book is the interface about proof of storage, which mainly deals with the proofs submission and verification of the data segments. +Audit is the interface about proof of storage, which mainly deals with the proofs submission and verification of the data segments. ![Image](https://raw.githubusercontent.com/CESSProject/W3F-illustration/main/cess-v0.1.1/Substrate-Node-Template-2.png) @@ -13,16 +13,16 @@ Segment book is the interface about proof of storage, which mainly deals with th ``` //pool of the proof of replication(PoRep) ready to verify which is generated by idle segment -VerPoolA = StorageDoubleMap +VerPoolA = StorageDoubleMap //pool of the proof of space time(PoSt) ready to verify which is generated by idle segment -VerPoolB = StorageDoubleMap +VerPoolB = StorageDoubleMap //pool of PoRep verified which is generated by idle segment -PrePoolA = StorageDoubleMap +PrePoolA = StorageDoubleMap //pool of PoSt verified which is generated by idle segment -PrePoolB = StorageDoubleMap +PrePoolB = StorageDoubleMap //count the total block height of all idle segments of miner BlockNumberB = StorageMap @@ -124,16 +124,16 @@ begin with input(sender, peer_id, segment_id, result): ``` //pool of PoRep ready to verify which is generated by service segment -VerPoolC = StorageDoubleMap +VerPoolC = StorageDoubleMap //pool of PoSt ready to verify which is generated by service segment -VerPoolD = StorageDoubleMap +VerPoolD = StorageDoubleMap //pool of PoRep verified which is generated by service segment -PrePoolC = StorageDoubleMap +PrePoolC = StorageDoubleMap //pool of PoSt verified which is generated by service segment -PrePoolD = StorageDoubleMap +PrePoolD = StorageDoubleMap //count the total block height of all service segments of miner BlockNumberD = StorageMap