- aura_worker: Arc<Mutex<W>>,
- slot_duration: SlotDuration,
- _phantom: PhantomData<B>,
-}
-
-impl<B, CIDP, GOH, W> Clone for OrchestratorAuraConsensus<B, CIDP, GOH, W> {
- fn clone(&self) -> Self {
- Self {
- create_inherent_data_providers: self.create_inherent_data_providers.clone(),
- get_authorities_from_orchestrator: self.get_authorities_from_orchestrator.clone(),
- aura_worker: self.aura_worker.clone(),
- slot_duration: self.slot_duration,
- _phantom: PhantomData,
- }
- }
-}
-
-/// Build the Tanssi Aura worker.
-///
-/// The caller is responsible for running this worker; otherwise it will do nothing.
-pub fn build_orchestrator_aura_worker<P, B, C, PF, I, SO, L, BS, Error>(
- BuildOrchestratorAuraWorkerParams {
- client,
- block_import,
- proposer_factory,
- sync_oracle,
- justification_sync_link,
- backoff_authoring_blocks,
- keystore,
- block_proposal_slot_portion,
- max_block_proposal_slot_portion,
- telemetry,
- force_authoring,
- compatibility_mode,
- }: BuildOrchestratorAuraWorkerParams<C, I, PF, SO, L, BS, NumberFor<B>>,
-) -> impl TanssiSlotWorker<
- B,
- Proposer = PF::Proposer,
- BlockImport = I,
- SyncOracle = SO,
- JustificationSyncLink = L,
- Claim = P::Public,
- AuxData = Vec<AuthorityId<P>>,
->
-where
- B: BlockT,
- C: ProvideRuntimeApi<B> + BlockOf + AuxStore + HeaderBackend<B> + Send + Sync,
- AuthorityId<P>: From<<NimbusPair as Pair>::Public>,
- PF: Environment<B, Error = Error> + Send + Sync + 'static,
- PF::Proposer: Proposer<B, Error = Error>,
- P: Pair + Send + Sync,
- P::Public: AppPublic + Hash + Member + Encode + Decode,
- P::Signature: TryFrom<Vec<u8>> + Hash + Member + Encode + Decode,
- I: BlockImport<B> + Send + Sync + 'static,
- Error: std::error::Error + Send + From<sp_consensus::Error> + 'static,
- SO: SyncOracle + Send + Sync + Clone,
- L: sc_consensus::JustificationSyncLink<B>,
- BS: BackoffAuthoringBlocksStrategy<NumberFor<B>> + Send + Sync + 'static,
-{
- OrchestratorAuraWorker {
- client,
- block_import,
- env: proposer_factory,
- keystore,
- sync_oracle,
- justification_sync_link,
- force_authoring,
- backoff_authoring_blocks,
- telemetry,
- block_proposal_slot_portion,
- max_block_proposal_slot_portion,
- compatibility_mode,
- _key_type: PhantomData::<P>,
- }
-}
-
-/// Parameters of [`OrchestratorAuraConsensus::build`].
-pub struct BuildOrchestratorAuraConsensusParams<PF, CIDP, GOH, BI, Client, SO, BS> {
- pub proposer_factory: PF,
- pub create_inherent_data_providers: CIDP,
- pub get_authorities_from_orchestrator: GOH,
- pub block_import: BI,
- pub para_client: Arc<Client>,
- pub backoff_authoring_blocks: Option<BS>,
- pub sync_oracle: SO,
- pub keystore: KeystorePtr,
- pub force_authoring: bool,
- pub slot_duration: SlotDuration,
- pub telemetry: Option<TelemetryHandle>,
- pub block_proposal_slot_portion: SlotProportion,
- pub max_block_proposal_slot_portion: Option<SlotProportion>,
-}
-
-impl<B, CIDP, GOH> OrchestratorAuraConsensus<B, CIDP, GOH, ()>
-where
- B: BlockT,
- CIDP: CreateInherentDataProviders<B, (PHash, PersistedValidationData)> + 'static,
- GOH: 'static + Sync + Send,
- CIDP::InherentDataProviders: InherentDataProviderExt,
-{
- /// Create a new boxed instance of AURA consensus.
- pub fn build<P, Client, BI, SO, PF, BS, Error>(
- BuildOrchestratorAuraConsensusParams {
- proposer_factory,
- create_inherent_data_providers,
- get_authorities_from_orchestrator,
- block_import,
- para_client,
- backoff_authoring_blocks,
- sync_oracle,
- keystore,
- force_authoring,
- slot_duration,
- telemetry,
- block_proposal_slot_portion,
- max_block_proposal_slot_portion,
- }: BuildOrchestratorAuraConsensusParams<PF, CIDP, GOH, BI, Client, SO, BS>,
- ) -> Box<dyn ParachainConsensus<B>>
- where
- Client:
- ProvideRuntimeApi<B> + BlockOf + AuxStore + HeaderBackend<B> + Send + Sync + 'static,
- AuthorityId<P>: From<<NimbusPair as Pair>::Public>,
- BI: BlockImport<B> + Send + Sync + 'static,
- SO: SyncOracle + Send + Sync + Clone + 'static,
- BS: BackoffAuthoringBlocksStrategy<NumberFor<B>> + Send + Sync + 'static,
- PF: Environment<B, Error = Error> + Send + Sync + 'static,
- PF::Proposer: Proposer<
- B,
- Error = Error,
- ProofRecording = EnableProofRecording,
- Proof = <EnableProofRecording as ProofRecording>::Proof,
- >,
- Error: std::error::Error + Send + From<sp_consensus::Error> + 'static,
- P: Pair + Send + Sync + 'static,
- P::Public: AppPublic + Hash + Member + Encode + Decode,
- P::Signature: TryFrom<Vec<u8>> + Hash + Member + Encode + Decode,
- GOH: RetrieveAuthoritiesFromOrchestrator<
- B,
- (PHash, PersistedValidationData),
- Vec<AuthorityId<P>>,
- > + 'static,
- {
- let worker = build_orchestrator_aura_worker::<P, _, _, _, _, _, _, _, _>(
- BuildOrchestratorAuraWorkerParams {
- client: para_client,
- block_import,
- justification_sync_link: (),
- proposer_factory,
- sync_oracle,
- force_authoring,
- backoff_authoring_blocks,
- keystore,
- telemetry,
- block_proposal_slot_portion,
- max_block_proposal_slot_portion,
- compatibility_mode: sc_consensus_aura::CompatibilityMode::None,
- },
- );
-
- Box::new(OrchestratorAuraConsensus {
- create_inherent_data_providers: Arc::new(create_inherent_data_providers),
- get_authorities_from_orchestrator: Arc::new(get_authorities_from_orchestrator),
- aura_worker: Arc::new(Mutex::new(worker)),
- slot_duration,
- _phantom: PhantomData,
- })
- }
-}
-
-impl<B, CIDP, GOH, W> OrchestratorAuraConsensus<B, CIDP, GOH, W>
-where
- B: BlockT,
- CIDP: CreateInherentDataProviders<B, (PHash, PersistedValidationData)> + 'static,
- CIDP::InherentDataProviders: InherentDataProviderExt,
- GOH: RetrieveAuthoritiesFromOrchestrator<B, (PHash, PersistedValidationData), W::AuxData>
- + 'static,
- W: TanssiSlotWorker<B> + Send + Sync,
-{
- /// Create the inherent data providers.
- ///
- /// Returns the inherent data providers used, or `None` if they could not be created.
- async fn inherent_data(
- &self,
- parent: B::Hash,
- validation_data: &PersistedValidationData,
- relay_parent: PHash,
- ) -> Option<CIDP::InherentDataProviders> {
- self.create_inherent_data_providers
- .create_inherent_data_providers(parent, (relay_parent, validation_data.clone()))
- .await
- .map_err(|e| {
- tracing::error!(
- target: LOG_TARGET,
- error = ?e,
- "Failed to create inherent data providers.",
- )
- })
- .ok()
- }
-}
-
-#[async_trait::async_trait]
-impl<B, CIDP, GOH, W> ParachainConsensus<B> for OrchestratorAuraConsensus<B, CIDP, GOH, W>
-where
- B: BlockT,
- CIDP: CreateInherentDataProviders<B, (PHash, PersistedValidationData)> + Send + Sync + 'static,
- CIDP::InherentDataProviders: InherentDataProviderExt + Send,
- GOH: RetrieveAuthoritiesFromOrchestrator<B, (PHash, PersistedValidationData), W::AuxData>
- + 'static,
- W: TanssiSlotWorker<B> + Send + Sync,
- W::Proposer: Proposer<B, Proof = <EnableProofRecording as ProofRecording>::Proof>,
-{
- async fn produce_candidate(
- &mut self,
- parent: &B::Header,
- relay_parent: PHash,
- validation_data: &PersistedValidationData,
- ) -> Option<ParachainCandidate<B>> {
- let inherent_data_providers = self
- .inherent_data(parent.hash(), validation_data, relay_parent)
- .await?;
-
- let header = self
- .get_authorities_from_orchestrator
- .retrieve_authorities_from_orchestrator(
- parent.hash(),
- (relay_parent, validation_data.clone()),
- )
- .await
- .map_err(|e| {
- tracing::error!(
- target: LOG_TARGET,
- error = ?e,
- "Failed to get orch head.",
- )
- })
- .ok()?;
-
- let info = SlotInfo::new(
- inherent_data_providers.slot(),
- Box::new(inherent_data_providers),
- self.slot_duration.as_duration(),
- parent.clone(),
- // Set the block limit to 50% of the maximum PoV size.
- //
- // TODO: If we got benchmarking that includes the proof size,
- // we should be able to use the maximum pov size.
- Some((validation_data.max_pov_size / 2) as usize),
- );
-
- let res = self
- .aura_worker
- .lock()
- .await
- .tanssi_on_slot(info, header)
- .await?;
-
- Some(ParachainCandidate {
- block: res.block,
- proof: res.storage_proof,
- })
- }
-}
-
-#[allow(dead_code)]
-struct OrchestratorAuraWorker<C, E, I, P, SO, L, BS, N> {
- client: Arc<C>,
- block_import: I,
- env: E,
- keystore: KeystorePtr,
- sync_oracle: SO,
- justification_sync_link: L,
- force_authoring: bool,
- backoff_authoring_blocks: Option<BS>,
- block_proposal_slot_portion: SlotProportion,
- max_block_proposal_slot_portion: Option<SlotProportion>,
- telemetry: Option<TelemetryHandle>,
- compatibility_mode: CompatibilityMode<N>,
- _key_type: PhantomData<P>,
-}
-
-#[async_trait::async_trait]
-impl<B, C, E, I, P, Error, SO, L, BS> sc_consensus_slots::SimpleSlotWorker<B>
- for OrchestratorAuraWorker<C, E, I, P, SO, L, BS, NumberFor<B>>
-where
- B: BlockT,
- C: ProvideRuntimeApi<B> + BlockOf + HeaderBackend<B> + Sync,
- AuthorityId<P>: From<<NimbusPair as Pair>::Public>,
- E: Environment<B, Error = Error> + Send + Sync,
- E::Proposer: Proposer<B, Error = Error>,
- I: BlockImport<B> + Send + Sync + 'static,
- P: Pair + Send + Sync,
- P::Public: AppPublic + Public + Member + Encode + Decode + Hash,
- P::Signature: TryFrom<Vec<u8>> + Member + Encode + Decode + Hash + Debug,
- SO: SyncOracle + Send + Clone + Sync,
- L: sc_consensus::JustificationSyncLink<B>,
- BS: BackoffAuthoringBlocksStrategy<NumberFor<B>> + Send + Sync + 'static,
- Error: std::error::Error + Send + From<sp_consensus::Error> + 'static,
-{
- type BlockImport = I;
- type SyncOracle = SO;
- type JustificationSyncLink = L;
- type CreateProposer =
- Pin<Box<dyn Future<Output = Result<Self::Proposer, sp_consensus::Error>> + Send + 'static>>;
- type Proposer = E::Proposer;
- type Claim = P::Public;
- type AuxData = Vec<AuthorityId<P>>;
-
- fn logging_target(&self) -> &'static str {
- "tanssi_aura"
- }
-
- fn block_import(&mut self) -> &mut Self::BlockImport {
- &mut self.block_import
- }
-
- fn aux_data(
- &self,
- _header: &B::Header,
- _slot: Slot,
- ) -> Result<Self::AuxData, sp_consensus::Error> {
- Ok(Default::default())
- }
-
- fn authorities_len(&self, epoch_data: &Self::AuxData) -> Option<usize> {
- Some(epoch_data.len())
- }
-
- async fn claim_slot(
- &mut self,
- _header: &B::Header,
- slot: Slot,
- epoch_data: &Self::AuxData,
- ) -> Option {
- let expected_author = slot_author::<P>(slot, epoch_data);
- // if not running with force-authoring, just do the usual slot check
- if !self.force_authoring {
- expected_author.and_then(|p| {
- if Keystore::has_keys(&*self.keystore, &[(p.to_raw_vec(), NIMBUS_KEY_ID)]) {
- Some(p.clone())
- } else {
- None
- }
- })
- }
- // if running with force-authoring, as long as you are in the authority set,
- // propose
- else {
- epoch_data
- .iter()
- .find(|key| {
- Keystore::has_keys(&*self.keystore, &[(key.to_raw_vec(), NIMBUS_KEY_ID)])
- })
- .cloned()
- }
- }
-
- // TODO: Where to put these info in the refactor?
- fn pre_digest_data(&self, slot: Slot, claim: &Self::Claim) -> Vec<sp_runtime::DigestItem> {
- vec![
- <DigestItem as CompatibleDigestItem<P::Signature>>::aura_pre_digest(slot),
- // We inject the nimbus digest as well. Crucial to be able to verify signatures
- <DigestItem as NimbusCompatibleDigestItem>::nimbus_pre_digest(
- // TODO remove this unwrap through trait reqs
- nimbus_primitives::NimbusId::from_slice(claim.as_ref()).unwrap(),
- ),
- ]
- }
-
- async fn block_import_params(
- &self,
- header: B::Header,
- header_hash: &B::Hash,
- body: Vec<B::Extrinsic>,
- storage_changes: StorageChanges<B>,
- public: Self::Claim,
- _epoch: Self::AuxData,
- ) -> Result<sc_consensus::BlockImportParams<B>, sp_consensus::Error> {
- // sign the pre-sealed hash of the block and then
- // add it to a digest item.
- let signature = Keystore::sign_with(
- &*self.keystore,
- <AuthorityId<P> as AppCrypto>::ID,
- <AuthorityId<P> as AppCrypto>::CRYPTO_ID,
- public.as_slice(),
- header_hash.as_ref(),
- )
- .map_err(|e| sp_consensus::Error::CannotSign(format!("{}. Key: {:?}", e, public)))?
- .ok_or_else(|| {
- sp_consensus::Error::CannotSign(format!(
- "Could not find key in keystore. Key: {:?}",
- public
- ))
- })?;
- let signature = signature
- .clone()
- .try_into()
- .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public.to_raw_vec()))?;
-
- let signature_digest_item =
- <DigestItem as NimbusCompatibleDigestItem>::nimbus_seal(signature);
-
- let mut import_block = BlockImportParams::new(BlockOrigin::Own, header);
- import_block.post_digests.push(signature_digest_item);
- import_block.body = Some(body);
- import_block.state_action =
- StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes));
- import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain);
-
- Ok(import_block)
- }
-
- fn force_authoring(&self) -> bool {
- self.force_authoring
- }
-
- fn should_backoff(&self, slot: Slot, chain_head: &B::Header) -> bool {
- if let Some(ref strategy) = self.backoff_authoring_blocks {
- if let Ok(chain_head_slot) = find_pre_digest::<B, P::Signature>(chain_head) {
- return strategy.should_backoff(
- *chain_head.number(),
- chain_head_slot,
- self.client.info().finalized_number,
- slot,
- self.logging_target(),
- );
- }
- }
- false
- }
-
- fn sync_oracle(&mut self) -> &mut Self::SyncOracle {
- &mut self.sync_oracle
- }
-
- fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink {
- &mut self.justification_sync_link
- }
-
- fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer {
- self.env
- .init(block)
- .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e)))
- .boxed()
- }
-
- fn telemetry(&self) -> Option<TelemetryHandle> {
- self.telemetry.clone()
- }
-
- fn proposing_remaining_duration(&self, slot_info: &SlotInfo<B>) -> std::time::Duration {
- let parent_slot = find_pre_digest::<B, P::Signature>(&slot_info.chain_head).ok();
-
- sc_consensus_slots::proposing_remaining_duration(
- parent_slot,
- slot_info,
- &self.block_proposal_slot_portion,
- self.max_block_proposal_slot_portion.as_ref(),
- sc_consensus_slots::SlotLenienceType::Exponential,
- self.logging_target(),
- )
- }
-}
-
-/// Parameters of [`build_orchestrator_aura_worker`].
-pub struct BuildOrchestratorAuraWorkerParams<C, I, PF, SO, L, BS, N> {
- /// The client to interact with the chain.
- pub client: Arc<C>,
- /// The block import.
- pub block_import: I,
- /// The proposer factory to build proposer instances.
- pub proposer_factory: PF,
- /// The sync oracle that can give us the current sync status.
- pub sync_oracle: SO,
- /// Hook into the sync module to control the justification sync process.
- pub justification_sync_link: L,
- /// Should we force the authoring of blocks?
- pub force_authoring: bool,
- /// The backoff strategy when we miss slots.
- pub backoff_authoring_blocks: Option<BS>,
- /// The keystore used by the node.
- pub keystore: KeystorePtr,
- /// The proportion of the slot dedicated to proposing.
- ///
- /// The block proposing will be limited to this proportion of the slot from the starting of the
- /// slot. However, the proposing can still take longer when there is some lenience factor
- /// applied, because there were no blocks produced for some slots.
- pub block_proposal_slot_portion: SlotProportion,
- /// The maximum proportion of the slot dedicated to proposing with any lenience factor applied
- /// due to no blocks being produced.
- pub max_block_proposal_slot_portion: Option<SlotProportion>,
- /// Telemetry instance used to report telemetry metrics.
- pub telemetry: Option<TelemetryHandle>,
- /// Compatibility mode that should be used.
- ///
- /// If in doubt, use `Default::default()`.
- pub compatibility_mode: CompatibilityMode<N>,
-}
-
#[async_trait::async_trait]
pub trait RetrieveAuthoritiesFromOrchestrator<Block: BlockT, ExtraArgs, A>: Send + Sync {
/// Retrieve the authorities at the given `parent` block using the given `extra_args`.
@@ -620,186 +73,3 @@ pub trait TanssiSlotWorker<B: BlockT>: SimpleSlotWorker<B> {
aux_data: Self::AuxData,
) -> Option<SlotResult<B, <Self::Proposer as Proposer<B>>::Proof>>;
}
-
-#[async_trait::async_trait]
-impl<B, C, E, I, P, Error, SO, L, BS> TanssiSlotWorker<B>
- for OrchestratorAuraWorker<C, E, I, P, SO, L, BS, NumberFor<B>>
-where
- B: BlockT,
- C: ProvideRuntimeApi<B> + BlockOf + HeaderBackend<B> + Sync,
- AuthorityId<P>: From<<NimbusPair as Pair>::Public>,
- E: Environment<B, Error = Error> + Send + Sync,
- E::Proposer: Proposer<B, Error = Error>,
- I: BlockImport<B> + Send + Sync + 'static,
- P: Pair + Send + Sync,
- P::Public: AppPublic + Public + Member + Encode + Decode + Hash,
- P::Signature: TryFrom<Vec<u8>> + Member + Encode + Decode + Hash + Debug,
- SO: SyncOracle + Send + Clone + Sync,
- L: sc_consensus::JustificationSyncLink<B>,
- BS: BackoffAuthoringBlocksStrategy<NumberFor<B>> + Send + Sync + 'static,
- Error: std::error::Error + Send + From<sp_consensus::Error> + 'static,
-{
- async fn tanssi_on_slot(
- &mut self,
- slot_info: SlotInfo<B>,
- aux_data: Self::AuxData,
- ) -> Option<SlotResult<B, <Self::Proposer as Proposer<B>>::Proof>>
- where
- Self: Sync,
- {
- let slot = slot_info.slot;
- let telemetry = self.telemetry();
- let logging_target = self.logging_target();
-
- let proposing_remaining_duration = self.proposing_remaining_duration(&slot_info);
-
- let end_proposing_at = if proposing_remaining_duration == Duration::default() {
- debug!(
- target: logging_target,
- "Skipping proposal slot {} since there's no time left to propose", slot,
- );
-
- return None;
- } else {
- Instant::now() + proposing_remaining_duration
- };
-
- self.notify_slot(&slot_info.chain_head, slot, &aux_data);
-
- let authorities_len = self.authorities_len(&aux_data);
-
- if !self.force_authoring()
- && self.sync_oracle().is_offline()
- && authorities_len.map(|a| a > 1).unwrap_or(false)
- {
- debug!(
- target: logging_target,
- "Skipping proposal slot. Waiting for the network."
- );
- telemetry!(
- telemetry;
- CONSENSUS_DEBUG;
- "slots.skipping_proposal_slot";
- "authorities_len" => authorities_len,
- );
-
- return None;
- }
-
- let claim = self
- .claim_slot(&slot_info.chain_head, slot, &aux_data)
- .await?;
-
- log::info!("claim valid for slot {:?}", slot);
-
- if self.should_backoff(slot, &slot_info.chain_head) {
- return None;
- }
-
- debug!(
- target: logging_target,
- "Starting authorship at slot: {slot}"
- );
-
- telemetry!(telemetry; CONSENSUS_DEBUG; "slots.starting_authorship"; "slot_num" => slot);
-
- let proposer = match self.proposer(&slot_info.chain_head).await {
- Ok(p) => p,
- Err(err) => {
- warn!(
- target: logging_target,
- "Unable to author block in slot {slot:?}: {err}"
- );
-
- telemetry!(
- telemetry;
- CONSENSUS_WARN;
- "slots.unable_authoring_block";
- "slot" => *slot,
- "err" => ?err
- );
-
- return None;
- }
- };
-
- let proposal = self
- .propose(proposer, &claim, slot_info, end_proposing_at)
- .await?;
-
- let (block, storage_proof) = (proposal.block, proposal.proof);
- let (header, body) = block.deconstruct();
- let header_num = *header.number();
- let header_hash = header.hash();
- let parent_hash = *header.parent_hash();
-
- let block_import_params = match self
- .block_import_params(
- header,
- &header_hash,
- body.clone(),
- proposal.storage_changes,
- claim,
- aux_data,
- )
- .await
- {
- Ok(bi) => bi,
- Err(err) => {
- warn!(
- target: logging_target,
- "Failed to create block import params: {}", err
- );
-
- return None;
- }
- };
-
- info!(
- target: logging_target,
- "🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.",
- header_num,
- block_import_params.post_hash(),
- header_hash,
- );
-
- telemetry!(
- telemetry;
- CONSENSUS_INFO;
- "slots.pre_sealed_block";
- "header_num" => ?header_num,
- "hash_now" => ?block_import_params.post_hash(),
- "hash_previously" => ?header_hash,
- );
-
- let header = block_import_params.post_header();
- match self.block_import().import_block(block_import_params).await {
- Ok(res) => {
- res.handle_justification(
- &header.hash(),
- *header.number(),
- self.justification_sync_link(),
- );
- }
- Err(err) => {
- warn!(
- target: logging_target,
- "Error with block built on {:?}: {}", parent_hash, err,
- );
-
- telemetry!(
- telemetry;
- CONSENSUS_WARN;
- "slots.err_with_block_built_on";
- "hash" => ?parent_hash,
- "err" => ?err,
- );
- }
- }
-
- Some(SlotResult {
- block: B::new(header, body),
- storage_proof,
- })
- }
-}
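For reference, the `claim_slot` logic removed above implements plain Aura round-robin scheduling: the expected author of slot `s` is `authorities[s % authorities.len()]`, and without force-authoring a node claims the slot only if that key is in its own keystore. A minimal, self-contained sketch of this rule (illustrative only; the removed worker looks up `NimbusId` keys under `NIMBUS_KEY_ID`, modeled here as plain strings):

```rust
// Illustrative sketch of Aura round-robin slot assignment (keys as strings for brevity).
fn slot_author<'a>(slot: u64, authorities: &'a [String]) -> Option<&'a String> {
    if authorities.is_empty() {
        return None;
    }
    let idx = (slot % authorities.len() as u64) as usize;
    authorities.get(idx)
}

// Without force-authoring, a node claims the slot only if the expected author
// is one of its own keys (the real worker checks the keystore instead).
fn claim_slot(slot: u64, authorities: &[String], our_keys: &[String]) -> Option<String> {
    slot_author(slot, authorities)
        .filter(|expected| our_keys.iter().any(|k| k == *expected))
        .cloned()
}

fn main() {
    let authorities: Vec<String> =
        ["alice", "bob", "charlie", "local"].map(String::from).to_vec();
    let our_keys = vec!["local".to_string()];
    // With 4 authorities and our key at index 3, slots 3 and 7 are claimable,
    // matching the expectations in the removed `current_node_authority_should_claim_slot`.
    assert!(claim_slot(3, &authorities, &our_keys).is_some());
    assert!(claim_slot(7, &authorities, &our_keys).is_some());
    assert!(claim_slot(4, &authorities, &our_keys).is_none());
    println!("ok");
}
```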
diff --git a/client/consensus/src/lib.rs b/client/consensus/src/lib.rs
index 2691ebe0c..cc6929a11 100644
--- a/client/consensus/src/lib.rs
+++ b/client/consensus/src/lib.rs
@@ -28,10 +28,7 @@ mod manual_seal;
#[cfg(test)]
mod tests;
-pub use {
- consensus_orchestrator::{BuildOrchestratorAuraConsensusParams, OrchestratorAuraConsensus},
- sc_consensus_aura::CompatibilityMode,
-};
+pub use sc_consensus_aura::CompatibilityMode;
pub use {
cumulus_primitives_core::ParaId,
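Relatedly, the `block_proposal_slot_portion` and `max_block_proposal_slot_portion` parameters documented in the removed `BuildOrchestratorAuraWorkerParams` bound how much of each slot the proposer may use, with extra lenience after empty slots. A rough sketch of that deadline calculation, assuming the exponential slot-lenience scheme of `sc_consensus_slots` (the cap constant and exact curve here are illustrative, not the crate's values):

```rust
use std::time::Duration;

// Assumed cap on how many skipped slots count toward lenience (illustrative).
const LENIENCE_CAP: u64 = 7;

fn proposing_remaining(
    slot_duration: Duration,
    base_portion: f32,        // e.g. SlotProportion::new(0.5) in the removed tests
    max_portion: Option<f32>, // max_block_proposal_slot_portion
    skipped_slots: u64,       // slots without a block since the parent
) -> Duration {
    // Base budget: a fixed fraction of the slot.
    let base = slot_duration.mul_f32(base_portion);
    // Exponential lenience: double the budget per skipped slot, capped.
    let lenience = 2u32.saturating_pow(skipped_slots.min(LENIENCE_CAP) as u32);
    let extended = base * lenience;
    // Never exceed the configured maximum proportion of the slot.
    match max_portion {
        Some(max) => extended.min(slot_duration.mul_f32(max)),
        None => extended,
    }
}

fn main() {
    let budget = proposing_remaining(Duration::from_millis(1000), 0.5, Some(0.9), 2);
    // 500ms base, doubled twice to 2000ms, then capped at 90% of the slot: 900ms.
    println!("proposal budget: {budget:?}");
}
```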
diff --git a/client/consensus/src/tests.rs b/client/consensus/src/tests.rs
index 1e9866ca7..57ccc861f 100644
--- a/client/consensus/src/tests.rs
+++ b/client/consensus/src/tests.rs
@@ -20,50 +20,34 @@
// https://github.com/paritytech/substrate/blob/master/client/consensus/aura/src/lib.rs#L832
// Most of the items added here are intended to make it work with our current consensus mechanism
use {
- crate::{
- consensus_orchestrator::{
- build_orchestrator_aura_worker, BuildOrchestratorAuraWorkerParams,
- },
- InherentDataProviderExt, LOG_TARGET,
- },
+ crate::{InherentDataProviderExt, LOG_TARGET},
cumulus_client_consensus_common::ParachainConsensus,
- cumulus_primitives_core::PersistedValidationData,
futures::prelude::*,
futures_timer::Delay,
- nimbus_primitives::{
- CompatibleDigestItem, NimbusId, NimbusPair, NIMBUS_ENGINE_ID, NIMBUS_KEY_ID,
- },
+ nimbus_primitives::{CompatibleDigestItem, NimbusId, NimbusPair, NIMBUS_ENGINE_ID},
parking_lot::Mutex,
sc_block_builder::BlockBuilderProvider,
- sc_client_api::{BlockchainEvents, HeaderBackend},
sc_consensus::{BoxJustificationImport, ForkChoiceStrategy},
- sc_consensus_aura::SlotProportion,
- sc_consensus_slots::{BackoffAuthoringOnFinalizedHeadLagging, SimpleSlotWorker, SlotInfo},
- sc_keystore::LocalKeystore,
+ sc_consensus_slots::SlotInfo,
sc_network_test::{Block as TestBlock, *},
sp_consensus::{
- BlockOrigin, EnableProofRecording, Environment, NoNetwork as DummyOracle, Proposal,
- Proposer, SelectChain, SyncOracle,
+ EnableProofRecording, Environment, Proposal, Proposer, SelectChain, SyncOracle,
},
- sp_consensus_aura::{inherents::InherentDataProvider, SlotDuration},
+ sp_consensus_aura::SlotDuration,
sp_consensus_slots::Slot,
- sp_core::{
- crypto::{ByteArray, Pair},
- H256,
- },
+ sp_core::crypto::{ByteArray, Pair},
sp_inherents::{CreateInherentDataProviders, InherentData},
sp_keyring::sr25519::Keyring,
- sp_keystore::Keystore,
sp_runtime::{
traits::{Block as BlockT, Header as _},
Digest, DigestItem,
},
- sp_timestamp::Timestamp,
- std::{sync::Arc, task::Poll, time::Duration},
+ std::{sync::Arc, time::Duration},
substrate_test_runtime_client::TestClient,
};
// Duration of slot time
+#[allow(unused)]
const SLOT_DURATION_MS: u64 = 1000;
type Error = sp_blockchain::Error;
@@ -367,6 +351,7 @@ where
}
}
/// Returns current duration since unix epoch.
+#[allow(unused)]
pub fn duration_now() -> Duration {
use std::time::SystemTime;
let now = SystemTime::now();
@@ -380,6 +365,7 @@ pub fn duration_now() -> Duration {
}
/// Returns the duration until the next slot from now.
+#[allow(unused)]
pub fn time_until_next_slot(slot_duration: Duration) -> Duration {
let now = duration_now().as_millis();
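For reference, `time_until_next_slot` above computes the wait until the next slot boundary: slots are consecutive `slot_duration` windows measured from the unix epoch, so the next boundary is the next multiple of `slot_duration`. A standalone sketch of that arithmetic (mirroring the helper kept in these tests, not a verbatim copy):

```rust
use std::time::{Duration, SystemTime, UNIX_EPOCH};

// Current duration since the unix epoch.
fn duration_now() -> Duration {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system time is after the unix epoch")
}

// Wait until the next slot boundary: slot_duration - (now % slot_duration).
fn time_until_next_slot(slot_duration: Duration) -> Duration {
    let now = duration_now().as_millis();
    let dur = slot_duration.as_millis();
    // Index of the next slot, then its start time minus `now`.
    let next_slot = (now + dur) / dur;
    let remaining = next_slot * dur - now;
    Duration::from_millis(remaining as u64)
}

fn main() {
    // With 1000ms slots (SLOT_DURATION_MS in these tests), the wait is in (0, 1000].
    let wait = time_until_next_slot(Duration::from_millis(1000));
    assert!(wait <= Duration::from_millis(1000));
    println!("next slot in {wait:?}");
}
```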
@@ -393,6 +379,8 @@ pub fn time_until_next_slot(slot_duration: Duration) -> Duration {
/// Every time a new slot is triggered, `parachain_block_producer.produce_candidate`
/// is called and the future it returns is
/// polled until completion, unless we are major syncing.
+/// TODO: refactor to use the new Tanssi Aura params
+#[allow(unused)]
pub async fn start_orchestrator_aura_consensus_candidate_producer<B, C, SO, CIDP>(
slot_duration: SlotDuration,
client: C,
@@ -429,272 +417,6 @@ pub async fn start_orchestrator_aura_consensus_candidate_producer<
- .take_while(|notification| {
- future::ready(notification.header.number() >= &5)
- })
- .for_each(move |_| futures::future::ready(())),
- );
-
- let create_inherent_data_providers = |_, _| async {
- let slot = InherentDataProvider::from_timestamp_and_slot_duration(
- Timestamp::current(),
- SlotDuration::from_millis(SLOT_DURATION_MS),
- );
-
- Ok((slot,))
- };
-
- let sync_oracle = DummyOracle;
- let slot_duration = SlotDuration::from_millis(SLOT_DURATION_MS);
-
- let params = crate::BuildOrchestratorAuraConsensusParams {
- proposer_factory: environ,
- create_inherent_data_providers: |_, _| async {
- let slot = InherentDataProvider::from_timestamp_and_slot_duration(
- Timestamp::current(),
- SlotDuration::from_millis(SLOT_DURATION_MS),
- );
-
- Ok((slot,))
- },
- get_authorities_from_orchestrator: move |_block_hash: <TestBlock as BlockT>::Hash,
- (_relay_parent, _validation_data): (
- H256,
- PersistedValidationData,
- )| {
- async move {
- let aux_data = vec![
- (Keyring::Alice).public().into(),
- (Keyring::Bob).public().into(),
- (Keyring::Charlie).public().into(),
- ];
- Ok(aux_data)
- }
- },
- block_import: client.clone(),
- para_client: client,
- sync_oracle: DummyOracle,
- keystore,
- force_authoring: false,
- backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()),
- slot_duration: SlotDuration::from_millis(SLOT_DURATION_MS),
- // We got around 500ms for proposing
- block_proposal_slot_portion: SlotProportion::new(0.5),
- max_block_proposal_slot_portion: None,
- telemetry: None,
- };
-
- let parachain_block_producer =
- crate::OrchestratorAuraConsensus::build::<NimbusPair, _, _, _, _, _, _>(params);
-
- aura_futures.push(start_orchestrator_aura_consensus_candidate_producer(
- slot_duration,
- select_chain,
- parachain_block_producer,
- sync_oracle,
- create_inherent_data_providers,
- ));
- }
-
- future::select(
- future::poll_fn(move |cx| {
- net.lock().poll(cx);
- Poll::<()>::Pending
- }),
- future::select(
- future::join_all(aura_futures),
- future::join_all(import_notifications),
- ),
- )
- .await;
-}
-
-// Checks node slot claim. Again for different slots, different authorities
-// should be able to claim
-#[tokio::test]
-async fn current_node_authority_should_claim_slot() {
- let net = AuraTestNet::new(4);
-
- let mut authorities: Vec<NimbusId> = vec![
- Keyring::Alice.public().into(),
- Keyring::Bob.public().into(),
- Keyring::Charlie.public().into(),
- ];
-
- let keystore_path = tempfile::tempdir().expect("Creates keystore path");
- let keystore = LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore.");
-
- let public = keystore
- .sr25519_generate_new(NIMBUS_KEY_ID, None)
- .expect("Key should be created");
- authorities.push(public.into());
-
- let net = Arc::new(Mutex::new(net));
-
- let mut net = net.lock();
- let peer = net.peer(3);
- let client = peer.client().as_client();
- let environ = DummyFactory(client.clone());
-
- let mut worker =
- build_orchestrator_aura_worker::<NimbusPair, _, _, _, _, _, _, _, _>(
- BuildOrchestratorAuraWorkerParams {
- client: client.clone(),
- block_import: client,
- proposer_factory: environ,
- keystore: keystore.into(),
- sync_oracle: DummyOracle,
- justification_sync_link: (),
- force_authoring: false,
- backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()),
- telemetry: None,
- block_proposal_slot_portion: SlotProportion::new(0.5),
- max_block_proposal_slot_portion: None,
- compatibility_mode: Default::default(),
- },
- );
-
- let head = Header::new(
- 1,
- H256::from_low_u64_be(0),
- H256::from_low_u64_be(0),
- Default::default(),
- Default::default(),
- );
- assert!(worker
- .claim_slot(&head, 0.into(), &authorities)
- .await
- .is_none());
- assert!(worker
- .claim_slot(&head, 1.into(), &authorities)
- .await
- .is_none());
- assert!(worker
- .claim_slot(&head, 2.into(), &authorities)
- .await
- .is_none());
- assert!(worker
- .claim_slot(&head, 3.into(), &authorities)
- .await
- .is_some());
- assert!(worker
- .claim_slot(&head, 4.into(), &authorities)
- .await
- .is_none());
- assert!(worker
- .claim_slot(&head, 5.into(), &authorities)
- .await
- .is_none());
- assert!(worker
- .claim_slot(&head, 6.into(), &authorities)
- .await
- .is_none());
- assert!(worker
- .claim_slot(&head, 7.into(), &authorities)
- .await
- .is_some());
-}
-
-#[tokio::test]
-async fn on_slot_returns_correct_block() {
- let net = AuraTestNet::new(4);
-
- let keystore_path = tempfile::tempdir().expect("Creates keystore path");
- let keystore = LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore.");
- keystore
- .sr25519_generate_new(NIMBUS_KEY_ID, Some(&Keyring::Alice.to_seed()))
- .expect("Key should be created");
-
- let net = Arc::new(Mutex::new(net));
-
- let mut net = net.lock();
- let peer = net.peer(3);
- let client = peer.client().as_client();
- let environ = DummyFactory(client.clone());
-
- let mut worker =
- build_orchestrator_aura_worker::<NimbusPair, _, _, _, _, _, _, _, _>(
- BuildOrchestratorAuraWorkerParams {
- client: client.clone(),
- block_import: client.clone(),
- proposer_factory: environ,
- keystore: keystore.into(),
- sync_oracle: DummyOracle,
- justification_sync_link: (),
- force_authoring: false,
- backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()),
- telemetry: None,
- block_proposal_slot_portion: SlotProportion::new(0.5),
- max_block_proposal_slot_portion: None,
- compatibility_mode: Default::default(),
- },
- );
-
- let head = client.expect_header(client.info().genesis_hash).unwrap();
-
- use crate::consensus_orchestrator::TanssiSlotWorker;
- let res = worker
- .tanssi_on_slot(
- SlotInfo {
- slot: 0.into(),
- ends_at: std::time::Instant::now() + Duration::from_secs(100),
- create_inherent_data: Box::new(()),
- duration: Duration::from_millis(1000),
- chain_head: head,
- block_size_limit: None,
- },
- vec![
- (Keyring::Alice).public().into(),
- (Keyring::Bob).public().into(),
- (Keyring::Charlie).public().into(),
- ],
- )
- .await
- .unwrap();
-
- // The returned block should be imported and we should be able to get its header by now.
- assert!(client.header(res.block.hash()).unwrap().is_some());
-}
-
// Tests authorities are correctly returned and eligibility is correctly calculated
// thanks to the mocked runtime-apis
#[tokio::test]