
Commit 64e9c66: fmt

Agusrodri committed Jan 31, 2024 · 1 parent 151a24e
Showing 3 changed files with 82 additions and 63 deletions.
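All three diffs below are formatting-only, as the commit title suggests: imports are reordered and rewrapped, trailing commas before the commented-out /*GOH*/ generic parameter are dropped, long trait bounds and call chains are broken across lines, and duplicate blank lines are removed. There are no behavior changes; where a - / + pair below looks identical, the difference is leading whitespace only.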
40 changes: 19 additions & 21 deletions client/consensus/src/collators.rs
@@ -42,7 +42,7 @@ use sc_consensus_aura::standalone as aura_internal;
 use sp_api::ProvideRuntimeApi;
 use sp_application_crypto::{AppCrypto, AppPublic};
 use sp_consensus::BlockOrigin;
-use sp_consensus_aura::{AuraApi, Slot, SlotDuration, digests::CompatibleDigestItem};
+use sp_consensus_aura::{digests::CompatibleDigestItem, AuraApi, Slot, SlotDuration};
 use sp_core::crypto::{ByteArray, Pair, Public};
 use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider};
 use sp_keystore::{Keystore, KeystorePtr};
@@ -55,7 +55,7 @@ use sp_timestamp::Timestamp;
 use std::{convert::TryFrom, error::Error, time::Duration};
 
 /// Parameters for instantiating a [`Collator`].
-pub struct Params<BI, CIDP, RClient, Proposer, CS, /*GOH*/> {
+pub struct Params<BI, CIDP, RClient, Proposer, CS /*GOH*/> {
     /// A builder for inherent data builders.
     pub create_inherent_data_providers: CIDP,
     //pub get_authorities_from_orchestrator: GOH,
@@ -76,7 +76,7 @@ pub struct Params<BI, CIDP, RClient, Proposer, CS, /*GOH*/> {
 
 /// A utility struct for writing collation logic that makes use of Aura entirely
 /// or in part. See module docs for more details.
-pub struct Collator<Block, P, BI, CIDP, RClient, Proposer, CS, /*GOH*/> {
+pub struct Collator<Block, P, BI, CIDP, RClient, Proposer, CS /*GOH*/> {
     create_inherent_data_providers: CIDP,
     //get_authorities_from_orchestrator: GOH,
     block_import: BI,
@@ -88,13 +88,13 @@ pub struct Collator<Block, P, BI, CIDP, RClient, Proposer, CS, /*GOH*/> {
     _marker: std::marker::PhantomData<(Block, Box<dyn Fn(P) + Send + Sync + 'static>)>,
 }
 
-impl<Block, P, BI, CIDP, RClient, Proposer, CS, /*GOH*/>
-    Collator<Block, P, BI, CIDP, RClient, Proposer, CS, /*GOH*/>
+impl<Block, P, BI, CIDP, RClient, Proposer, CS /*GOH*/>
+    Collator<Block, P, BI, CIDP, RClient, Proposer, CS /*GOH*/>
 where
     Block: BlockT,
     RClient: RelayChainInterface,
     CIDP: CreateInherentDataProviders<Block, (PHash, PersistedValidationData)> + 'static,
-    /* GOH: RetrieveAuthoritiesFromOrchestrator<
+    /* GOH: RetrieveAuthoritiesFromOrchestrator<
         Block,
         (PHash, PersistedValidationData),
        Vec<AuthorityId<P>>,
@@ -107,7 +107,7 @@ where
     P::Signature: TryFrom<Vec<u8>> + Member + Codec,
 {
     /// Instantiate a new instance of the `Aura` manager.
-    pub fn new(params: Params<BI, CIDP, RClient, Proposer, CS, /*GOH*/>) -> Self {
+    pub fn new(params: Params<BI, CIDP, RClient, Proposer, CS /*GOH*/>) -> Self {
         Collator {
             create_inherent_data_providers: params.create_inherent_data_providers,
             //get_authorities_from_orchestrator: params.get_authorities_from_orchestrator,
@@ -254,7 +254,7 @@ where
     }
 }
 
-fn pre_digest_data<P: Pair>(slot: Slot, claim: P::Public) -> Vec<sp_runtime::DigestItem>
+fn pre_digest_data<P: Pair>(slot: Slot, claim: P::Public) -> Vec<sp_runtime::DigestItem>
 where
     P::Public: Codec,
     P::Signature: Codec,
@@ -281,7 +281,7 @@ impl<Pub: Clone> SlotClaim<Pub> {
     ///
     /// This does not check whether the author actually owns the slot or the timestamp
     /// falls within the slot.
-    pub fn unchecked<P>(author_pub: Pub, slot: Slot, /*timestamp: Timestamp*/) -> Self
+    pub fn unchecked<P>(author_pub: Pub, slot: Slot /*timestamp: Timestamp*/) -> Self
     where
         P: Pair<Public = Pub>,
         P::Public: Codec,
@@ -308,7 +308,7 @@ impl<Pub: Clone> SlotClaim<Pub> {
     // TODO: do we need this timestamp?
     // Get the timestamp corresponding to the relay-chain slot this claim was
     // generated against.
-    /* pub fn timestamp(&self) -> Timestamp {
+    /* pub fn timestamp(&self) -> Timestamp {
         self.timestamp
     } */
 }
@@ -335,10 +335,10 @@ where
     P::Signature: Codec,
 {
     // load authorities
-    /* let authorities = client
-    .runtime_api()
-    .authorities(parent_hash)
-    .map_err(Box::new)?; */
+    /* let authorities = client
+        .runtime_api()
+        .authorities(parent_hash)
+        .map_err(Box::new)?; */
 
     /* let authorities_v2 = crate::authorities::<B, C, P>(
         client_set_aside_for_orch.as_ref(),
@@ -347,7 +347,7 @@ where
     ); */
 
     // Determine the current slot and timestamp based on the relay-parent's.
-    /* let (slot_now, timestamp) = match consensus_common::relay_slot_and_timestamp(
+    /* let (slot_now, timestamp) = match consensus_common::relay_slot_and_timestamp(
         relay_parent_header,
         relay_chain_slot_duration,
     ) {
@@ -386,9 +386,9 @@ where
 /// This returns `None` if the slot author is not locally controlled, and `Some` if it is,
 /// with the public key of the slot author.
 pub async fn claim_slot_inner<P: Pair>(
-    slot: Slot,
-    authorities: &Vec<AuthorityId<P>>,
-    keystore: &KeystorePtr,
+    slot: Slot,
+    authorities: &Vec<AuthorityId<P>>,
+    keystore: &KeystorePtr,
     force_authoring: bool,
 ) -> Option<P::Public> {
     let expected_author = crate::slot_author::<P>(slot, authorities.as_slice());
@@ -407,9 +407,7 @@ pub async fn claim_slot_inner<P: Pair>(
     else {
         authorities
             .iter()
-            .find(|key| {
-                keystore.has_keys(&[(key.to_raw_vec(), NIMBUS_KEY_ID)])
-            })
+            .find(|key| keystore.has_keys(&[(key.to_raw_vec(), NIMBUS_KEY_ID)]))
             .cloned()
     }
 }
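For readers skimming the last hunk: claim_slot_inner claims a slot only when the keystore holds the expected author's key, and under forced authoring it falls back to any locally held authority key. Below is a minimal, self-contained sketch of that flow, assuming the usual Aura round-robin author selection; plain strings stand in for AuthorityId<P> and a HashSet stands in for the keystore, so every name here is illustrative rather than the crate's API.

use std::collections::HashSet;

/// Aura-style round-robin: the author index is the slot number
/// modulo the size of the authority set.
fn slot_author(slot: u64, authorities: &[String]) -> Option<&String> {
    if authorities.is_empty() {
        return None;
    }
    authorities.get(slot as usize % authorities.len())
}

/// Claim the slot if we control the expected author's key; with
/// `force_authoring`, fall back to any locally held authority key.
fn claim_slot(
    slot: u64,
    authorities: &[String],
    keystore: &HashSet<String>,
    force_authoring: bool,
) -> Option<String> {
    let expected = slot_author(slot, authorities)?;
    if keystore.contains(expected) {
        Some(expected.clone())
    } else if force_authoring {
        authorities.iter().find(|a| keystore.contains(*a)).cloned()
    } else {
        None
    }
}

fn main() {
    let authorities = vec!["alice".to_string(), "bob".to_string()];
    let keystore: HashSet<String> = HashSet::from(["bob".to_string()]);
    // Slot 1 belongs to "bob", whose key we hold: the claim succeeds.
    assert_eq!(claim_slot(1, &authorities, &keystore, false), Some("bob".into()));
    // Slot 0 belongs to "alice"; without forced authoring there is no claim...
    assert_eq!(claim_slot(0, &authorities, &keystore, false), None);
    // ...but with forced authoring we claim with the key we do hold.
    assert_eq!(claim_slot(0, &authorities, &keystore, true), Some("bob".into()));
}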
48 changes: 32 additions & 16 deletions client/consensus/src/collators/basic.rs
@@ -20,7 +20,10 @@ use cumulus_client_collator::{
 };
 use cumulus_client_consensus_common::ParachainBlockImportMarker;
 use cumulus_client_consensus_proposer::ProposerInterface;
-use cumulus_primitives_core::{relay_chain::{BlockId as RBlockId, Hash as PHash}, CollectCollationInfo, PersistedValidationData};
+use cumulus_primitives_core::{
+    relay_chain::{BlockId as RBlockId, Hash as PHash},
+    CollectCollationInfo, PersistedValidationData,
+};
 use cumulus_relay_chain_interface::RelayChainInterface;
 use parity_scale_codec::{Codec, Decode};
 
@@ -84,7 +87,10 @@ where
     //TODO: re-check and analyze what to add here.
     //Client::Api: TanssiAuthorityAssignmentApi<Block, P::Public> + CollectCollationInfo<Block>,
     RClient: RelayChainInterface + Send + Clone + 'static,
-    CIDP: CreateInherentDataProviders<Block, (PHash, PersistedValidationData)> + Send + 'static + Clone,
+    CIDP: CreateInherentDataProviders<Block, (PHash, PersistedValidationData)>
+        + Send
+        + 'static
+        + Clone,
     CIDP::InherentDataProviders: Send + InherentDataProviderExt,
     BI: BlockImport<Block> + ParachainBlockImportMarker + Send + Sync + 'static,
     SO: SyncOracle + Send + Sync + Clone + 'static,
@@ -93,7 +99,14 @@ where
     P: Pair,
     P::Public: AppPublic + Member + Codec,
     P::Signature: TryFrom<Vec<u8>> + Member + Codec,
-    GOH: RetrieveAuthoritiesFromOrchestrator<Block,(PHash, PersistedValidationData),Vec<AuthorityId<P>>,> + 'static + Sync + Send,
+    GOH: RetrieveAuthoritiesFromOrchestrator<
+            Block,
+            (PHash, PersistedValidationData),
+            Vec<AuthorityId<P>>,
+        >
+        + 'static
+        + Sync
+        + Send,
 {
     async move {
         let mut collation_requests = match params.collation_request_receiver {
@@ -173,18 +186,22 @@ where
                         (relay_parent_header.hash(), validation_data.clone()),
                     )
                     .await
-                    {
-                        Err(e) => reject_with_error!(e),
-                        Ok(h) => h,
-                    };
+                {
+                    Err(e) => reject_with_error!(e),
+                    Ok(h) => h,
+                };
 
-                let inherent_providers = match params.create_inherent_data_providers
-                    .create_inherent_data_providers(parent_hash.clone(), (*request.relay_parent(), validation_data.clone()))
-                    .await
-                    {
-                        Err(e) => reject_with_error!(e),
-                        Ok(h) => h,
-                    };
+                let inherent_providers = match params
+                    .create_inherent_data_providers
+                    .create_inherent_data_providers(
+                        parent_hash.clone(),
+                        (*request.relay_parent(), validation_data.clone()),
+                    )
+                    .await
+                {
+                    Err(e) => reject_with_error!(e),
+                    Ok(h) => h,
+                };
 
                 let claim = match collator_util::tanssi_claim_slot::<P>(
                     //&*params.para_client,
@@ -203,7 +220,7 @@ where
                     Err(e) => reject_with_error!(e),
                     Ok(h) => h,
                 };
-                /* .map_err(|e| {
+                /* .map_err(|e| {
                     tracing::error!(
                         target: LOG_TARGET,
                         error = ?e,
error = ?e,
Expand All @@ -212,7 +229,6 @@ where
})
.ok()?; */


let (parachain_inherent_data, other_inherent_data) = try_request!(
collator
.create_inherent_data(
57 changes: 31 additions & 26 deletions node/src/service.rs
@@ -33,7 +33,9 @@ use {
     },
     cumulus_client_consensus_proposer::Proposer,
     cumulus_client_pov_recovery::{PoVRecovery, RecoveryDelayRange},
-    cumulus_client_service::{prepare_node_config, start_relay_chain_tasks, StartRelayChainTasksParams, DARecoveryProfile},
+    cumulus_client_service::{
+        prepare_node_config, start_relay_chain_tasks, DARecoveryProfile, StartRelayChainTasksParams,
+    },
     cumulus_primitives_core::{
         relay_chain::{well_known_keys as RelayWellKnownKeys, CollatorPair, Hash as PHash},
         ParaId,
@@ -66,7 +68,10 @@ use {
     sc_executor::NativeElseWasmExecutor,
     sc_network::NetworkBlock,
     sc_network_sync::SyncingService,
-    sc_service::{Configuration, TFullBackend, TFullClient, SpawnTaskHandle, SpawnEssentialTaskHandle, TaskManager},
+    sc_service::{
+        Configuration, SpawnEssentialTaskHandle, SpawnTaskHandle, TFullBackend, TFullClient,
+        TaskManager,
+    },
     sc_telemetry::TelemetryHandle,
     sp_api::StorageProof,
     sp_consensus::SyncOracle,
@@ -76,9 +81,10 @@ use {
     sp_state_machine::{Backend as StateBackend, StorageValue},
     std::{future::Future, pin::Pin, sync::Arc, time::Duration},
     substrate_prometheus_endpoint::Registry,
-    tc_consensus::{BuildOrchestratorAuraConsensusParams, OrchestratorAuraConsensus, collators::basic::{
-        self as basic_tanssi_aura, Params as BasicTanssiAuraParams,
-    }},
+    tc_consensus::{
+        collators::basic::{self as basic_tanssi_aura, Params as BasicTanssiAuraParams},
+        BuildOrchestratorAuraConsensusParams, OrchestratorAuraConsensus,
+    },
     tokio::sync::mpsc::{unbounded_channel, UnboundedSender},
 };

@@ -321,21 +327,21 @@ async fn start_node_impl(
     let (mut node_builder, import_queue_service) = node_builder.extract_import_queue_service();
 
     start_relay_chain_tasks(StartRelayChainTasksParams {
-        client: node_builder.client.clone(),
-        announce_block: announce_block.clone(),
-        para_id,
-        relay_chain_interface: relay_chain_interface.clone(),
-        task_manager: &mut node_builder.task_manager,
-        da_recovery_profile: if validator {
-            DARecoveryProfile::Collator
-        } else {
-            DARecoveryProfile::FullNode
-        },
-        import_queue: import_queue_service,
-        relay_chain_slot_duration,
-        recovery_handle: Box::new(overseer_handle.clone()),
-        sync_service: node_builder.network.sync_service.clone(),
-    })?;
+        client: node_builder.client.clone(),
+        announce_block: announce_block.clone(),
+        para_id,
+        relay_chain_interface: relay_chain_interface.clone(),
+        task_manager: &mut node_builder.task_manager,
+        da_recovery_profile: if validator {
+            DARecoveryProfile::Collator
+        } else {
+            DARecoveryProfile::FullNode
+        },
+        import_queue: import_queue_service,
+        relay_chain_slot_duration,
+        recovery_handle: Box::new(overseer_handle.clone()),
+        sync_service: node_builder.network.sync_service.clone(),
+    })?;
 
     if validator {
         let collator_key = collator_key
@@ -354,7 +360,7 @@ async fn start_node_impl(
         );
     }
 
-    /* let parachain_consensus = build_consensus_orchestrator(
+    /* let parachain_consensus = build_consensus_orchestrator(
         node_builder.client.clone(),
         block_import.clone(),
         node_builder.prometheus_registry.as_ref(),
node_builder.prometheus_registry.as_ref(),
Expand Down Expand Up @@ -391,9 +397,9 @@ async fn start_node_impl(
let para_id_clone = para_id.clone();
let overseer = overseer_handle.clone();
let announce_block_clone = announce_block.clone();

// TODO: change for async backing
collate_on_tanssi = Some(move || async move {
collate_on_tanssi = Some(move || async move {
//#[allow(deprecated)]
//cumulus_client_collator::start_collator(params_generator()).await;
start_consensus_orchestrator(
@@ -413,7 +419,8 @@ async fn start_node_impl(
             collator_key_clone,
             overseer,
             announce_block_clone,
-        ).expect("Start consensus should succeed");
+        )
+        .expect("Start consensus should succeed");
     });
 
     start_consensus_orchestrator(
@@ -727,7 +734,6 @@ fn start_consensus_container(
     overseer_handle: OverseerHandle,
     announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
 ) -> Result<(), sc_service::Error> {
-
     let slot_duration = cumulus_client_consensus_aura::slot_duration(&*orchestrator_client)?;
 
     let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
@@ -874,7 +880,6 @@ fn start_consensus_orchestrator(
     overseer_handle: OverseerHandle,
     announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
 ) -> Result<(), sc_service::Error> {
-
     //impl Future<Output = ()> + Send + 'static
 
     let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
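The start_relay_chain_tasks call reformatted above follows the params-struct convention used throughout this file: one configuration struct instead of a long positional argument list, with the DA recovery profile derived from the node's role. Below is a generic sketch of that pattern under invented types; none of these names are the cumulus API.

#[derive(Debug)]
enum RecoveryProfile {
    Collator,
    FullNode,
}

struct TasksParams {
    para_id: u32,
    recovery_profile: RecoveryProfile,
}

fn start_tasks(params: TasksParams) -> Result<(), String> {
    // A single struct keeps call sites readable and lets fields be
    // reordered or added without touching every caller.
    println!(
        "starting tasks for para {} with {:?} recovery",
        params.para_id, params.recovery_profile
    );
    Ok(())
}

fn main() -> Result<(), String> {
    let validator = true;
    start_tasks(TasksParams {
        para_id: 1000,
        // Mirrors the diff: the profile follows the node role.
        recovery_profile: if validator {
            RecoveryProfile::Collator
        } else {
            RecoveryProfile::FullNode
        },
    })
}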
