Implement on demand block production (#402)
Allows registered parathreads to produce blocks at most once every n slots, where n (`min_slot_freq`) is defined on-chain.
tmpolaczyk authored Feb 14, 2024
1 parent ae5cc6a commit 6f48194
Showing 16 changed files with 896 additions and 94 deletions.
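The core of the change is a slot-gating rule added to `client/consensus/src/collators.rs`: a parathread collator only claims a slot once at least `min_slot_freq` slots have elapsed since the slot of the current chain head. The standalone sketch below restates that rule with plain integers; the helper name `should_skip` and the concrete numbers are illustrative assumptions, not code from this commit.

```rust
/// Illustrative restatement of the rule implemented by
/// `is_parathread_and_should_skip_slot` in this commit (hypothetical helper).
fn should_skip(current_slot: u64, chain_head_slot: u64, min_slot_freq: Option<u64>) -> bool {
    match min_slot_freq {
        // Parathread: skip while fewer than `min_slot_freq` slots have elapsed
        // since the chain head was authored.
        Some(freq) => current_slot.saturating_sub(chain_head_slot) < freq,
        // Regular container chain: never skip.
        None => false,
    }
}

fn main() {
    // With min_slot_freq = 4 and the chain head authored at slot 10,
    // slots 11..=13 are skipped and slot 14 is the first eligible slot again.
    assert!(should_skip(13, 10, Some(4)));
    assert!(!should_skip(14, 10, Some(4)));
    // A chain with no min_slot_freq configured produces on every assigned slot.
    assert!(!should_skip(11, 10, None));
}
```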
55 changes: 55 additions & 0 deletions .github/workflows/release.yml
@@ -545,6 +545,61 @@ jobs:
          name: logs
          path: logs

  zombienet-tests-parathreads:
    runs-on: ubuntu-latest
    needs: ["set-tags", "build"]
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          ref: ${{ needs.set-tags.outputs.git_ref }}

      - name: Pnpm
        uses: pnpm/action-setup@v2
        with:
          version: 8

      - name: Setup Node
        uses: actions/setup-node@v3
        with:
          node-version: 20.x
          cache: "pnpm"

      - name: "Download binaries"
        uses: actions/[email protected]
        with:
          name: binaries
          path: target/release

      - name: "Run zombie test"
        run: |
          chmod uog+x target/release/tanssi-node
          chmod uog+x target/release/container-chain-template-simple-node
          chmod uog+x target/release/container-chain-template-frontier-node
          cd test
          pnpm install
          ## Run tests
          pnpm moonwall test zombie_tanssi_parathreads
      - name: "Gather zombie logs"
        if: failure()
        run: |
          ls -ltr /tmp
          latest_zombie_dir=$(find /tmp -type d -iname "*zombie*" -printf '%T@ %p\n' | sort -n | tail -1 | cut -f2- -d" ")
          logs_dir="logs"
          mkdir -p "$logs_dir"
          find "$latest_zombie_dir" -type f -name "*.log" -exec cp {} "$logs_dir" \;
      - name: "Upload zombie logs"
        if: failure()
        uses: actions/[email protected]
        with:
          name: logs-parathreads
          path: logs

  zombienet-tests-rotation:
    runs-on: self-hosted
    needs: ["set-tags", "build"]
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions client/consensus/Cargo.toml
@@ -29,6 +29,7 @@ sp-timestamp = { workspace = true }
substrate-prometheus-endpoint = { workspace = true }

# Own
pallet-registrar-runtime-api = { workspace = true, features = [ "std" ] }
tp-consensus = { workspace = true, features = [ "std" ] }

# Cumulus dependencies
54 changes: 47 additions & 7 deletions client/consensus/src/collators.rs
@@ -29,7 +29,7 @@ use parity_scale_codec::{Codec, Encode};
use polkadot_node_primitives::{Collation, MaybeCompressedPoV};
use polkadot_primitives::Id as ParaId;

use crate::AuthorityId;
use crate::{find_pre_digest, AuthorityId, OrchestratorAuraWorkerAuxData};
use futures::prelude::*;
use nimbus_primitives::{CompatibleDigestItem as NimbusCompatibleDigestItem, NIMBUS_KEY_ID};
use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction};
@@ -41,7 +41,7 @@ use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvid
use sp_keystore::{Keystore, KeystorePtr};
use sp_runtime::{
generic::Digest,
traits::{Block as BlockT, HashingFor, Header as HeaderT, Member},
traits::{Block as BlockT, HashingFor, Header as HeaderT, Member, Zero},
};
use sp_state_machine::StorageChanges;
use sp_timestamp::Timestamp;
@@ -87,7 +87,7 @@ where
BI: BlockImport<Block> + Send + Sync + 'static,
Proposer: ProposerInterface<Block>,
CS: CollatorServiceInterface<Block>,
P: Pair,
P: Pair + Send + Sync + 'static,
P::Public: AppPublic + Member,
P::Signature: TryFrom<Vec<u8>> + Member + Codec,
{
@@ -284,28 +284,68 @@ impl<Pub: Clone> SlotClaim<Pub> {
}

/// Attempt to claim a slot locally.
pub fn tanssi_claim_slot<P>(
authorities: Vec<AuthorityId<P>>,
pub fn tanssi_claim_slot<P, B>(
aux_data: OrchestratorAuraWorkerAuxData<P>,
chain_head: &B::Header,
slot: Slot,
force_authoring: bool,
keystore: &KeystorePtr,
) -> Result<Option<SlotClaim<P::Public>>, Box<dyn Error>>
where
P: Pair,
P: Pair + Send + Sync + 'static,
P::Public: Codec + std::fmt::Debug,
P::Signature: Codec,
B: BlockT,
{
let author_pub = {
let res = claim_slot_inner::<P>(slot, &authorities, keystore, force_authoring);
let res = claim_slot_inner::<P>(slot, &aux_data.authorities, keystore, force_authoring);
match res {
Some(p) => p,
None => return Ok(None),
}
};

if is_parathread_and_should_skip_slot::<P, B>(&aux_data, chain_head, slot) {
return Ok(None);
}

Ok(Some(SlotClaim::unchecked::<P>(author_pub, slot)))
}

/// Returns true if this container chain is a parathread and the collator should skip this slot and not produce a block
pub fn is_parathread_and_should_skip_slot<P, B>(
aux_data: &OrchestratorAuraWorkerAuxData<P>,
chain_head: &B::Header,
slot: Slot,
) -> bool
where
P: Pair + Send + Sync + 'static,
P::Public: Codec + std::fmt::Debug,
P::Signature: Codec,
B: BlockT,
{
if slot.is_zero() {
// Always produce on slot 0 (for tests)
return false;
}
if let Some(min_slot_freq) = aux_data.min_slot_freq {
if let Ok(chain_head_slot) = find_pre_digest::<B, P::Signature>(chain_head) {
let slot_diff = slot.saturating_sub(chain_head_slot);

// TODO: this doesn't take into account force authoring.
// So a node with `force_authoring = true` will not propose a block for a parathread until the
// `min_slot_freq` has elapsed.
slot_diff < min_slot_freq
} else {
// In case of error always propose
false
}
} else {
// Not a parathread: always propose
false
}
}

/// Attempt to claim a slot using a keystore.
pub fn claim_slot_inner<P: Pair>(
slot: Slot,
15 changes: 8 additions & 7 deletions client/consensus/src/collators/basic.rs
@@ -44,13 +44,13 @@ use sp_keystore::KeystorePtr;
use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member};
use std::{convert::TryFrom, sync::Arc, time::Duration};

use crate::collators as collator_util;
use crate::{consensus_orchestrator::RetrieveAuthoritiesFromOrchestrator, AuthorityId};
use crate::consensus_orchestrator::RetrieveAuthoritiesFromOrchestrator;
use crate::{collators as collator_util, OrchestratorAuraWorkerAuxData};

/// Parameters for [`run`].
pub struct Params<BI, CIDP, Client, RClient, SO, Proposer, CS, GOH> {
pub create_inherent_data_providers: CIDP,
pub get_authorities_from_orchestrator: GOH,
pub get_orchestrator_aux_data: GOH,
pub block_import: BI,
pub para_client: Arc<Client>,
pub relay_client: RClient,
@@ -92,13 +92,13 @@ where
SO: SyncOracle + Send + Sync + Clone + 'static,
Proposer: ProposerInterface<Block> + Send + Sync + 'static,
CS: CollatorServiceInterface<Block> + Send + Sync + 'static,
P: Pair,
P: Pair + Sync + Send + 'static,
P::Public: AppPublic + Member + Codec,
P::Signature: TryFrom<Vec<u8>> + Member + Codec,
GOH: RetrieveAuthoritiesFromOrchestrator<
Block,
(PHash, PersistedValidationData),
Vec<AuthorityId<P>>,
OrchestratorAuraWorkerAuxData<P>,
>
+ 'static
+ Sync
@@ -177,7 +177,7 @@ where

// Retrieve the orchestrator aux data (authorities and optional min slot frequency) needed to produce the block
let authorities = match params
.get_authorities_from_orchestrator
.get_orchestrator_aux_data
.retrieve_authorities_from_orchestrator(
parent_hash,
(relay_parent_header.hash(), validation_data.clone()),
@@ -200,8 +200,9 @@ where
Ok(h) => h,
};

let mut claim = match collator_util::tanssi_claim_slot::<P>(
let mut claim = match collator_util::tanssi_claim_slot::<P, Block>(
authorities,
&parent_header,
inherent_providers.slot(),
params.force_authoring,
&params.keystore,
11 changes: 11 additions & 0 deletions client/consensus/src/consensus_orchestrator.rs
@@ -21,6 +21,9 @@
//! the ParachainConsensus trait to access the orchestrator-dictated authorities, and further
//! it implements the TanssiWorker to TanssiOnSlot trait. This trait is
use {
crate::AuthorityId,
crate::Pair,
crate::Slot,
sc_consensus_slots::{SimpleSlotWorker, SlotInfo, SlotResult},
sp_consensus::Proposer,
sp_runtime::traits::Block as BlockT,
@@ -60,6 +63,14 @@
}
}

pub struct OrchestratorAuraWorkerAuxData<P>
where
P: Pair + Send + Sync + 'static,
{
pub authorities: Vec<AuthorityId<P>>,
pub min_slot_freq: Option<Slot>,
}

#[async_trait::async_trait]
pub trait TanssiSlotWorker<B: BlockT>: SimpleSlotWorker<B> {
/// Called when a new slot is triggered.
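As a rough usage sketch (not taken from the node wiring in this commit), the aux data added above simply pairs the authority set with the optional parathread slot frequency. The snippet is written as if inside this crate's tests and assumes that `AuthorityId<NimbusPair>` resolves to the Nimbus public key and that `Slot` is `sp_consensus_slots::Slot`, as the surrounding code suggests; the generated key and the value 4 are placeholders.

```rust
#[cfg(test)]
mod aux_data_sketch {
    use crate::OrchestratorAuraWorkerAuxData;
    use nimbus_primitives::NimbusPair;
    use sp_consensus_slots::Slot;
    use sp_core::Pair as _;

    #[test]
    fn builds_parathread_aux_data() {
        // Throwaway Nimbus key standing in for an assigned collator.
        let (pair, _seed) = NimbusPair::generate();

        let aux_data: OrchestratorAuraWorkerAuxData<NimbusPair> = OrchestratorAuraWorkerAuxData {
            authorities: vec![pair.public()],
            // `Some(..)` marks the chain as a parathread that authors at most
            // once every 4 slots; `None` means a regular container chain.
            min_slot_freq: Some(Slot::from(4u64)),
        };

        assert_eq!(aux_data.authorities.len(), 1);
        assert!(aux_data.min_slot_freq.is_some());
    }
}
```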
26 changes: 26 additions & 0 deletions client/consensus/src/lib.rs
@@ -28,6 +28,7 @@ mod manual_seal;
#[cfg(test)]
mod tests;

pub use crate::consensus_orchestrator::OrchestratorAuraWorkerAuxData;
pub use sc_consensus_aura::CompatibilityMode;

pub use {
@@ -36,7 +37,9 @@ pub use {
get_aura_id_from_seed, ContainerManualSealAuraConsensusDataProvider,
OrchestratorManualSealAuraConsensusDataProvider,
},
pallet_registrar_runtime_api::OnDemandBlockProductionApi,
parity_scale_codec::{Decode, Encode},
sc_consensus_aura::find_pre_digest,
sc_consensus_aura::{slot_duration, AuraVerifier, BuildAuraWorkerParams, SlotProportion},
sc_consensus_slots::InherentDataProviderExt,
sp_api::{Core, ProvideRuntimeApi},
@@ -103,6 +106,29 @@ where
authorities
}

/// Return the on-chain `min_slot_freq` for the given para id, or `None` if the
/// chain is not a parathread (or the runtime call fails)
pub fn min_slot_freq<B, C, P>(client: &C, parent_hash: &B::Hash, para_id: ParaId) -> Option<Slot>
where
P: Pair + Send + Sync + 'static,
P::Public: AppPublic + Hash + Member + Encode + Decode,
P::Signature: TryFrom<Vec<u8>> + Hash + Member + Encode + Decode,
B: BlockT,
C: ProvideRuntimeApi<B>,
C::Api: OnDemandBlockProductionApi<B, ParaId, Slot>,
AuthorityId<P>: From<<NimbusPair as sp_application_crypto::Pair>::Public>,
{
let runtime_api = client.runtime_api();

let min_slot_freq = runtime_api.min_slot_freq(*parent_hash, para_id).ok()?;
log::debug!(
"min_slot_freq for para {:?} is {:?}",
para_id,
min_slot_freq
);
min_slot_freq
}

use nimbus_primitives::{NimbusId, NimbusPair, NIMBUS_KEY_ID};
/// Grab the first eligible nimbus key from the keystore
/// If multiple keys are eligible this function still only returns one
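The helper above drives everything through the `OnDemandBlockProductionApi` runtime API imported from `pallet-registrar-runtime-api`, whose declaration is not part of this excerpt. The sketch below shows a plausible shape for it, inferred only from the client-side call `runtime_api.min_slot_freq(*parent_hash, para_id)`; the real trait in that crate may differ in its generics or carry additional methods.

```rust
use parity_scale_codec::Codec;

sp_api::decl_runtime_apis! {
    /// Sketch of the runtime API used by the `min_slot_freq` helper above
    /// (inferred shape, not copied from `pallet-registrar-runtime-api`).
    pub trait OnDemandBlockProductionApi<ParaId: Codec, Slot: Codec> {
        /// Minimum number of slots that must elapse between two blocks of the
        /// given parathread, or `None` if it is a regular container chain.
        fn min_slot_freq(para_id: ParaId) -> Option<Slot>;
    }
}
```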