Commit

Merge remote-tracking branch 'origin/master' into agustin-keepdb-cli-flag
Agusrodri committed Oct 18, 2023
2 parents d6ba0ae + 93cfc2f commit c14ea97
Showing 15 changed files with 112 additions and 260 deletions.
4 changes: 1 addition & 3 deletions client/consensus/src/lib.rs
@@ -81,7 +81,7 @@ pub(crate) fn slot_author<P: Pair>(
 pub fn authorities<B, C, P>(
     client: &C,
     parent_hash: &B::Hash,
-    keystore: KeystorePtr,
+    para_id: ParaId,
 ) -> Option<Vec<AuthorityId<P>>>
 where
     P: Pair + Send + Sync,
@@ -94,8 +94,6 @@ where
 {
     let runtime_api = client.runtime_api();
 
-    let (_first_eligibile_key, para_id) =
-        first_eligible_key::<B, C, P>(client, parent_hash, keystore.clone())?;
     let authorities = runtime_api
         .para_id_authorities(*parent_hash, para_id)
         .ok()?;
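
Note: `authorities` now looks up the authority set for an explicitly supplied `ParaId` instead of deriving the para id from the node's keystore. A minimal sketch of a call under the new signature; the `client` and `parent_hash` bindings are illustrative and not part of this diff:

    // Sketch only: ask the orchestrator runtime for the authorities of one container chain.
    let para_id: ParaId = 1000u32.into(); // illustrative para id
    let authorities = tc_consensus::authorities::<Block, ParachainClient, NimbusPair>(
        client.as_ref(),
        &parent_hash,
        para_id,
    );
    // `None` means the runtime API lookup failed for this block/para id.
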
36 changes: 3 additions & 33 deletions client/consensus/src/tests.rs
@@ -705,48 +705,18 @@ async fn authorities_runtime_api_tests() {
     let net = AuraTestNet::new(4);
     let net = Arc::new(Mutex::new(net));
 
-    let keystore_path = tempfile::tempdir().expect("Creates keystore path");
-    let keystore = LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore.");
-
-    let keystoreptr: sp_keystore::KeystorePtr = keystore.into();
     let mut net = net.lock();
     let peer = net.peer(3);
     let client = peer.client().as_client();
     let environ = DummyFactory(client.clone());
 
     let default_hash = Default::default();
-    let authorities = crate::authorities::<_, _, nimbus_primitives::NimbusPair>(
-        &environ,
-        &default_hash,
-        keystoreptr.clone(),
-    );
-    assert!(authorities.is_none());
-
-    keystoreptr
-        .sr25519_generate_new(NIMBUS_KEY_ID, Some(&Keyring::Bob.to_seed()))
-        .expect("Key should be created");
-
-    // Bob according top the runtime-api is not eligible
-    let authorities_after_bob = crate::authorities::<_, _, nimbus_primitives::NimbusPair>(
-        &environ,
-        &default_hash,
-        keystoreptr.clone(),
-    );
-    assert!(authorities_after_bob.is_none());
-
-    // Alice according top the runtime-api is eligible
-    keystoreptr
-        .sr25519_generate_new(NIMBUS_KEY_ID, Some(&Keyring::Alice.to_seed()))
-        .expect("Key should be created");
 
-    let authorities_after_alice = crate::authorities::<_, _, nimbus_primitives::NimbusPair>(
+    let authorities = crate::authorities::<_, _, nimbus_primitives::NimbusPair>(
         &environ,
         &default_hash,
-        keystoreptr.clone(),
+        1000u32.into(),
     );
 
-    assert_eq!(
-        authorities_after_alice,
-        Some(vec![Keyring::Alice.public().into()])
-    );
+    assert_eq!(authorities, Some(vec![Keyring::Alice.public().into()]));
 }
2 changes: 1 addition & 1 deletion node/src/container_chain_monitor.rs
@@ -209,8 +209,8 @@ pub async fn monitor_task(state: Arc<Mutex<ContainerChainSpawnerState>>) {
     let max_running_container_chains = 4;
 
     loop {
-        log::info!("Monitor tick");
         sleep(monitor_period).await;
+        log::debug!("Monitor tick");
         let mut state = state.lock().unwrap();
         let monitor_state = &mut state.spawned_containers_monitor;
 
8 changes: 2 additions & 6 deletions node/src/service.rs
@@ -1071,7 +1071,6 @@ fn build_consensus_container(
 
     let relay_chain_interace_for_orch = relay_chain_interface.clone();
     let orchestrator_client_for_cidp = orchestrator_client;
-    let keystore_for_cidp = keystore.clone();
 
     let params = tc_consensus::BuildOrchestratorAuraConsensusParams {
         proposer_factory,
@@ -1129,7 +1128,6 @@
         get_authorities_from_orchestrator: move |_block_hash, (relay_parent, _validation_data)| {
             let relay_chain_interace_for_orch = relay_chain_interace_for_orch.clone();
             let orchestrator_client_for_cidp = orchestrator_client_for_cidp.clone();
-            let keystore_for_cidp = keystore_for_cidp.clone();
 
             async move {
                 let latest_header =
@@ -1149,7 +1147,7 @@
                 let authorities = tc_consensus::authorities::<Block, ParachainClient, NimbusPair>(
                     orchestrator_client_for_cidp.as_ref(),
                     &latest_header.hash(),
-                    keystore_for_cidp,
+                    para_id,
                 );
 
                 let aux_data = authorities.ok_or_else(|| {
@@ -1216,7 +1214,6 @@ fn build_consensus_orchestrator(
     );
 
     let client_set_aside_for_cidp = client.clone();
-    let keystore_for_cidp = keystore.clone();
     let client_set_aside_for_orch = client.clone();
 
     let params = BuildOrchestratorAuraConsensusParams {
@@ -1272,13 +1269,12 @@ fn build_consensus_orchestrator(
         get_authorities_from_orchestrator:
             move |block_hash: H256, (_relay_parent, _validation_data)| {
                 let client_set_aside_for_orch = client_set_aside_for_orch.clone();
-                let keystore_for_cidp = keystore_for_cidp.clone();
 
                 async move {
                     let authorities = tc_consensus::authorities::<Block, ParachainClient, NimbusPair>(
                         client_set_aside_for_orch.as_ref(),
                         &block_hash,
-                        keystore_for_cidp,
+                        para_id,
                     );
 
                     let aux_data = authorities.ok_or_else(|| {
73 changes: 7 additions & 66 deletions pallets/initializer/src/lib.rs
@@ -18,10 +18,8 @@
 //!
 //! This pallet is in charge of organizing what happens on session changes.
 //! In particular this pallet has implemented the OneSessionHandler trait
-//! which will be called upon a session change. This pallet will then store
-//! the bufferedSessionChanges (collators, new session index, etc) in the
-//! BufferedSessionChanges storage item. This storage item gets read on_finalize
-//! and calls the SessionHandler config trait
+//! which will be called upon a session change. There it will call the
+//! SessionHandler config trait
 #![cfg_attr(not(feature = "std"), no_std)]
 
@@ -34,30 +32,16 @@ mod tests;
 pub use pallet::*;
 use {
     frame_support::{pallet_prelude::*, traits::OneSessionHandler},
-    frame_system::pallet_prelude::*,
-    parity_scale_codec::{Decode, Encode},
-    scale_info::TypeInfo,
-    sp_runtime::{
-        traits::{AtLeast32BitUnsigned, Zero},
-        RuntimeAppPublic,
-    },
+    sp_runtime::{traits::AtLeast32BitUnsigned, RuntimeAppPublic},
     sp_std::prelude::*,
 };
 
 #[frame_support::pallet]
 pub mod pallet {
     use super::*;
 
-    #[derive(Encode, Decode, TypeInfo)]
-    #[scale_info(skip_type_params(T))]
-    pub struct BufferedSessionChange<T: Config> {
-        pub changed: bool,
-        pub validators: Vec<(T::AccountId, T::AuthorityId)>,
-        pub queued: Vec<(T::AccountId, T::AuthorityId)>,
-        pub session_index: T::SessionIndex,
-    }
-
-    // The apply_new_sseion trait. We need to comply with this
+    // The apply_new_session trait. We need to comply with this
     pub trait ApplyNewSession<T: Config> {
         fn apply_new_session(
             changed: bool,
@@ -84,42 +68,11 @@
 
         type SessionHandler: ApplyNewSession<Self>;
     }
-
-    /// Buffered session changes along with the block number at which they should be applied.
-    ///
-    /// Typically this will be empty or one element long. Apart from that this item never hits
-    /// the storage.
-    ///
-    /// However this is a `Vec` regardless to handle various edge cases that may occur at runtime
-    /// upgrade boundaries or if governance intervenes.
-    #[pallet::storage]
-    pub(super) type BufferedSessionChanges<T: Config> =
-        StorageValue<_, BufferedSessionChange<T>, OptionQuery>;
-
-    #[pallet::hooks]
-    impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
-        fn on_finalize(_now: BlockNumberFor<T>) {
-            // Apply buffered session changes as the last thing. This way the runtime APIs and the
-            // next block will observe the next session.
-            //
-            // Note that we only apply the last session as all others lasted less than a block (weirdly).
-            if let Some(BufferedSessionChange {
-                changed,
-                session_index,
-                validators,
-                queued,
-            }) = BufferedSessionChanges::<T>::take()
-            {
-                // Changes to be applied on new session
-                T::SessionHandler::apply_new_session(changed, session_index, validators, queued);
-            }
-        }
-    }
 }
 
 impl<T: Config> Pallet<T> {
-    /// Should be called when a new session occurs. Buffers the session notification to be applied
-    /// at the end of the block. If `queued` is `None`, the `validators` are considered queued.
+    /// Should be called when a new session occurs. If `queued` is `None`,
+    /// the `validators` are considered queued.
     fn on_new_session<'a, I: 'a>(
         changed: bool,
         session_index: T::SessionIndex,
@@ -135,19 +88,7 @@ impl<T: Config> Pallet<T> {
             validators.clone()
         };
 
-        if session_index == T::SessionIndex::zero() {
-            // Genesis session should be immediately enacted.
-            T::SessionHandler::apply_new_session(false, 0u32.into(), validators, queued);
-        } else {
-            BufferedSessionChanges::<T>::mutate(|v| {
-                *v = Some(BufferedSessionChange {
-                    changed,
-                    validators,
-                    queued,
-                    session_index,
-                })
-            });
-        }
+        T::SessionHandler::apply_new_session(changed, session_index, validators, queued);
     }
 
     /// Should be called when a new session occurs. Buffers the session notification to be applied
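
Note: with the `BufferedSessionChanges` storage item and the `on_finalize` hook removed, `on_new_session` now forwards the notification straight to `T::SessionHandler` in the block in which it arrives. For reference, a minimal sketch of an `ApplyNewSession` implementor; the crate path and the exact parameter types are inferred from the removed `BufferedSessionChange` struct and are illustrative, not part of this diff:

    // Sketch only: a no-op handler satisfying the pallet's `ApplyNewSession` trait.
    pub struct NoopSessionHandler;
    impl<T: pallet_initializer::Config> pallet_initializer::ApplyNewSession<T> for NoopSessionHandler {
        fn apply_new_session(
            _changed: bool,
            _session_index: T::SessionIndex,
            _validators: Vec<(T::AccountId, T::AuthorityId)>,
            _queued: Vec<(T::AccountId, T::AuthorityId)>,
        ) {
            // A real handler would update collator assignment / authority mapping here.
        }
    }
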
35 changes: 3 additions & 32 deletions pallets/initializer/src/tests.rs
@@ -16,7 +16,7 @@
 
 use {
     super::*,
-    crate::mock::{new_test_ext, session_change_validators, Initializer, System, Test},
+    crate::mock::{new_test_ext, session_change_validators, Initializer},
 };
 
 #[test]
@@ -29,15 +29,12 @@ fn session_0_is_instantly_applied() {
             Some(Vec::new().into_iter()),
         );
 
-        let v = BufferedSessionChanges::<Test>::get();
-        assert!(v.is_none());
-
         assert_eq!(session_change_validators(), Some((0, Vec::new())));
     });
 }
 
 #[test]
-fn session_change_before_initialize_is_still_buffered_after() {
+fn session_change_applied() {
     new_test_ext().execute_with(|| {
         Initializer::test_trigger_on_new_session(
             false,
@@ -46,33 +43,7 @@ fn session_change_before_initialize_is_still_buffered_after() {
             Some(Vec::new().into_iter()),
         );
 
-        let now = System::block_number();
-        Initializer::on_initialize(now);
-
-        // Session change validators are applied after on_finalize
-        assert_eq!(session_change_validators(), None);
-
-        let v = BufferedSessionChanges::<Test>::get();
-        assert!(v.is_some());
-    });
-}
-
-#[test]
-fn session_change_applied_on_finalize() {
-    new_test_ext().execute_with(|| {
-        Initializer::on_initialize(1);
-        Initializer::test_trigger_on_new_session(
-            false,
-            1,
-            Vec::new().into_iter(),
-            Some(Vec::new().into_iter()),
-        );
-
-        Initializer::on_finalize(1);
-
-        // Session change validators are applied after on_finalize
+        // Session change validators are applied
         assert_eq!(session_change_validators(), Some((1, Vec::new())));
-
-        assert!(BufferedSessionChanges::<Test>::get().is_none());
     });
 }
14 changes: 13 additions & 1 deletion runtime/dancebox/src/lib.rs
@@ -80,6 +80,7 @@ use {
     },
     sp_std::{marker::PhantomData, prelude::*},
     sp_version::RuntimeVersion,
+    tp_traits::GetSessionContainerChains,
 };
 pub use {
     sp_runtime::{MultiAddress, Perbill, Permill},
@@ -1332,7 +1333,18 @@ impl_runtime_apis! {
     impl pallet_registrar_runtime_api::RegistrarApi<Block, ParaId, MaxLengthTokenSymbol> for Runtime {
         /// Return the registered para ids
         fn registered_paras() -> Vec<ParaId> {
-            Registrar::registered_para_ids().to_vec()
+            // We should return the container-chains for the session in which we are kicking in
+            let parent_number = System::block_number();
+            let should_end_session = <Runtime as pallet_session::Config>::ShouldEndSession::should_end_session(parent_number + 1);
+
+            let session_index = if should_end_session {
+                Session::current_index() +1
+            }
+            else {
+                Session::current_index()
+            };
+
+            Registrar::session_container_chains(session_index).to_vec()
         }
 
         /// Fetch genesis data for this para id
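
Note: `registered_paras` now answers for the session that the block about to be built will belong to. If block `parent_number + 1` ends the current session, the container chains assigned to the next session index are returned; otherwise the current session's assignment is returned. A small self-contained restatement of that selection, as an illustrative helper rather than code from this commit:

    // Sketch only: which session index `registered_paras` reports container chains for.
    fn session_for_next_block(current_index: u32, next_block_ends_session: bool) -> u32 {
        if next_block_ends_session {
            // The next block starts a new session, so report that session's container chains.
            current_index + 1
        } else {
            current_index
        }
    }
    // e.g. session_for_next_block(5, false) == 5, while session_for_next_block(5, true) == 6.
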
2 changes: 1 addition & 1 deletion test/configs/zombieDanceboxUpgrade.json
@@ -6,7 +6,7 @@
     "relaychain": {
         "chain": "rococo-local",
         "default_command": "tmp/polkadot",
-        "default_args": ["--no-hardware-benchmarks", "-lparachain=debug", "--database=paritydb"],
+        "default_args": ["--no-hardware-benchmarks", "-lparachain=debug", "--database=paritydb", "--no-beefy"],
         "nodes": [
             {
                 "name": "alice",
2 changes: 1 addition & 1 deletion test/configs/zombieTanssi.json
@@ -6,7 +6,7 @@
     "relaychain": {
         "chain": "rococo-local",
         "default_command": "tmp/polkadot",
-        "default_args": ["--no-hardware-benchmarks", "-lparachain=debug", "--database=paritydb"],
+        "default_args": ["--no-hardware-benchmarks", "-lparachain=debug", "--database=paritydb", "--no-beefy"],
         "nodes": [
             {
                 "name": "alice",
2 changes: 1 addition & 1 deletion test/configs/zombieTanssiMetrics.json
@@ -6,7 +6,7 @@
     "relaychain": {
         "chain": "rococo-local",
        "default_command": "tmp/polkadot",
-        "default_args": ["--no-hardware-benchmarks", "-lparachain=debug", "--database=paritydb"],
+        "default_args": ["--no-hardware-benchmarks", "-lparachain=debug", "--database=paritydb", "--no-beefy"],
         "nodes": [
             {
                 "name": "alice",
2 changes: 1 addition & 1 deletion test/configs/zombieTanssiWarpSync.json
@@ -6,7 +6,7 @@
     "relaychain": {
         "chain": "rococo-local",
         "default_command": "tmp/polkadot",
-        "default_args": ["--no-hardware-benchmarks", "-lparachain=debug", "--database=paritydb"],
+        "default_args": ["--no-hardware-benchmarks", "-lparachain=debug", "--database=paritydb", "--no-beefy"],
         "nodes": [
             {
                 "name": "alice",
19 changes: 0 additions & 19 deletions typescript-api/src/dancebox/interfaces/augment-api-query.ts
@@ -33,7 +33,6 @@ import type {
   PalletBalancesIdAmount,
   PalletBalancesReserveData,
   PalletConfigurationHostConfiguration,
-  PalletInitializerBufferedSessionChange,
   PalletPooledStakingCandidateEligibleCandidate,
   PalletPooledStakingPendingOperationKey,
   PalletPooledStakingPoolsKey,
@@ -263,24 +262,6 @@ declare module "@polkadot/api-base/types/storage" {
      /** Generic query */
      [key: string]: QueryableStorageEntry<ApiType>;
    };
-    initializer: {
-      /**
-       * Buffered session changes along with the block number at which they should be applied.
-       *
-       * Typically this will be empty or one element long. Apart from that this item never hits the storage.
-       *
-       * However this is a `Vec` regardless to handle various edge cases that may occur at runtime upgrade boundaries or
-       * if governance intervenes.
-       */
-      bufferedSessionChanges: AugmentedQuery<
-        ApiType,
-        () => Observable<Option<PalletInitializerBufferedSessionChange>>,
-        []
-      > &
-        QueryableStorageEntry<ApiType, []>;
-      /** Generic query */
-      [key: string]: QueryableStorageEntry<ApiType>;
-    };
    invulnerables: {
      /** The invulnerable, permissioned collators. This list must be sorted. */
      invulnerables: AugmentedQuery<ApiType, () => Observable<Vec<AccountId32>>, []> &