runtime: remove ttl (#5461)
Resolves #4776

This will enable proper core-sharing between paras, even if one of them
is not producing blocks.

TODO:
- [x] duplicate the first entry in the claim queue if the queue used to be empty (see the sketch after this list)
- [x] don't back anything if there will be a session change at the end of the block
- [x] write a migration for removing the availability core storage
- [x] update and write unit tests
- [x] prdoc
- [x] add a zombienet test for synchronous backing
- [x] add a zombienet test for core-sharing paras where one of them is not
producing any blocks
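
A minimal, self-contained sketch of the first item, for intuition only: `Assignment` and `refill_claim_queue` below are hypothetical stand-ins, not the scheduler pallet's types. The idea is that when a core's claim queue used to be empty, the first pushed claim is duplicated, so the para immediately holds both a "current" and a "next-up" claim; the `on_duplicated` callback plays the role of the new `assignment_duplicated` hook added to `AssignmentProvider` in this PR (visible in the `assigner_coretime` diff below), which lets e.g. the on-demand pallet account for the extra claim.

```rust
use std::collections::VecDeque;

// Hypothetical stand-in for the scheduler's assignment type.
#[derive(Clone, Debug, PartialEq)]
#[allow(dead_code)]
enum Assignment {
    Bulk(u32),                              // para id
    Pool { para_id: u32, core_index: u16 }, // on-demand claim
}

// Hypothetical helper mirroring the TODO item: if the queue used to be
// empty, duplicate the first entry and notify the provider through the
// `on_duplicated` callback.
fn refill_claim_queue(
    queue: &mut VecDeque<Assignment>,
    incoming: Assignment,
    on_duplicated: impl Fn(&Assignment),
) {
    let was_empty = queue.is_empty();
    queue.push_back(incoming);
    if was_empty {
        let head = queue.front().cloned().expect("just pushed an entry");
        on_duplicated(&head);
        queue.push_back(head);
    }
}

fn main() {
    let mut queue = VecDeque::new();
    refill_claim_queue(&mut queue, Assignment::Bulk(2000), |a| {
        println!("duplicated claim: {a:?}");
    });
    // The para now holds two claims instead of one, so it can keep being
    // scheduled without waiting another scheduling round.
    assert_eq!(queue.len(), 2);
}
```

Whether the duplication happens exactly at refill time is an implementation detail of the scheduler pallet; the sketch only captures the queue-shape invariant described by the TODO item.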

_Important note:_
The `ttl` and `max_availability_timeouts` fields of the
`HostConfiguration` are not removed in this PR, due to #64.
Adding the storage-version-check workaround to every use of the active
`HostConfiguration` would be insane, as it's read by almost all runtime
APIs.

So even though the `ttl` and `max_availability_timeouts` fields are now
unused, they will remain part of the host configuration.

These will be removed in a separate PR once #64 is fixed. Tracked by
#6067.

---------

Signed-off-by: Andrei Sandu <[email protected]>
Co-authored-by: Andrei Sandu <[email protected]>
Co-authored-by: Andrei Sandu <[email protected]>
Co-authored-by: command-bot <>
3 people authored Oct 21, 2024
1 parent 225536c commit ee803b7
Showing 41 changed files with 1,510 additions and 1,967 deletions.
19 changes: 19 additions & 0 deletions .gitlab/pipeline/zombienet/polkadot.yml
@@ -233,6 +233,25 @@ zombienet-polkadot-functional-0016-approval-voting-parallel:
       --local-dir="${LOCAL_DIR}/functional"
       --test="0016-approval-voting-parallel.zndsl"
 
+zombienet-polkadot-functional-0017-sync-backing:
+  extends:
+    - .zombienet-polkadot-common
+  script:
+    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
+      --local-dir="${LOCAL_DIR}/functional"
+      --test="0017-sync-backing.zndsl"
+
+zombienet-polkadot-functional-0018-shared-core-idle-parachain:
+  extends:
+    - .zombienet-polkadot-common
+  before_script:
+    - !reference [ .zombienet-polkadot-common, before_script ]
+    - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional
+  script:
+    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
+      --local-dir="${LOCAL_DIR}/functional"
+      --test="0018-shared-core-idle-parachain.zndsl"
+
 zombienet-polkadot-smoke-0001-parachains-smoke-test:
   extends:
     - .zombienet-polkadot-common
11 changes: 7 additions & 4 deletions polkadot/primitives/src/v8/mod.rs
@@ -2093,7 +2093,9 @@ pub struct SchedulerParams<BlockNumber> {
     pub lookahead: u32,
     /// How many cores are managed by the coretime chain.
     pub num_cores: u32,
-    /// The max number of times a claim can time out in availability.
+    /// Deprecated and no longer used by the runtime.
+    /// Removal is tracked by <https://github.com/paritytech/polkadot-sdk/issues/6067>.
+    #[deprecated]
     pub max_availability_timeouts: u32,
     /// The maximum queue size of the pay as you go module.
     pub on_demand_queue_max_size: u32,
@@ -2104,13 +2106,14 @@ pub struct SchedulerParams<BlockNumber> {
     pub on_demand_fee_variability: Perbill,
     /// The minimum amount needed to claim a slot in the spot pricing queue.
     pub on_demand_base_fee: Balance,
-    /// The number of blocks a claim stays in the scheduler's claim queue before getting cleared.
-    /// This number should go reasonably higher than the number of blocks in the async backing
-    /// lookahead.
+    /// Deprecated and no longer used by the runtime.
+    /// Removal is tracked by <https://github.com/paritytech/polkadot-sdk/issues/6067>.
+    #[deprecated]
     pub ttl: BlockNumber,
 }
 
 impl<BlockNumber: Default + From<u32>> Default for SchedulerParams<BlockNumber> {
+    #[allow(deprecated)]
     fn default() -> Self {
         Self {
             group_rotation_frequency: 1u32.into(),
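One practical consequence of keeping the deprecated fields: code that still constructs `SchedulerParams` field-by-field (test setups, genesis configs) now trips the deprecation lint. A minimal sketch of how such a caller might look after this PR; the concrete values and the `u32` block number are illustrative assumptions, and it presumes `SchedulerParams` from `polkadot-primitives` is in scope:

```rust
// `ttl` and `max_availability_timeouts` still exist, but the runtime
// ignores them as of this PR; naming them requires silencing the lint.
#[allow(deprecated)]
fn example_scheduler_params() -> SchedulerParams<u32> {
    SchedulerParams::<u32> {
        num_cores: 4,
        lookahead: 2,
        ttl: 5,                       // ignored by the runtime
        max_availability_timeouts: 0, // ignored by the runtime
        ..Default::default()
    }
}
```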
9 changes: 6 additions & 3 deletions polkadot/runtime/parachains/src/assigner_coretime/mod.rs
@@ -318,9 +318,12 @@ impl<T: Config> AssignmentProvider<BlockNumberFor<T>> for Pallet<T> {
         Assignment::Bulk(para_id)
     }
 
-    fn session_core_count() -> u32 {
-        let config = configuration::ActiveConfig::<T>::get();
-        config.scheduler_params.num_cores
+    fn assignment_duplicated(assignment: &Assignment) {
+        match assignment {
+            Assignment::Pool { para_id, core_index } =>
+                on_demand::Pallet::<T>::assignment_duplicated(*para_id, *core_index),
+            Assignment::Bulk(_) => {},
+        }
     }
 }
 
3 changes: 1 addition & 2 deletions polkadot/runtime/parachains/src/assigner_coretime/tests.rs
@@ -26,7 +26,6 @@ use crate::{
     paras::{ParaGenesisArgs, ParaKind},
     scheduler::common::Assignment,
 };
-use alloc::collections::btree_map::BTreeMap;
 use frame_support::{assert_noop, assert_ok, pallet_prelude::*, traits::Currency};
 use pallet_broker::TaskId;
 use polkadot_primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode};
@@ -78,7 +77,7 @@ fn run_to_block(
         OnDemand::on_initialize(b + 1);
 
         // In the real runtime this is expected to be called by the `InclusionInherent` pallet.
-        Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1);
+        Scheduler::advance_claim_queue(&Default::default());
     }
 }

4 changes: 1 addition & 3 deletions polkadot/runtime/parachains/src/assigner_parachains.rs
@@ -63,7 +63,5 @@ impl<T: Config> AssignmentProvider<BlockNumberFor<T>> for Pallet<T> {
         Assignment::Bulk(para_id)
     }
 
-    fn session_core_count() -> u32 {
-        paras::Parachains::<T>::decode_len().unwrap_or(0) as u32
-    }
+    fn assignment_duplicated(_: &Assignment) {}
 }
3 changes: 1 addition & 2 deletions polkadot/runtime/parachains/src/assigner_parachains/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ use crate::{
},
paras::{ParaGenesisArgs, ParaKind},
};
use alloc::collections::btree_map::BTreeMap;
use frame_support::{assert_ok, pallet_prelude::*};
use polkadot_primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode};

Expand Down Expand Up @@ -71,7 +70,7 @@ fn run_to_block(
Scheduler::initializer_initialize(b + 1);

// In the real runtime this is expected to be called by the `InclusionInherent` pallet.
Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1);
Scheduler::advance_claim_queue(&Default::default());
}
}

131 changes: 59 additions & 72 deletions polkadot/runtime/parachains/src/builder.rs
@@ -18,7 +18,10 @@ use crate::{
     configuration, inclusion, initializer, paras,
     paras::ParaKind,
     paras_inherent,
-    scheduler::{self, common::AssignmentProvider, CoreOccupied, ParasEntry},
+    scheduler::{
+        self,
+        common::{Assignment, AssignmentProvider},
+    },
     session_info, shared,
 };
 use alloc::{
@@ -138,8 +141,6 @@ pub(crate) struct BenchBuilder<T: paras_inherent::Config> {
     /// Make every candidate include a code upgrade by setting this to `Some` where the interior
     /// value is the byte length of the new code.
     code_upgrade: Option<u32>,
-    /// Specifies whether the claimqueue should be filled.
-    fill_claimqueue: bool,
     /// Cores which should not be available when being populated with pending candidates.
     unavailable_cores: Vec<u32>,
     /// Use v2 candidate descriptor.
@@ -178,7 +179,6 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
             backed_in_inherent_paras: Default::default(),
             elastic_paras: Default::default(),
             code_upgrade: None,
-            fill_claimqueue: true,
             unavailable_cores: vec![],
             candidate_descriptor_v2: false,
             candidate_modifier: None,
@@ -322,13 +322,6 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
         self.max_validators() / self.max_validators_per_core()
     }
 
-    /// Set whether the claim queue should be filled.
-    #[cfg(not(feature = "runtime-benchmarks"))]
-    pub(crate) fn set_fill_claimqueue(mut self, f: bool) -> Self {
-        self.fill_claimqueue = f;
-        self
-    }
-
     /// Get the minimum number of validity votes in order for a backed candidate to be included.
     #[cfg(feature = "runtime-benchmarks")]
     pub(crate) fn fallback_min_backing_votes() -> u32 {
@@ -340,10 +333,13 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
         HeadData(vec![0xFF; max_head_size as usize])
     }
 
-    fn candidate_descriptor_mock(candidate_descriptor_v2: bool) -> CandidateDescriptorV2<T::Hash> {
+    fn candidate_descriptor_mock(
+        para_id: ParaId,
+        candidate_descriptor_v2: bool,
+    ) -> CandidateDescriptorV2<T::Hash> {
         if candidate_descriptor_v2 {
             CandidateDescriptorV2::new(
-                0.into(),
+                para_id,
                 Default::default(),
                 CoreIndex(200),
                 2,
@@ -356,7 +352,7 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
         } else {
             // Convert v1 to v2.
             CandidateDescriptor::<T::Hash> {
-                para_id: 0.into(),
+                para_id,
                 relay_parent: Default::default(),
                 collator: junk_collator(),
                 persisted_validation_data_hash: Default::default(),
@@ -373,6 +369,7 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 
     /// Create a mock of `CandidatePendingAvailability`.
     fn candidate_availability_mock(
+        para_id: ParaId,
         group_idx: GroupIndex,
         core_idx: CoreIndex,
         candidate_hash: CandidateHash,
@@ -381,15 +378,16 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
         candidate_descriptor_v2: bool,
     ) -> inclusion::CandidatePendingAvailability<T::Hash, BlockNumberFor<T>> {
         inclusion::CandidatePendingAvailability::<T::Hash, BlockNumberFor<T>>::new(
-            core_idx,           // core
-            candidate_hash,     // hash
-            Self::candidate_descriptor_mock(candidate_descriptor_v2), // candidate descriptor
-            commitments,        // commitments
-            availability_votes, // availability votes
-            Default::default(), // backers
-            Zero::zero(),       // relay parent
-            One::one(),         /* relay chain block this
-                                 * was backed in */
+            core_idx,           // core
+            candidate_hash,     // hash
+            Self::candidate_descriptor_mock(para_id, candidate_descriptor_v2), /* candidate descriptor */
+            commitments,        // commitments
+            availability_votes, /* availability
+                                 * votes */
+            Default::default(), // backers
+            Zero::zero(),       // relay parent
+            One::one(),         /* relay chain block this
+                                 * was backed in */
             group_idx, // backing group
         )
     }
@@ -416,6 +414,7 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
             hrmp_watermark: 0u32.into(),
         };
         let candidate_availability = Self::candidate_availability_mock(
+            para_id,
             group_idx,
             core_idx,
             candidate_hash,
@@ -886,14 +885,11 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
             extra_cores;
 
         assert!(used_cores <= max_cores);
-        let fill_claimqueue = self.fill_claimqueue;
 
         // NOTE: there is an n+2 session delay for these actions to take effect.
         // We are currently in Session 0, so these changes will take effect in Session 2.
         Self::setup_para_ids(used_cores - extra_cores);
-        configuration::ActiveConfig::<T>::mutate(|c| {
-            c.scheduler_params.num_cores = used_cores as u32;
-        });
+        configuration::Pallet::<T>::set_coretime_cores_unchecked(used_cores as u32).unwrap();
 
         let validator_ids = generate_validator_pairs::<T>(self.max_validators());
         let target_session = SessionIndex::from(self.target_session);
@@ -902,7 +898,7 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
         let bitfields = builder.create_availability_bitfields(
             &builder.backed_and_concluding_paras,
             &builder.elastic_paras,
-            used_cores,
+            scheduler::Pallet::<T>::num_availability_cores(),
         );
 
         let mut backed_in_inherent = BTreeMap::new();
@@ -930,66 +926,57 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 
         assert_eq!(inclusion::PendingAvailability::<T>::iter().count(), used_cores - extra_cores);
 
-        // Mark all the used cores as occupied. We expect that there are
-        // `backed_and_concluding_paras` that are pending availability and that there are
-        // `used_cores - backed_and_concluding_paras ` which are about to be disputed.
-        let now = frame_system::Pallet::<T>::block_number() + One::one();
-
+        // Sanity check that the occupied cores reported by the inclusion module are what we expect
+        // to be.
         let mut core_idx = 0u32;
         let elastic_paras = &builder.elastic_paras;
-        // Assign potentially multiple cores to same parachains,
-        let cores = all_cores
+
+        let mut occupied_cores = inclusion::Pallet::<T>::get_occupied_cores()
+            .map(|(core, candidate)| (core, candidate.candidate_descriptor().para_id()))
+            .collect::<Vec<_>>();
+        occupied_cores.sort_by(|(core_a, _), (core_b, _)| core_a.0.cmp(&core_b.0));
+
+        let mut expected_cores = all_cores
             .iter()
             .flat_map(|(para_id, _)| {
                 (0..elastic_paras.get(&para_id).cloned().unwrap_or(1))
                     .map(|_para_local_core_idx| {
-                        let ttl = configuration::ActiveConfig::<T>::get().scheduler_params.ttl;
-                        // Load an assignment into provider so that one is present to pop
-                        let assignment =
-                            <T as scheduler::Config>::AssignmentProvider::get_mock_assignment(
-                                CoreIndex(core_idx),
-                                ParaId::from(*para_id),
-                            );
+                        let old_core_idx = core_idx;
                         core_idx += 1;
-                        CoreOccupied::Paras(ParasEntry::new(assignment, now + ttl))
+                        (CoreIndex(old_core_idx), ParaId::from(*para_id))
                     })
-                    .collect::<Vec<CoreOccupied<_>>>()
+                    .collect::<Vec<_>>()
             })
-            .collect::<Vec<CoreOccupied<_>>>();
+            .collect::<Vec<_>>();
 
-        scheduler::AvailabilityCores::<T>::set(cores);
+        expected_cores.sort_by(|(core_a, _), (core_b, _)| core_a.0.cmp(&core_b.0));
 
-        core_idx = 0u32;
+        assert_eq!(expected_cores, occupied_cores);
 
         // We need entries in the claim queue for those:
         all_cores.append(&mut builder.backed_in_inherent_paras.clone());
 
-        if fill_claimqueue {
-            let cores = all_cores
-                .keys()
-                .flat_map(|para_id| {
-                    (0..elastic_paras.get(&para_id).cloned().unwrap_or(1))
-                        .map(|_para_local_core_idx| {
-                            let ttl = configuration::ActiveConfig::<T>::get().scheduler_params.ttl;
-                            // Load an assignment into provider so that one is present to pop
-                            let assignment =
-                                <T as scheduler::Config>::AssignmentProvider::get_mock_assignment(
-                                    CoreIndex(core_idx),
-                                    ParaId::from(*para_id),
-                                );
-
-                            core_idx += 1;
-                            (
-                                CoreIndex(core_idx - 1),
-                                [ParasEntry::new(assignment, now + ttl)].into(),
-                            )
-                        })
-                        .collect::<Vec<(CoreIndex, VecDeque<ParasEntry<_>>)>>()
-                })
-                .collect::<BTreeMap<CoreIndex, VecDeque<ParasEntry<_>>>>();
-
-            scheduler::ClaimQueue::<T>::set(cores);
-        }
+        let mut core_idx = 0u32;
+        let cores = all_cores
+            .keys()
+            .flat_map(|para_id| {
+                (0..elastic_paras.get(&para_id).cloned().unwrap_or(1))
+                    .map(|_para_local_core_idx| {
+                        // Load an assignment into provider so that one is present to pop
+                        let assignment =
+                            <T as scheduler::Config>::AssignmentProvider::get_mock_assignment(
+                                CoreIndex(core_idx),
+                                ParaId::from(*para_id),
+                            );
+
+                        core_idx += 1;
+                        (CoreIndex(core_idx - 1), [assignment].into())
+                    })
+                    .collect::<Vec<(CoreIndex, VecDeque<Assignment>)>>()
+            })
+            .collect::<BTreeMap<CoreIndex, VecDeque<Assignment>>>();
+
+        scheduler::ClaimQueue::<T>::set(cores);
 
         Bench::<T> {
             data: ParachainsInherentData {