Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[pallet-broker] Force-unpool provisionally pooled regions before redispatching them #4081

Open
wants to merge 15 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 14 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions prdoc/pr_4081.prdoc
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json

title: "[pallet-broker] Force-unpool provisionally pooled regions before redispatching them"

doc:
- audience: Runtime User
description: |
This PR force removes regions from the pool before allowing them to be redispatched (through
`partition`/`interlace`/`assign`) for regions pooled with `Provisional` finality. To claim
any revenue from before this point, `claim_revenue` should be called before
partitioning/interlacing/assigning as it cannot be claimed afterwards.

crates:
- name: pallet-broker
bump: major
12 changes: 12 additions & 0 deletions substrate/frame/broker/src/benchmarking.rs
Original file line number Diff line number Diff line change
Expand Up @@ -349,6 +349,10 @@ mod benches {
let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into())
.map_err(|_| BenchmarkError::Weightless)?;

// Worst case has an existing provisional pool assignment.
Broker::<T>::do_pool(region, None, caller.clone(), Finality::Provisional)
.map_err(|_| BenchmarkError::Weightless)?;

#[extrinsic_call]
_(RawOrigin::Signed(caller), region, 2);

Expand Down Expand Up @@ -381,6 +385,10 @@ mod benches {
let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into())
.map_err(|_| BenchmarkError::Weightless)?;

// Worst case has an existing provisional pool assignment.
Broker::<T>::do_pool(region, None, caller.clone(), Finality::Provisional)
.map_err(|_| BenchmarkError::Weightless)?;

#[extrinsic_call]
_(RawOrigin::Signed(caller), region, 0x00000_fffff_fffff_00000.into());

Expand Down Expand Up @@ -417,6 +425,10 @@ mod benches {
let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into())
.map_err(|_| BenchmarkError::Weightless)?;

// Worst case has an existing provisional pool assignment.
Broker::<T>::do_pool(region, None, caller.clone(), Finality::Provisional)
.map_err(|_| BenchmarkError::Weightless)?;

#[extrinsic_call]
_(RawOrigin::Signed(caller), region, 1000, Provisional);

Expand Down
22 changes: 22 additions & 0 deletions substrate/frame/broker/src/dispatchable_impls.rs
Original file line number Diff line number Diff line change
Expand Up @@ -220,6 +220,7 @@ impl<T: Config> Pallet<T> {
maybe_check_owner: Option<T::AccountId>,
pivot_offset: Timeslice,
) -> Result<(RegionId, RegionId), Error<T>> {
let status = Status::<T>::get().ok_or(Error::<T>::Uninitialized)?;
let mut region = Regions::<T>::get(&region_id).ok_or(Error::<T>::UnknownRegion)?;

if let Some(check_owner) = maybe_check_owner {
Expand All @@ -232,6 +233,13 @@ impl<T: Config> Pallet<T> {
region.paid = None;
let new_region_ids = (region_id, RegionId { begin: pivot, ..region_id });

// Remove this region from the pool in case it has been assigned provisionally. If we get
// this far then it is still in `Regions` and thus could only have been pooled
// provisionally.
Self::force_unpool_region(region_id, &region, &status);

// Overwrite the previous region with its new end and create a new region for the second
// part of the partition.
Regions::<T>::insert(&new_region_ids.0, &RegionRecord { end: pivot, ..region.clone() });
Regions::<T>::insert(&new_region_ids.1, &region);
Self::deposit_event(Event::Partitioned { old_region_id: region_id, new_region_ids });
Expand All @@ -244,6 +252,7 @@ impl<T: Config> Pallet<T> {
maybe_check_owner: Option<T::AccountId>,
pivot: CoreMask,
) -> Result<(RegionId, RegionId), Error<T>> {
let status = Status::<T>::get().ok_or(Error::<T>::Uninitialized)?;
let region = Regions::<T>::get(&region_id).ok_or(Error::<T>::UnknownRegion)?;

if let Some(check_owner) = maybe_check_owner {
Expand All @@ -254,6 +263,11 @@ impl<T: Config> Pallet<T> {
ensure!(!pivot.is_void(), Error::<T>::VoidPivot);
ensure!(pivot != region_id.mask, Error::<T>::CompletePivot);

// Remove this region from the pool in case it has been assigned provisionally. If we get
// this far then it is still in `Regions` and thus could only have been pooled
// provisionally.
Self::force_unpool_region(region_id, &region, &status);

// The old region should be removed.
Regions::<T>::remove(&region_id);

Expand All @@ -274,9 +288,17 @@ impl<T: Config> Pallet<T> {
finality: Finality,
) -> Result<(), Error<T>> {
let config = Configuration::<T>::get().ok_or(Error::<T>::Uninitialized)?;
let status = Status::<T>::get().ok_or(Error::<T>::Uninitialized)?;

if let Some((region_id, region)) = Self::utilize(region_id, maybe_check_owner, finality)? {
let workplan_key = (region_id.begin, region_id.core);
let mut workplan = Workplan::<T>::get(&workplan_key).unwrap_or_default();

// Remove this region from the pool in case it has been assigned provisionally. If we
// get this far then it is still in `Regions` and thus could only have been pooled
// provisionally.
Self::force_unpool_region(region_id, &region, &status);

// Ensure no previous allocations exist.
workplan.retain(|i| (i.mask & region_id.mask).is_void());
if workplan
Expand Down
8 changes: 8 additions & 0 deletions substrate/frame/broker/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -390,6 +390,14 @@ pub mod pallet {
/// The Region whose contribution no longer exists.
region_id: RegionId,
},
/// A region has been force-removed from the pool. This is usually due to a provisionally
/// pooled region being redeployed.
RegionUnpooled {
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is the reason for the major bump, but since there is an event for a region being pooled we should also have one for it being unpooled.

/// The Region which has been force-removed from the pool.
region_id: RegionId,
/// The timeslice at which the region was force-removed.
when: Timeslice,
},
/// Some historical Instantaneous Core Pool payment record has been initialized.
HistoryInitialized {
/// The timeslice whose history has been initialized.
Expand Down
Loading