Revert "[Runtime] Bound XCMP queue" (#3117)
Reverts #2302. 🤦‍♂️ Should have checked the
migration CI first.

We either need to reduce the `max_message_size` for the open HRMP
channels on the failing chains or increase the `PageSize` of the XCMP
queue.
Both would be fine on a testnet, but I assume this will also fail
before the next SP runtime upgrade, so we first need to think about
what is best to do.
AFAIK it is currently not possible to change the `max_message_size` of
an open HRMP channel.
ggwpez authored Jan 30, 2024
1 parent 400ee1b commit fafe272
Showing 23 changed files with 65 additions and 367 deletions.
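Before the per-file hunks, a rough illustration of the two fixes floated in the commit message. This is a hypothetical runtime snippet with a made-up value, not code from this commit: the underlying constraint is that every open HRMP channel's `max_message_size` must fit into a single XCMP queue page (minus a small encoding overhead for the message format).

```rust
use frame_support::parameter_types;

parameter_types! {
    // Option 2 from the commit message: raise the queue's page size above
    // the largest `max_message_size` of any open HRMP channel, leaving some
    // headroom for the message-format encoding overhead.
    // 105 * 1024 is an illustrative value only, not a recommendation.
    pub const MaxPageSize: u32 = 105 * 1024;
}
```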
9 changes: 1 addition & 8 deletions cumulus/pallets/parachain-system/src/lib.rs

```diff
@@ -30,7 +30,7 @@
 use codec::{Decode, Encode};
 use cumulus_primitives_core::{
 	relay_chain, AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo,
-	GetChannelInfo, InboundDownwardMessage, InboundHrmpMessage, ListChannelInfos, MessageSendError,
+	GetChannelInfo, InboundDownwardMessage, InboundHrmpMessage, MessageSendError,
 	OutboundHrmpMessage, ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender,
 	XcmpMessageHandler, XcmpMessageSource,
 };
@@ -1023,13 +1023,6 @@ impl<T: Config> FeeTracker for Pallet<T> {
 	}
 }
 
-impl<T: Config> ListChannelInfos for Pallet<T> {
-	fn outgoing_channels() -> Vec<ParaId> {
-		let Some(state) = Self::relevant_messaging_state() else { return Vec::new() };
-		state.egress_channels.into_iter().map(|(id, _)| id).collect()
-	}
-}
-
 impl<T: Config> GetChannelInfo for Pallet<T> {
 	fn get_channel_status(id: ParaId) -> ChannelStatus {
 		// Note, that we are using `relevant_messaging_state` which may be from the previous
```
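For illustration, a hedged sketch of what the reverted `ListChannelInfos` impl above offered to callers. The helper below is hypothetical; only the trait and its `outgoing_channels` method come from the removed code shown in the hunk.

```rust
use cumulus_primitives_core::ListChannelInfos;

// Hypothetical sanity check: with the now-reverted trait, a runtime could
// have verified that a configured channel bound covers every egress channel
// that is actually open according to the relay chain's messaging state.
fn bound_covers_open_channels<T: ListChannelInfos>(max_active: u32) -> bool {
    T::outgoing_channels().len() as u32 <= max_active
}
```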
105 changes: 25 additions & 80 deletions cumulus/pallets/xcmp-queue/src/lib.rs

```diff
@@ -51,7 +51,7 @@ pub mod weights;
 pub use weights::WeightInfo;
 
 use bounded_collections::BoundedBTreeSet;
-use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeLimit, Encode};
 use cumulus_primitives_core::{
 	relay_chain::BlockNumber as RelayBlockNumber, ChannelStatus, GetChannelInfo, MessageSendError,
 	ParaId, XcmpMessageFormat, XcmpMessageHandler, XcmpMessageSource,
@@ -105,6 +105,7 @@ pub mod pallet {
 
 	#[pallet::pallet]
 	#[pallet::storage_version(migration::STORAGE_VERSION)]
+	#[pallet::without_storage_info]
 	pub struct Pallet<T>(_);
 
 	#[pallet::config]
@@ -131,25 +132,6 @@ pub mod pallet {
 		#[pallet::constant]
 		type MaxInboundSuspended: Get<u32>;
 
-		/// Maximal number of outbound XCMP channels that can have messages queued at the same time.
-		///
-		/// If this is reached, then no further messages can be sent to channels that do not yet
-		/// have a message queued. This should be set to the expected maximum of outbound channels
-		/// which is determined by [`Self::ChannelInfo`]. It is important to set this correctly,
-		/// since otherwise the congestion control protocol will not work as intended and messages
-		/// may be dropped. This value increases the PoV and should therefore not be picked too
-		/// high.
-		#[pallet::constant]
-		type MaxActiveOutboundChannels: Get<u32>;
-
-		/// The maximal page size for HRMP message pages.
-		///
-		/// A lower limit can be set dynamically, but this is the hard-limit for the PoV worst case
-		/// benchmarking. The limit for the size of a message is slightly below this, since some
-		/// overhead is incurred for encoding the format.
-		#[pallet::constant]
-		type MaxPageSize: Get<u32>;
-
 		/// The origin that is allowed to resume or suspend the XCMP queue.
 		type ControllerOrigin: EnsureOrigin<Self::RuntimeOrigin>;
 
@@ -294,10 +276,6 @@ pub mod pallet {
 		AlreadySuspended,
 		/// The execution is already resumed.
 		AlreadyResumed,
-		/// There are too many active outbound channels.
-		TooManyOutboundChannels,
-		/// The message is too big.
-		TooBig,
 	}
 
 	/// The suspended inbound XCMP channels. All others are not suspended.
@@ -319,28 +297,19 @@ pub mod pallet {
 	/// case of the need to send a high-priority signal message this block.
 	/// The bool is true if there is a signal message waiting to be sent.
 	#[pallet::storage]
-	pub(super) type OutboundXcmpStatus<T: Config> = StorageValue<
-		_,
-		BoundedVec<OutboundChannelDetails, T::MaxActiveOutboundChannels>,
-		ValueQuery,
-	>;
+	pub(super) type OutboundXcmpStatus<T: Config> =
+		StorageValue<_, Vec<OutboundChannelDetails>, ValueQuery>;
 
+	// The new way of doing it:
 	/// The messages outbound in a given XCMP channel.
 	#[pallet::storage]
-	pub(super) type OutboundXcmpMessages<T: Config> = StorageDoubleMap<
-		_,
-		Blake2_128Concat,
-		ParaId,
-		Twox64Concat,
-		u16,
-		BoundedVec<u8, T::MaxPageSize>,
-		ValueQuery,
-	>;
+	pub(super) type OutboundXcmpMessages<T: Config> =
+		StorageDoubleMap<_, Blake2_128Concat, ParaId, Twox64Concat, u16, Vec<u8>, ValueQuery>;
 
 	/// Any signal messages waiting to be sent.
 	#[pallet::storage]
 	pub(super) type SignalMessages<T: Config> =
-		StorageMap<_, Blake2_128Concat, ParaId, BoundedVec<u8, T::MaxPageSize>, ValueQuery>;
+		StorageMap<_, Blake2_128Concat, ParaId, Vec<u8>, ValueQuery>;
 
 	/// The configuration which controls the dynamics of the outbound queue.
 	#[pallet::storage]
@@ -362,14 +331,15 @@ pub mod pallet {
 		StorageMap<_, Twox64Concat, ParaId, FixedU128, ValueQuery, InitialFactor>;
 }
 
-#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)]
 pub enum OutboundState {
 	Ok,
 	Suspended,
 }
 
 /// Struct containing detailed information about the outbound channel.
-#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, RuntimeDebug, MaxEncodedLen)]
+#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)]
+#[cfg_attr(feature = "std", derive(Debug))]
 pub struct OutboundChannelDetails {
 	/// The `ParaId` of the parachain that this channel is connected with.
 	recipient: ParaId,
@@ -405,7 +375,7 @@ impl OutboundChannelDetails {
 	}
 }
 
-#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)]
 pub struct QueueConfigData {
 	/// The number of pages which must be in the queue for the other side to be told to suspend
 	/// their sending.
@@ -508,9 +478,7 @@ impl<T: Config> Pallet<T> {
 		{
 			details
 		} else {
-			all_channels
-				.try_push(OutboundChannelDetails::new(recipient))
-				.map_err(|_| MessageSendError::TooManyChannels)?;
+			all_channels.push(OutboundChannelDetails::new(recipient));
 			all_channels
 				.last_mut()
 				.expect("can't be empty; a new element was just pushed; qed")
@@ -535,7 +503,7 @@ impl<T: Config> Pallet<T> {
 				if page.len() + encoded_fragment.len() > max_message_size {
 					return None
 				}
-				page.try_extend(encoded_fragment.iter().cloned()).ok()?;
+				page.extend_from_slice(&encoded_fragment[..]);
 				Some(page.len())
 			},
 		)
@@ -553,9 +521,7 @@ impl<T: Config> Pallet<T> {
 			new_page.extend_from_slice(&encoded_fragment[..]);
 			let last_page_size = new_page.len();
 			let number_of_pages = (channel_details.last_index - channel_details.first_index) as u32;
-			let bounded_page =
-				BoundedVec::try_from(new_page).map_err(|_| MessageSendError::TooBig)?;
-			<OutboundXcmpMessages<T>>::insert(recipient, page_index, bounded_page);
+			<OutboundXcmpMessages<T>>::insert(recipient, page_index, new_page);
 			<OutboundXcmpStatus<T>>::put(all_channels);
 			(number_of_pages, last_page_size)
 		};
@@ -577,21 +543,17 @@ impl<T: Config> Pallet<T> {
 
 	/// Sends a signal to the `dest` chain over XCMP. This is guaranteed to be dispatched on this
 	/// block.
-	fn send_signal(dest: ParaId, signal: ChannelSignal) -> Result<(), Error<T>> {
+	fn send_signal(dest: ParaId, signal: ChannelSignal) {
 		let mut s = <OutboundXcmpStatus<T>>::get();
 		if let Some(details) = s.iter_mut().find(|item| item.recipient == dest) {
 			details.signals_exist = true;
 		} else {
-			s.try_push(OutboundChannelDetails::new(dest).with_signals())
-				.map_err(|_| Error::<T>::TooManyOutboundChannels)?;
+			s.push(OutboundChannelDetails::new(dest).with_signals());
 		}
-
-		let page = BoundedVec::try_from((XcmpMessageFormat::Signals, signal).encode())
-			.map_err(|_| Error::<T>::TooBig)?;
-
-		<SignalMessages<T>>::insert(dest, page);
+		<SignalMessages<T>>::mutate(dest, |page| {
+			*page = (XcmpMessageFormat::Signals, signal).encode();
+		});
 		<OutboundXcmpStatus<T>>::put(s);
-		Ok(())
 	}
 
 	fn suspend_channel(target: ParaId) {
@@ -601,13 +563,7 @@ impl<T: Config> Pallet<T> {
 				defensive_assert!(ok, "WARNING: Attempt to suspend channel that was not Ok.");
 				details.state = OutboundState::Suspended;
 			} else {
-				if s.try_push(OutboundChannelDetails::new(target).with_suspended_state()).is_err() {
-					// Nothing that we can do here. The outbound channel does not exist either, so
-					// there should be no message going out as well. The next time that the channel
-					// can be created it will again get the suspension from the remote side. It can
-					// therefore result in a few lost messages, but should eventually self-repair.
-					defensive!("Cannot pause channel; too many outbound channels");
-				}
+				s.push(OutboundChannelDetails::new(target).with_suspended_state());
 			}
 		});
 	}
@@ -708,17 +664,13 @@ impl<T: Config> OnQueueChanged<ParaId> for Pallet<T> {
 		let suspended = suspended_channels.contains(&para);
 
 		if suspended && fp.pages <= resume_threshold {
-			// If the resuming fails then it is not critical. We will retry in the future.
-			let _ = Self::send_signal(para, ChannelSignal::Resume);
+			Self::send_signal(para, ChannelSignal::Resume);
 
 			suspended_channels.remove(&para);
 			<InboundXcmpSuspended<T>>::put(suspended_channels);
 		} else if !suspended && fp.pages >= suspend_threshold {
 			log::warn!("XCMP queue for sibling {:?} is full; suspending channel.", para);
-			if let Err(_) = Self::send_signal(para, ChannelSignal::Suspend) {
-				// It will retry if `drop_threshold` is not reached, but it could be too late.
-				defensive!("Could not send suspension signal; future messages may be dropped.");
-			}
+			Self::send_signal(para, ChannelSignal::Suspend);
 
 			if let Err(err) = suspended_channels.try_insert(para) {
 				log::error!("Too many channels suspended; cannot suspend sibling {:?}: {:?}; further messages may be dropped.", para, err);
@@ -890,7 +842,7 @@ impl<T: Config> XcmpMessageSource for Pallet<T> {
 				// since it's so unlikely then for now we just drop it.
 				defensive!("WARNING: oversize message in queue - dropping");
 			} else {
-				result.push((para_id, page.into_inner()));
+				result.push((para_id, page));
 			}
 
 			let max_total_size = match T::ChannelInfo::get_channel_info(para_id) {
@@ -938,14 +890,7 @@ impl<T: Config> XcmpMessageSource for Pallet<T> {
 		let pruned = old_statuses_len - statuses.len();
 		// removing an item from status implies a message being sent, so the result messages must
 		// be no less than the pruned channels.
-
-		// TODO <https://github.com/paritytech/parity-common/pull/800>
-		{
-			let mut statuses_inner = statuses.into_inner();
-			statuses_inner.rotate_left(result.len().saturating_sub(pruned));
-			statuses = BoundedVec::try_from(statuses_inner)
-				.expect("Rotating does not change the length; it still fits; qed");
-		}
+		statuses.rotate_left(result.len().saturating_sub(pruned));
 
 		<OutboundXcmpStatus<T>>::put(statuses);
 
```
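The hunks above make `send_signal` infallible again, but the back-pressure rules applied in `OnQueueChanged::on_queue_changed` are unchanged: ask a sibling to stop sending once its inbound queue holds `suspend_threshold` pages, and to resume once it drains to `resume_threshold`. A minimal, self-contained sketch of those rules (free-standing types, not the pallet's API):

```rust
#[derive(Debug, PartialEq)]
enum Signal {
    Suspend,
    Resume,
}

// Decide which signal (if any) to send, given the current page count of a
// sibling's queue and whether we already asked it to suspend.
fn signal_for(pages: u32, suspended: bool, suspend_at: u32, resume_at: u32) -> Option<Signal> {
    if suspended && pages <= resume_at {
        Some(Signal::Resume)
    } else if !suspended && pages >= suspend_at {
        Some(Signal::Suspend)
    } else {
        // Between the thresholds: keep the current state to avoid flapping.
        None
    }
}

fn main() {
    // Hypothetical thresholds: suspend at 32 queued pages, resume at 8.
    assert_eq!(signal_for(33, false, 32, 8), Some(Signal::Suspend));
    assert_eq!(signal_for(7, true, 32, 8), Some(Signal::Resume));
    assert_eq!(signal_for(16, false, 32, 8), None);
    assert_eq!(signal_for(16, true, 32, 8), None);
}
```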
26 changes: 12 additions & 14 deletions cumulus/pallets/xcmp-queue/src/migration.rs

```diff
@@ -16,8 +16,6 @@
 
 //! A module that is responsible for migration of storage.
 
-pub mod v5;
-
 use crate::{Config, OverweightIndex, Pallet, QueueConfig, QueueConfigData, DEFAULT_POV_SIZE};
 use cumulus_primitives_core::XcmpMessageFormat;
 use frame_support::{
@@ -27,7 +25,7 @@ use frame_support::{
 };
 
 /// The current storage version.
-pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(5);
+pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(4);
 
 pub const LOG: &str = "runtime::xcmp-queue-migration";
 
@@ -266,9 +264,9 @@ pub mod v4 {
 
 	/// Migrates `QueueConfigData` to v4, removing deprecated fields and bumping page
 	/// thresholds to at least the default values.
-	pub struct UncheckedMigrateV3ToV4<T: Config>(PhantomData<T>);
+	pub struct UncheckedMigrationToV4<T: Config>(PhantomData<T>);
 
-	impl<T: Config> OnRuntimeUpgrade for UncheckedMigrateV3ToV4<T> {
+	impl<T: Config> OnRuntimeUpgrade for UncheckedMigrationToV4<T> {
 		fn on_runtime_upgrade() -> Weight {
 			let translate = |pre: v2::QueueConfigData| -> QueueConfigData {
 				let pre_default = v2::QueueConfigData::default();
@@ -301,13 +299,13 @@ pub mod v4 {
 		}
 	}
 
-	/// [`UncheckedMigrateV3ToV4`] wrapped in a
+	/// [`UncheckedMigrationToV4`] wrapped in a
 	/// [`VersionedMigration`](frame_support::migrations::VersionedMigration), ensuring the
 	/// migration is only performed when on-chain version is 3.
-	pub type MigrateV3ToV4<T> = frame_support::migrations::VersionedMigration<
+	pub type MigrationToV4<T> = frame_support::migrations::VersionedMigration<
 		3,
 		4,
-		UncheckedMigrateV3ToV4<T>,
+		UncheckedMigrationToV4<T>,
 		Pallet<T>,
 		<T as frame_system::Config>::DbWeight,
 	>;
@@ -374,10 +372,10 @@ mod tests {
 			&v2.encode(),
 		);
 
-		let bytes = v4::MigrateV3ToV4::<Test>::pre_upgrade();
+		let bytes = v4::MigrationToV4::<Test>::pre_upgrade();
 		assert!(bytes.is_ok());
-		v4::MigrateV3ToV4::<Test>::on_runtime_upgrade();
-		assert!(v4::MigrateV3ToV4::<Test>::post_upgrade(bytes.unwrap()).is_ok());
+		v4::MigrationToV4::<Test>::on_runtime_upgrade();
+		assert!(v4::MigrationToV4::<Test>::post_upgrade(bytes.unwrap()).is_ok());
 
 		let v4 = QueueConfig::<Test>::get();
 
@@ -403,10 +401,10 @@ mod tests {
 			&v2.encode(),
 		);
 
-		let bytes = v4::MigrateV3ToV4::<Test>::pre_upgrade();
+		let bytes = v4::MigrationToV4::<Test>::pre_upgrade();
 		assert!(bytes.is_ok());
-		v4::MigrateV3ToV4::<Test>::on_runtime_upgrade();
-		assert!(v4::MigrateV3ToV4::<Test>::post_upgrade(bytes.unwrap()).is_ok());
+		v4::MigrationToV4::<Test>::on_runtime_upgrade();
+		assert!(v4::MigrationToV4::<Test>::post_upgrade(bytes.unwrap()).is_ok());
 
 		let v4 = QueueConfig::<Test>::get();
 
```
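Since the revert also renames the v4 migration types back, downstream runtimes refer to `MigrationToV4` again. A hedged sketch of the usual wiring (`Runtime` and the tuple contents are hypothetical):

```rust
// Hypothetical runtime wiring: `VersionedMigration` runs the inner unchecked
// migration only when the pallet's on-chain storage version is exactly 3 and
// then bumps it to 4, so re-running it on a later upgrade is a no-op.
pub type Migrations = (
    cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4<Runtime>,
    // ...migrations for other pallets.
);
```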
(Diff truncated: the remaining 20 changed files are not shown.)