Core assignation wired to collatorAssignment #629

Merged: 55 commits, Aug 7, 2024
Commits
050e4c0
first try
girazoki Jul 17, 2024
34d00dd
assign first cores to parachains, rest to parathreads
girazoki Jul 17, 2024
24197ab
zombie starlight
girazoki Jul 18, 2024
8294deb
assignation
girazoki Jul 18, 2024
efafadf
fix moonwall
girazoki Jul 18, 2024
905f17f
swap order of workers
girazoki Jul 18, 2024
bc9d8f8
let's fix this
girazoki Jul 18, 2024
0e379dd
try to generate genesis containers
girazoki Jul 18, 2024
87d01fd
keep advancing
girazoki Jul 18, 2024
99046d0
finally working
girazoki Jul 18, 2024
944bd55
Merge remote-tracking branch 'origin/master' into wiring-core-assigna…
girazoki Jul 19, 2024
a5a2bf6
FMT for now
girazoki Jul 19, 2024
8287d38
Merge remote-tracking branch 'origin/master' into wiring-core-assigna…
girazoki Jul 19, 2024
3ed99fa
zepter and toml-maid
girazoki Jul 19, 2024
c0c6a81
toml-maid again
girazoki Jul 19, 2024
d505fc5
Merge remote-tracking branch 'origin/master' into wiring-core-assigna…
girazoki Jul 19, 2024
8181d8e
fix
girazoki Jul 19, 2024
f3c91e9
strcut doc
girazoki Jul 19, 2024
1b9dc19
more fixes
girazoki Jul 19, 2024
2b6a837
clippy
girazoki Jul 19, 2024
23ea0bf
pnpm lock
girazoki Jul 22, 2024
d9c41f3
wip
girazoki Jul 23, 2024
f845cdd
wip
girazoki Jul 23, 2024
9fa9d34
more wip
girazoki Jul 23, 2024
2b15e38
fmt
girazoki Jul 23, 2024
f8733eb
wip of wip
girazoki Jul 23, 2024
c01a173
wip
girazoki Jul 24, 2024
0f098f2
add first two tests regarding core-scheduling
girazoki Jul 25, 2024
d352df1
yet another test
girazoki Jul 26, 2024
824eb5d
parathread affinity working
girazoki Jul 26, 2024
30b01f5
Merge remote-tracking branch 'origin/master' into wiring-core-assigna…
girazoki Jul 26, 2024
eda7d69
toml maid
girazoki Jul 29, 2024
5de9e61
fmt
girazoki Jul 29, 2024
3b8064c
fix quite a few stuff
girazoki Jul 29, 2024
ca48464
FMT
girazoki Jul 29, 2024
72e3510
fix tanssi relay test
girazoki Jul 29, 2024
e487788
fix clippy
girazoki Jul 29, 2024
20e0950
fix clippy
girazoki Jul 29, 2024
4f89141
FMT more
girazoki Jul 29, 2024
b9a8e5a
fmt
girazoki Jul 29, 2024
1dd390d
Merge remote-tracking branch 'origin/master' into wiring-core-assigna…
girazoki Jul 30, 2024
7ed78e7
fixes
girazoki Jul 30, 2024
c06f886
Merge remote-tracking branch 'origin/master' into wiring-core-assigna…
girazoki Jul 31, 2024
155fdbc
fixes
girazoki Jul 31, 2024
6aea91b
pr feedback
girazoki Aug 1, 2024
b2aa4ed
adapt to parathreadS
girazoki Aug 1, 2024
8c54c48
register parathreads on genesis in tests
girazoki Aug 1, 2024
960e27c
fmt
girazoki Aug 1, 2024
d69c9e7
fixes
girazoki Aug 1, 2024
98a1bd9
add way to retrieve key
girazoki Aug 1, 2024
1215187
tests required by PR review
girazoki Aug 2, 2024
fcf7ad2
more comments
girazoki Aug 2, 2024
d798d2f
better comments
girazoki Aug 2, 2024
2366a3a
fmt
girazoki Aug 2, 2024
11603ab
Merge remote-tracking branch 'origin/master' into wiring-core-assigna…
girazoki Aug 6, 2024
31 changes: 17 additions & 14 deletions solo-chains/runtime/starlight/src/tests/common/mod.rs
@@ -760,6 +760,9 @@ impl<T: runtime_parachains::paras_inherent::Config> ParasInherentTestBuilder<T>
/// available.
/// - `cores_with_backed_candidates` Mapping of `para_id` seed to number of
/// validity votes.
/// Important: this uses a BTreeMap, which means paras are assigned to cores in increasing
/// order of para id. For example, parachains 1000, 1001, and 1002 will use cores 0, 1, and 2
/// respectively. There is no way to force 1002 onto core 0 in this setup.
fn create_backed_candidates(
&self,
paras_with_backed_candidates: &BTreeMap<u32, u32>,
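As a standalone illustration of the ordering guarantee described above (hypothetical para ids, not part of this diff): a BTreeMap iterates in ascending key order, so zipping the iterator with core indices always pins the lowest para id to core 0.

use std::collections::BTreeMap;

fn main() {
    // Keys are para ids, values stand in for validity-vote counts.
    let paras: BTreeMap<u32, u32> = [(1002, 1), (1000, 1), (1001, 1)].into_iter().collect();

    // BTreeMap iteration is ordered by key regardless of insertion order,
    // so paras 1000, 1001 and 1002 land on cores 0, 1 and 2.
    for (core_index, (para_id, votes)) in paras.iter().enumerate() {
        println!("para {para_id} -> core {core_index} ({votes} validity votes)");
    }
}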
@@ -1005,21 +1008,21 @@ impl<T: runtime_parachains::paras_inherent::Config> ParasInherentTestBuilder<T>

use frame_support::StorageHasher;
pub fn storage_map_final_key<H: frame_support::StorageHasher>(
    pallet_prefix: &str,
    map_name: &str,
    key: &[u8],
) -> Vec<u8> {
    let key_hashed = H::hash(key);
    let pallet_prefix_hashed = frame_support::Twox128::hash(pallet_prefix.as_bytes());
    let storage_prefix_hashed = frame_support::Twox128::hash(map_name.as_bytes());

    let mut final_key = Vec::with_capacity(
        pallet_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len(),
    );

    final_key.extend_from_slice(&pallet_prefix_hashed[..]);
    final_key.extend_from_slice(&storage_prefix_hashed[..]);
    final_key.extend_from_slice(key_hashed.as_ref());

    final_key
}
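For example, the tests in this PR use the helper above to locate the OnDemandAssignmentProvider::ParaIdAffinity entry for a para id; a usage sketch (CoreAffinityCount is the mirror type defined at the bottom of core_scheduling_tests.rs):

// Final key = Twox128("OnDemandAssignmentProvider") ++ Twox128("ParaIdAffinity")
//           ++ Twox64Concat(SCALE-encoded ParaId).
let key = storage_map_final_key::<frame_support::Twox64Concat>(
    "OnDemandAssignmentProvider",
    "ParaIdAffinity",
    &cumulus_primitives_core::ParaId::from(1000u32).encode(),
);
let affinity: Option<CoreAffinityCount> = frame_support::storage::unhashed::get(key.as_ref());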
313 changes: 312 additions & 1 deletion solo-chains/runtime/starlight/src/tests/core_scheduling_tests.rs
@@ -16,13 +16,18 @@

#![cfg(test)]

use crate::{ContainerRegistrar, ParasSudoWrapper};
use primitives::CoreIndex;
use runtime_parachains::paras::{ParaGenesisArgs, ParaKind};
use starlight_runtime_constants::time::EpochDurationInBlocks;
use {
crate::tests::common::*,
crate::{OnDemandAssignmentProvider, Paras, Session},
cumulus_primitives_core::relay_chain::{vstaging::SchedulerParams, AsyncBackingParams},
frame_support::assert_ok,
frame_system::pallet_prelude::BlockNumberFor,
primitives::runtime_api::runtime_decl_for_parachain_host::ParachainHostV11,
sp_core::{Decode, Encode},
sp_keystore::testing::MemoryKeystore,
sp_std::{collections::btree_map::BTreeMap, vec},
std::sync::Arc,
@@ -334,6 +339,305 @@ fn test_parathread_that_buys_core_has_affinity_and_can_produce() {
})
}

#[test]
fn test_on_demand_core_affinity_bound_to_core_gets_expired_at_session_boundaries() {
ExtBuilder::default()
.with_balances(vec![
// Alice gets extra tokens for her mapping deposit
(AccountId::from(ALICE), 210_000 * UNIT),
(AccountId::from(BOB), 100_000 * UNIT),
(AccountId::from(CHARLIE), 100_000 * UNIT),
(AccountId::from(DAVE), 100_000 * UNIT),
])
.with_config(pallet_configuration::HostConfiguration {
max_collators: 2,
min_orchestrator_collators: 0,
max_orchestrator_collators: 0,
collators_per_container: 2,
..Default::default()
})
.with_collators(vec![
(AccountId::from(ALICE), 210 * UNIT),
(AccountId::from(BOB), 100 * UNIT),
])
.with_para_ids(vec![ParaRegistrationParams {
para_id: 1000,
genesis_data: empty_genesis_data(),
block_production_credits: u32::MAX,
collator_assignment_credits: u32::MAX,
parathread_params: Some(tp_traits::ParathreadParams {
slot_frequency: SlotFrequency { min: 1, max: 1 },
}),
}])
.with_relay_config(runtime_parachains::configuration::HostConfiguration::<
BlockNumberFor<Runtime>,
> {
scheduler_params: SchedulerParams {
num_cores: 2,
// A very high number to avoid group rotation in tests.
// Otherwise it defaults to 1, which rotates groups every block.
group_rotation_frequency: 10000000,
ttl: 2,
..Default::default()
},
async_backing_params: AsyncBackingParams {
allowed_ancestry_len: 1,
max_candidate_depth: 0,
},
minimum_backing_votes: 1,
max_head_data_size: 5,
..Default::default()
})
.with_keystore(Arc::new(MemoryKeystore::new()))
.build()
.execute_with(|| {
run_to_block(2);
// Now the parathread should be there
assert!(Paras::is_parathread(1000u32.into()));
let alice_keys =
get_authority_keys_from_seed(&AccountId::from(ALICE).to_string(), None);

// Parathread should have collators
assert!(
authorities_for_container(1000u32.into()) == Some(vec![alice_keys.nimbus.clone()])
);

// let's buy a core
assert_ok!(OnDemandAssignmentProvider::place_order_allow_death(
origin_of(ALICE.into()),
100 * UNIT,
1000u32.into()
));

// We try producing a block while holding an on-demand core
let cores_with_backed: BTreeMap<_, _> =
vec![(1000u32, Session::validators().len() as u32)]
.into_iter()
.collect();

let inherent_data = ParasInherentTestBuilder::<Runtime>::new()
.set_backed_and_concluding_paras(cores_with_backed)
.build();
set_new_inherent_data(inherent_data);
run_block();

run_to_session(1);

// When we do run_to_session, we only run on_initialize of the block in which the session changed.
// Since we have not yet run on_finalize, the parathread is still bound to core 0.
let key = storage_map_final_key::<frame_support::Twox64Concat>(
"OnDemandAssignmentProvider",
"ParaIdAffinity",
&cumulus_primitives_core::ParaId::from(1000u32).encode(),
);
let value_before_session: Option<CoreAffinityCount> =
frame_support::storage::unhashed::get(key.as_ref());

assert_eq!(
value_before_session,
Some(CoreAffinityCount {
core_index: CoreIndex(0),
count: 1
})
);

// However, as soon as we run on_finalize of the current block (the session-boundary block),
// the affinity is removed, allowing parathread 1000 to use any available core, not just core 0.
// The latter is demonstrated in the following test.
end_block();
let value_after_session: Option<CoreAffinityCount> =
frame_support::storage::unhashed::get(key.as_ref());
assert_eq!(value_after_session, None);
})
}

#[test]
fn test_parathread_uses_0_and_then_1_after_parachain_onboarded() {
ExtBuilder::default()
.with_balances(vec![
// Alice gets extra tokens for her mapping deposit
(AccountId::from(ALICE), 210_000 * UNIT),
(AccountId::from(BOB), 100_000 * UNIT),
(AccountId::from(CHARLIE), 100_000 * UNIT),
(AccountId::from(DAVE), 100_000 * UNIT),
])
.with_config(pallet_configuration::HostConfiguration {
max_collators: 2,
min_orchestrator_collators: 0,
max_orchestrator_collators: 0,
collators_per_container: 1,
collators_per_parathread: 1,
..Default::default()
})
.with_collators(vec![
(AccountId::from(ALICE), 210 * UNIT),
(AccountId::from(BOB), 100 * UNIT),
])
.with_para_ids(vec![ParaRegistrationParams {
para_id: 1001,
genesis_data: empty_genesis_data(),
block_production_credits: u32::MAX,
collator_assignment_credits: u32::MAX,
parathread_params: Some(tp_traits::ParathreadParams {
slot_frequency: SlotFrequency { min: 1, max: 1 },
}),
}])
.with_relay_config(runtime_parachains::configuration::HostConfiguration::<
BlockNumberFor<Runtime>,
> {
scheduler_params: SchedulerParams {
num_cores: 2,
// A very high number to avoid group rotation in tests.
// Otherwise it defaults to 1, which rotates groups every block.
group_rotation_frequency: 10000000,
ttl: 2,
..Default::default()
},
async_backing_params: AsyncBackingParams {
allowed_ancestry_len: 1,
max_candidate_depth: 0,
},
minimum_backing_votes: 1,
max_head_data_size: 5,
..Default::default()
})
.with_keystore(Arc::new(MemoryKeystore::new()))
.build()
.execute_with(|| {
run_to_block(2);
// Now the parathread should be there
assert!(Paras::is_parathread(1001u32.into()));
let alice_keys =
get_authority_keys_from_seed(&AccountId::from(ALICE).to_string(), None);

// Parathread should have collators
assert!(
authorities_for_container(1001u32.into()) == Some(vec![alice_keys.nimbus.clone()])
);

// Register parachain
assert_ok!(ContainerRegistrar::register(
origin_of(ALICE.into()),
1000.into(),
empty_genesis_data()
));
assert_ok!(ContainerRegistrar::mark_valid_for_collating(
root_origin(),
1000.into()
));
assert_ok!(ParasSudoWrapper::sudo_schedule_para_initialize(
root_origin(),
1000.into(),
ParaGenesisArgs {
genesis_head: ParasInherentTestBuilder::<Runtime>::mock_head_data(),
validation_code: mock_validation_code(),
para_kind: ParaKind::Parachain,
},
));

assert_ok!(Paras::add_trusted_validation_code(
root_origin(),
mock_validation_code()
));

// The parathread now uses core 0, but once the parachain is onboarded (and gets collators),
// the parathread should use core 1.
// Let's go to the block right before the edge of session 2.
let epoch_duration = EpochDurationInBlocks::get();

run_to_block(2 * epoch_duration - 1);
// we are not a parachain yet
assert!(!Paras::is_parachain(1000u32.into()));
// we don't have authorities yet
assert_eq!(authorities_for_container(1000u32.into()), None);

// let's buy a core for parathread 1001
assert_ok!(OnDemandAssignmentProvider::place_order_allow_death(
origin_of(ALICE.into()),
100 * UNIT,
1001u32.into()
));

// We try producing a block while holding an on-demand core
let cores_with_backed: BTreeMap<_, _> =
vec![(1001u32, Session::validators().len() as u32)]
.into_iter()
.collect();

let inherent_data = ParasInherentTestBuilder::<Runtime>::new()
.set_backed_and_concluding_paras(cores_with_backed)
.build();
set_new_inherent_data(inherent_data.clone());
run_block();

let key = storage_map_final_key::<frame_support::Twox64Concat>(
"OnDemandAssignmentProvider",
"ParaIdAffinity",
&cumulus_primitives_core::ParaId::from(1001u32).encode(),
);
let value_before_session: Option<CoreAffinityCount> =
frame_support::storage::unhashed::get(key.as_ref());

// 1001 is bound to core 0!
assert_eq!(
value_before_session,
Some(CoreAffinityCount {
core_index: CoreIndex(0),
count: 1
})
);

// let's run to right after the session edge;
// we need one more block to trigger on_finalize
run_to_session(2);
run_block();
// Now the parachain should be onboarded
assert!(Paras::is_parachain(1000u32.into()));

let bob_keys = get_authority_keys_from_seed(&AccountId::from(BOB).to_string(), None);
// we should have authorities now: two sessions later, the para is already a parachain
assert_eq!(
authorities_for_container(1000u32.into()),
Some(vec![bob_keys.nimbus.clone()])
);

// Parachain 1000 occupies core 0 now, which means that if we buy a core (and use it)
// for parathread 1001, it should be assigned core 1.
// let's buy a core for 1001
assert_ok!(OnDemandAssignmentProvider::place_order_allow_death(
origin_of(ALICE.into()),
100 * UNIT,
1001u32.into()
));

// We try producing blocks for both paras, with 1001 on the on-demand core
let cores_with_backed: BTreeMap<_, _> = vec![
(1000u32, Session::validators().len() as u32),
(1001u32, Session::validators().len() as u32),
]
.into_iter()
.collect();

let inherent_data = ParasInherentTestBuilder::<Runtime>::new()
.set_backed_and_concluding_paras(cores_with_backed)
.build();

set_new_inherent_data(inherent_data);
run_block();

let value_after_session: Option<CoreAffinityCount> =
frame_support::storage::unhashed::get(key.as_ref());

// 1001 is bound to core 1!
assert_eq!(
value_after_session,
Some(CoreAffinityCount {
core_index: CoreIndex(1),
count: 1
})
);
})
}

#[test]
fn test_should_have_availability_for_registered_parachain() {
ExtBuilder::default()
@@ -411,3 +715,10 @@ fn test_should_have_availability_for_registered_parachain() {
assert_eq!(availability_after.len(), 1);
})
}

// we don't have access to the type, so mirroring its SCALE layout is the only thing we can do
#[derive(Encode, Decode, Debug, Default, Clone, Copy, PartialEq, scale_info::TypeInfo)]
pub struct CoreAffinityCount {
pub core_index: cumulus_primitives_core::relay_chain::CoreIndex,
pub count: u32,
}
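A minimal helper sketch combining the mirror type with the storage_map_final_key helper from common/mod.rs (the function name is illustrative; it assumes the pallet's private struct keeps exactly this field order, since SCALE decoding is positional and a layout change upstream would silently decode garbage):

// Read OnDemandAssignmentProvider::ParaIdAffinity(para_id) via the mirror type.
pub fn para_id_affinity(para_id: u32) -> Option<CoreAffinityCount> {
    let key = storage_map_final_key::<frame_support::Twox64Concat>(
        "OnDemandAssignmentProvider",
        "ParaIdAffinity",
        &cumulus_primitives_core::ParaId::from(para_id).encode(),
    );
    frame_support::storage::unhashed::get(key.as_ref())
}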