[reconfigurator] Only place Crucible zones on provisionable zpools (#5601)

Previously, the planner placed Crucible zones on zpools unconditionally.
Now, we respect the policy and state of each disk, limiting which disks
are eligible targets for these zones.

Partial fix of #5372
smklein authored Apr 23, 2024
1 parent 86d13a7 commit 4a0d382
Showing 3 changed files with 136 additions and 2 deletions.
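
The change, distilled: instead of iterating every zpool on a sled, the planner now iterates only the zpools whose backing disks are both in-service (policy) and active (state). Below is a minimal standalone sketch of that idea; the enum and struct names mirror the diffs further down, but these are simplified stand-ins, not the real nexus_types definitions.

use std::collections::BTreeMap;

// Simplified stand-ins for illustration only; the real definitions live in
// nexus/types/src/deployment/planning_input.rs (see the diff below).
#[derive(Clone, Copy)]
enum PhysicalDiskPolicy {
    InService,
    Expunged,
}

#[derive(Clone, Copy)]
#[allow(dead_code)]
enum PhysicalDiskState {
    Active,
    Decommissioned,
}

struct SledDisk {
    policy: PhysicalDiskPolicy,
    state: PhysicalDiskState,
}

// A zpool is a valid Crucible target only if its backing disk is both
// in-service and active.
fn provisionable_zpools(
    zpools: &BTreeMap<String, SledDisk>,
) -> impl Iterator<Item = &String> + '_ {
    zpools.iter().filter_map(|(zpool, disk)| {
        matches!(
            (disk.policy, disk.state),
            (PhysicalDiskPolicy::InService, PhysicalDiskState::Active)
        )
        .then_some(zpool)
    })
}

fn main() {
    let mut zpools = BTreeMap::new();
    zpools.insert(
        "zpool-a".to_string(),
        SledDisk {
            policy: PhysicalDiskPolicy::InService,
            state: PhysicalDiskState::Active,
        },
    );
    zpools.insert(
        "zpool-b".to_string(),
        SledDisk {
            policy: PhysicalDiskPolicy::Expunged,
            state: PhysicalDiskState::Active,
        },
    );
    // Only zpool-a qualifies; zpool-b's disk has been expunged.
    for zpool in provisionable_zpools(&zpools) {
        println!("would ensure a Crucible zone on {zpool}");
    }
}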
85 changes: 83 additions & 2 deletions nexus/reconfigurator/planning/src/planner.rs
@@ -13,6 +13,7 @@ use crate::blueprint_builder::Error;
use nexus_types::deployment::Blueprint;
use nexus_types::deployment::PlanningInput;
use nexus_types::deployment::SledFilter;
use nexus_types::deployment::ZpoolFilter;
use nexus_types::external_api::views::SledPolicy;
use nexus_types::external_api::views::SledState;
use nexus_types::inventory::Collection;
@@ -196,9 +197,9 @@ impl<'a> Planner<'a> {
continue;
}

- // Every zpool on the sled should have a Crucible zone on it.
+ // Every provisionable zpool on the sled should have a Crucible zone on it.
let mut ncrucibles_added = 0;
- for zpool_id in sled_resources.zpools.keys() {
+ for zpool_id in sled_resources.all_zpools(ZpoolFilter::InService) {
if self
.blueprint
.sled_ensure_zone_crucible(sled_id, *zpool_id)?
@@ -414,8 +415,11 @@ mod test {
use nexus_types::external_api::views::SledState;
use nexus_types::inventory::OmicronZonesFound;
use omicron_common::api::external::Generation;
use omicron_common::disk::DiskIdentity;
use omicron_test_utils::dev::test_setup_log;
use omicron_uuid_kinds::GenericUuid;
use omicron_uuid_kinds::PhysicalDiskUuid;
use omicron_uuid_kinds::ZpoolUuid;
use std::collections::HashMap;

/// Runs through a basic sequence of blueprints for adding a sled
@@ -760,6 +764,83 @@ mod test {
logctx.cleanup_successful();
}

#[test]
fn test_crucible_allocation_skips_nonprovisionable_disks() {
static TEST_NAME: &str =
"planner_crucible_allocation_skips_nonprovisionable_disks";
let logctx = test_setup_log(TEST_NAME);

// Create an example system with a single sled
let (collection, input, blueprint1) =
example(&logctx.log, TEST_NAME, 1);

let mut builder = input.into_builder();

// Avoid churning on the quantity of Nexus zones - we're okay staying at
// one.
builder.policy_mut().target_nexus_zone_count = 1;

let new_sled_disk = |policy| nexus_types::deployment::SledDisk {
disk_identity: DiskIdentity {
vendor: "test-vendor".to_string(),
serial: "test-serial".to_string(),
model: "test-model".to_string(),
},
disk_id: PhysicalDiskUuid::new_v4(),
policy,
state: nexus_types::external_api::views::PhysicalDiskState::Active,
};

let (_, sled_details) = builder.sleds_mut().iter_mut().next().unwrap();

// Inject some new disks into the input.
//
// These counts are arbitrary, as long as they're non-zero
// for the sake of the test.

const NEW_IN_SERVICE_DISKS: usize = 2;
const NEW_EXPUNGED_DISKS: usize = 1;

for _ in 0..NEW_IN_SERVICE_DISKS {
sled_details.resources.zpools.insert(
ZpoolUuid::new_v4(),
new_sled_disk(nexus_types::external_api::views::PhysicalDiskPolicy::InService),
);
}
for _ in 0..NEW_EXPUNGED_DISKS {
sled_details.resources.zpools.insert(
ZpoolUuid::new_v4(),
new_sled_disk(nexus_types::external_api::views::PhysicalDiskPolicy::Expunged),
);
}

let input = builder.build();

let blueprint2 = Planner::new_based_on(
logctx.log.clone(),
&blueprint1,
&input,
"test: some new disks",
&collection,
)
.expect("failed to create planner")
.with_rng_seed((TEST_NAME, "bp2"))
.plan()
.expect("failed to plan");

let diff = blueprint2.diff_since_blueprint(&blueprint1).unwrap();
println!("1 -> 2 (some new disks, one expunged):\n{}", diff.display());
let mut modified_sleds = diff.sleds_modified();
assert_eq!(modified_sleds.len(), 1);
let (_, diff_modified) = modified_sleds.next().unwrap();

// We should be adding a Crucible zone for each new in-service disk.
assert_eq!(diff_modified.zones_added().count(), NEW_IN_SERVICE_DISKS);
assert_eq!(diff_modified.zones_removed().len(), 0);

logctx.cleanup_successful();
}

/// Check that the planner will skip non-provisionable sleds when allocating
/// extra Nexus zones
#[test]
1 change: 1 addition & 0 deletions nexus/types/src/deployment.rs
@@ -55,6 +55,7 @@ pub use planning_input::SledDetails;
pub use planning_input::SledDisk;
pub use planning_input::SledFilter;
pub use planning_input::SledResources;
pub use planning_input::ZpoolFilter;
pub use zone_type::blueprint_zone_type;
pub use zone_type::BlueprintZoneType;

52 changes: 52 additions & 0 deletions nexus/types/src/deployment/planning_input.rs
@@ -42,6 +42,12 @@ pub struct SledDisk {
pub state: PhysicalDiskState,
}

impl SledDisk {
fn provisionable(&self) -> bool {
DiskFilter::InService.matches_policy_and_state(self.policy, self.state)
}
}

/// Filters that apply to disks.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum DiskFilter {
@@ -70,6 +76,34 @@ impl DiskFilter {
}
}

/// Filters that apply to zpools.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ZpoolFilter {
/// All zpools
All,

/// All zpools which are in-service.
InService,
}

impl ZpoolFilter {
fn matches_policy_and_state(
self,
policy: PhysicalDiskPolicy,
state: PhysicalDiskState,
) -> bool {
match self {
ZpoolFilter::All => true,
ZpoolFilter::InService => match (policy, state) {
(PhysicalDiskPolicy::InService, PhysicalDiskState::Active) => {
true
}
_ => false,
},
}
}
}

/// Describes the resources available on each sled for the planner
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SledResources {
@@ -87,6 +121,24 @@ pub struct SledResources {
}

impl SledResources {
/// Returns if the zpool is provisionable (known, in-service, and active).
pub fn zpool_is_provisionable(&self, zpool: &ZpoolUuid) -> bool {
let Some(disk) = self.zpools.get(zpool) else { return false };
disk.provisionable()
}

/// Returns all zpools matching the given filter.
pub fn all_zpools(
&self,
filter: ZpoolFilter,
) -> impl Iterator<Item = &ZpoolUuid> + '_ {
self.zpools.iter().filter_map(move |(zpool, disk)| {
filter
.matches_policy_and_state(disk.policy, disk.state)
.then_some(zpool)
})
}

pub fn all_disks(
&self,
filter: DiskFilter,
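
Taken together, the new public surface is small: the ZpoolFilter enum plus two accessors on SledResources. A hedged usage sketch follows, assuming a SledResources value already built from the planning input; the check_zpool_filters function is hypothetical, written only to exercise the API shown in the diff above.

use nexus_types::deployment::{SledResources, ZpoolFilter};

// Hypothetical helper: every zpool yielded by the InService filter should
// also pass the per-zpool check, and the in-service set can never be
// larger than the full set.
fn check_zpool_filters(resources: &SledResources) {
    for zpool in resources.all_zpools(ZpoolFilter::InService) {
        assert!(resources.zpool_is_provisionable(zpool));
    }
    let all = resources.all_zpools(ZpoolFilter::All).count();
    let in_service = resources.all_zpools(ZpoolFilter::InService).count();
    assert!(in_service <= all);
}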
