diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs
index a252f9b821..1238692b67 100644
--- a/nexus/reconfigurator/planning/src/planner.rs
+++ b/nexus/reconfigurator/planning/src/planner.rs
@@ -13,6 +13,7 @@ use crate::blueprint_builder::Error;
 use nexus_types::deployment::Blueprint;
 use nexus_types::deployment::PlanningInput;
 use nexus_types::deployment::SledFilter;
+use nexus_types::deployment::ZpoolFilter;
 use nexus_types::external_api::views::SledPolicy;
 use nexus_types::external_api::views::SledState;
 use nexus_types::inventory::Collection;
@@ -196,9 +197,9 @@ impl<'a> Planner<'a> {
                 continue;
             }
 
-            // Every zpool on the sled should have a Crucible zone on it.
+            // Every provisionable zpool on the sled should have a Crucible zone on it.
             let mut ncrucibles_added = 0;
-            for zpool_id in sled_resources.zpools.keys() {
+            for zpool_id in sled_resources.all_zpools(ZpoolFilter::InService) {
                 if self
                     .blueprint
                     .sled_ensure_zone_crucible(sled_id, *zpool_id)?
@@ -414,8 +415,11 @@ mod test {
     use nexus_types::external_api::views::SledState;
     use nexus_types::inventory::OmicronZonesFound;
     use omicron_common::api::external::Generation;
+    use omicron_common::disk::DiskIdentity;
     use omicron_test_utils::dev::test_setup_log;
     use omicron_uuid_kinds::GenericUuid;
+    use omicron_uuid_kinds::PhysicalDiskUuid;
+    use omicron_uuid_kinds::ZpoolUuid;
     use std::collections::HashMap;
 
     /// Runs through a basic sequence of blueprints for adding a sled
@@ -760,6 +764,83 @@ mod test {
         logctx.cleanup_successful();
     }
 
+    #[test]
+    fn test_crucible_allocation_skips_nonprovisionable_disks() {
+        static TEST_NAME: &str =
+            "planner_crucible_allocation_skips_nonprovisionable_disks";
+        let logctx = test_setup_log(TEST_NAME);
+
+        // Create an example system with a single sled
+        let (collection, input, blueprint1) =
+            example(&logctx.log, TEST_NAME, 1);
+
+        let mut builder = input.into_builder();
+
+        // Avoid churning on the quantity of Nexus zones - we're okay staying at
+        // one.
+        builder.policy_mut().target_nexus_zone_count = 1;
+
+        let new_sled_disk = |policy| nexus_types::deployment::SledDisk {
+            disk_identity: DiskIdentity {
+                vendor: "test-vendor".to_string(),
+                serial: "test-serial".to_string(),
+                model: "test-model".to_string(),
+            },
+            disk_id: PhysicalDiskUuid::new_v4(),
+            policy,
+            state: nexus_types::external_api::views::PhysicalDiskState::Active,
+        };
+
+        let (_, sled_details) = builder.sleds_mut().iter_mut().next().unwrap();
+
+        // Inject some new disks into the input.
+        //
+        // These counts are arbitrary, as long as they're non-zero
+        // for the sake of the test.
+
+        const NEW_IN_SERVICE_DISKS: usize = 2;
+        const NEW_EXPUNGED_DISKS: usize = 1;
+
+        for _ in 0..NEW_IN_SERVICE_DISKS {
+            sled_details.resources.zpools.insert(
+                ZpoolUuid::new_v4(),
+                new_sled_disk(nexus_types::external_api::views::PhysicalDiskPolicy::InService),
+            );
+        }
+        for _ in 0..NEW_EXPUNGED_DISKS {
+            sled_details.resources.zpools.insert(
+                ZpoolUuid::new_v4(),
+                new_sled_disk(nexus_types::external_api::views::PhysicalDiskPolicy::Expunged),
+            );
+        }
+
+        let input = builder.build();
+
+        let blueprint2 = Planner::new_based_on(
+            logctx.log.clone(),
+            &blueprint1,
+            &input,
+            "test: some new disks",
+            &collection,
+        )
+        .expect("failed to create planner")
+        .with_rng_seed((TEST_NAME, "bp2"))
+        .plan()
+        .expect("failed to plan");
+
+        let diff = blueprint2.diff_since_blueprint(&blueprint1).unwrap();
+        println!("1 -> 2 (some new disks, one expunged):\n{}", diff.display());
+        let mut modified_sleds = diff.sleds_modified();
+        assert_eq!(modified_sleds.len(), 1);
+        let (_, diff_modified) = modified_sleds.next().unwrap();
+
+        // We should be adding a Crucible zone for each new in-service disk.
+        assert_eq!(diff_modified.zones_added().count(), NEW_IN_SERVICE_DISKS);
+        assert_eq!(diff_modified.zones_removed().len(), 0);
+
+        logctx.cleanup_successful();
+    }
+
     /// Check that the planner will skip non-provisionable sleds when allocating
     /// extra Nexus zones
     #[test]
diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs
index 36f87a2eb3..5ded144c94 100644
--- a/nexus/types/src/deployment.rs
+++ b/nexus/types/src/deployment.rs
@@ -55,6 +55,7 @@ pub use planning_input::SledDetails;
 pub use planning_input::SledDisk;
 pub use planning_input::SledFilter;
 pub use planning_input::SledResources;
+pub use planning_input::ZpoolFilter;
 pub use zone_type::blueprint_zone_type;
 pub use zone_type::BlueprintZoneType;
 
diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs
index 2503ff81f3..0a4c12b11c 100644
--- a/nexus/types/src/deployment/planning_input.rs
+++ b/nexus/types/src/deployment/planning_input.rs
@@ -42,6 +42,12 @@ pub struct SledDisk {
     pub state: PhysicalDiskState,
 }
 
+impl SledDisk {
+    fn provisionable(&self) -> bool {
+        DiskFilter::InService.matches_policy_and_state(self.policy, self.state)
+    }
+}
+
 /// Filters that apply to disks.
 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
 pub enum DiskFilter {
@@ -70,6 +76,34 @@ impl DiskFilter {
     }
 }
 
+/// Filters that apply to zpools.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum ZpoolFilter {
+    /// All zpools
+    All,
+
+    /// All zpools which are in-service.
+    InService,
+}
+
+impl ZpoolFilter {
+    fn matches_policy_and_state(
+        self,
+        policy: PhysicalDiskPolicy,
+        state: PhysicalDiskState,
+    ) -> bool {
+        match self {
+            ZpoolFilter::All => true,
+            ZpoolFilter::InService => match (policy, state) {
+                (PhysicalDiskPolicy::InService, PhysicalDiskState::Active) => {
+                    true
+                }
+                _ => false,
+            },
+        }
+    }
+}
+
 /// Describes the resources available on each sled for the planner
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct SledResources {
@@ -87,6 +121,24 @@ pub struct SledResources {
 }
 
 impl SledResources {
+    /// Returns true if the zpool is provisionable (known, in-service, and active).
+    pub fn zpool_is_provisionable(&self, zpool: &ZpoolUuid) -> bool {
+        let Some(disk) = self.zpools.get(zpool) else { return false };
+        disk.provisionable()
+    }
+
+    /// Returns all zpools matching the given filter.
+    pub fn all_zpools(
+        &self,
+        filter: ZpoolFilter,
+    ) -> impl Iterator<Item = &ZpoolUuid> + '_ {
+        self.zpools.iter().filter_map(move |(zpool, disk)| {
+            filter
+                .matches_policy_and_state(disk.policy, disk.state)
+                .then_some(zpool)
+        })
+    }
+
     pub fn all_disks(
         &self,
         filter: DiskFilter,