From 5c2e5b0ec82b25f14d2d76451c05413c4f798961 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 18 Jul 2024 10:47:02 -0700 Subject: [PATCH 01/84] [wip] Starting sled agent API to manage datasets explicitly --- illumos-utils/src/zfs.rs | 44 +++++- sled-agent/src/backing_fs.rs | 3 +- sled-agent/src/http_entrypoints.rs | 39 +++++- sled-agent/src/params.rs | 2 + sled-agent/src/sled_agent.rs | 33 ++++- sled-storage/src/dataset.rs | 25 +++- sled-storage/src/disk.rs | 63 +++++++++ sled-storage/src/manager.rs | 216 ++++++++++++++++++++++++++++- sled-storage/src/resources.rs | 17 +++ uuid-kinds/src/lib.rs | 1 + 10 files changed, 419 insertions(+), 24 deletions(-) diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index 139e6fe607..21de2a50da 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -203,7 +203,8 @@ pub struct EncryptionDetails { #[derive(Debug, Default)] pub struct SizeDetails { pub quota: Option, - pub compression: Option<&'static str>, + pub reservation: Option, + pub compression: Option, } #[cfg_attr(any(test, feature = "testing"), mockall::automock, allow(dead_code))] @@ -274,10 +275,18 @@ impl Zfs { ) -> Result<(), EnsureFilesystemError> { let (exists, mounted) = Self::dataset_exists(name, &mountpoint)?; if exists { - if let Some(SizeDetails { quota, compression }) = size_details { + if let Some(SizeDetails { quota, reservation, compression }) = + size_details + { // apply quota and compression mode (in case they've changed across // sled-agent versions since creation) - Self::apply_properties(name, &mountpoint, quota, compression)?; + Self::apply_properties( + name, + &mountpoint, + quota, + reservation, + compression, + )?; } if encryption_details.is_none() { @@ -351,9 +360,17 @@ impl Zfs { })?; } - if let Some(SizeDetails { quota, compression }) = size_details { + if let Some(SizeDetails { quota, reservation, compression }) = + size_details + { // Apply any quota and compression mode. 
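+            // (quota, reservation, and compression are each optional; any that
+            // are present are set one property at a time by `apply_properties`
+            // below.)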
- Self::apply_properties(name, &mountpoint, quota, compression)?; + Self::apply_properties( + name, + &mountpoint, + quota, + reservation, + compression, + )?; } Ok(()) @@ -363,7 +380,8 @@ impl Zfs { name: &str, mountpoint: &Mountpoint, quota: Option, - compression: Option<&'static str>, + reservation: Option, + compression: Option, ) -> Result<(), EnsureFilesystemError> { if let Some(quota) = quota { if let Err(err) = @@ -377,8 +395,20 @@ impl Zfs { }); } } + if let Some(reservation) = reservation { + if let Err(err) = + Self::set_value(name, "reservation", &format!("{reservation}")) + { + return Err(EnsureFilesystemError { + name: name.to_string(), + mountpoint: mountpoint.clone(), + // Take the execution error from the SetValueError + err: err.err.into(), + }); + } + } if let Some(compression) = compression { - if let Err(err) = Self::set_value(name, "compression", compression) + if let Err(err) = Self::set_value(name, "compression", &compression) { return Err(EnsureFilesystemError { name: name.to_string(), diff --git a/sled-agent/src/backing_fs.rs b/sled-agent/src/backing_fs.rs index 2e9ea4c8d9..48002a8841 100644 --- a/sled-agent/src/backing_fs.rs +++ b/sled-agent/src/backing_fs.rs @@ -137,7 +137,8 @@ pub(crate) fn ensure_backing_fs( let size_details = Some(SizeDetails { quota: bfs.quota, - compression: bfs.compression, + reservation: None, + compression: bfs.compression.map(|s| s.to_string()), }); Zfs::ensure_filesystem( diff --git a/sled-agent/src/http_entrypoints.rs b/sled-agent/src/http_entrypoints.rs index 2612e504f5..9c1d5a4e11 100644 --- a/sled-agent/src/http_entrypoints.rs +++ b/sled-agent/src/http_entrypoints.rs @@ -8,11 +8,12 @@ use super::sled_agent::SledAgent; use crate::bootstrap::early_networking::EarlyNetworkConfig; use crate::bootstrap::params::AddSledRequest; use crate::params::{ - BootstoreStatus, CleanupContextUpdate, DiskEnsureBody, InstanceEnsureBody, - InstanceExternalIpBody, InstancePutMigrationIdsBody, InstancePutStateBody, - InstancePutStateResponse, InstanceUnregisterResponse, Inventory, - OmicronPhysicalDisksConfig, OmicronZonesConfig, SledRole, TimeSync, - VpcFirewallRulesEnsureBody, ZoneBundleId, ZoneBundleMetadata, Zpool, + BootstoreStatus, CleanupContextUpdate, DatasetsConfig, DiskEnsureBody, + InstanceEnsureBody, InstanceExternalIpBody, InstancePutMigrationIdsBody, + InstancePutStateBody, InstancePutStateResponse, InstanceUnregisterResponse, + Inventory, OmicronPhysicalDisksConfig, OmicronZonesConfig, SledRole, + TimeSync, VpcFirewallRulesEnsureBody, ZoneBundleId, ZoneBundleMetadata, + Zpool, }; use crate::sled_agent::Error as SledAgentError; use crate::zone_bundle; @@ -38,6 +39,7 @@ use omicron_uuid_kinds::{GenericUuid, InstanceUuid}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use sled_hardware::DiskVariant; +use sled_storage::resources::DatasetsManagementResult; use sled_storage::resources::DisksManagementResult; use std::collections::BTreeMap; use uuid::Uuid; @@ -62,6 +64,8 @@ pub fn api() -> SledApiDescription { api.register(omicron_zones_get)?; api.register(omicron_zones_put)?; api.register(zones_list)?; + api.register(datasets_get)?; + api.register(datasets_put)?; api.register(omicron_physical_disks_get)?; api.register(omicron_physical_disks_put)?; api.register(zone_bundle_list)?; @@ -345,6 +349,31 @@ async fn omicron_zones_get( Ok(HttpResponseOk(sa.omicron_zones_list().await?)) } +#[endpoint { + method = PUT, + path = "/datasets", +}] +async fn datasets_put( + rqctx: RequestContext, + body: TypedBody, +) -> Result, HttpError> { 
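+    // Illustrative request body (a sketch; the values are made up and
+    // `DatasetsConfig` defines the authoritative shape):
+    //
+    // {
+    //   "generation": 2,
+    //   "datasets": [{
+    //     "id": "d4e1e554-7b98-4413-809e-4a42561c3d0c",
+    //     "name": { "pool_name": "oxp_<zpool-uuid>", "kind": { "type": "crucible" } },
+    //     "compression": null,
+    //     "quota": null,
+    //     "reservation": null
+    //   }]
+    // }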
+ let sa = rqctx.context(); + let body_args = body.into_inner(); + let result = sa.datasets_ensure(body_args).await?; + Ok(HttpResponseOk(result)) +} + +#[endpoint { + method = GET, + path = "/datasets", +}] +async fn datasets_get( + rqctx: RequestContext, +) -> Result, HttpError> { + let sa = rqctx.context(); + Ok(HttpResponseOk(sa.datasets_list().await?)) +} + #[endpoint { method = PUT, path = "/omicron-physical-disks", diff --git a/sled-agent/src/params.rs b/sled-agent/src/params.rs index 465a4abb56..836b030a87 100644 --- a/sled-agent/src/params.rs +++ b/sled-agent/src/params.rs @@ -300,6 +300,8 @@ pub type OmicronPhysicalDiskConfig = sled_storage::disk::OmicronPhysicalDiskConfig; pub type OmicronPhysicalDisksConfig = sled_storage::disk::OmicronPhysicalDisksConfig; +pub type DatasetConfig = sled_storage::disk::DatasetConfig; +pub type DatasetsConfig = sled_storage::disk::DatasetsConfig; /// Describes the set of Omicron-managed zones running on a sled #[derive( diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index dc946c1bfa..66e457b181 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -19,11 +19,12 @@ use crate::nexus::{ NexusNotifierTask, }; use crate::params::{ - DiskStateRequested, InstanceExternalIpBody, InstanceHardware, - InstanceMetadata, InstanceMigrationSourceParams, InstancePutStateResponse, - InstanceStateRequested, InstanceUnregisterResponse, Inventory, - OmicronPhysicalDisksConfig, OmicronZonesConfig, SledRole, TimeSync, - VpcFirewallRule, ZoneBundleMetadata, Zpool, + DatasetsConfig, DiskStateRequested, InstanceExternalIpBody, + InstanceHardware, InstanceMetadata, InstanceMigrationSourceParams, + InstancePutStateResponse, InstanceStateRequested, + InstanceUnregisterResponse, Inventory, OmicronPhysicalDisksConfig, + OmicronZonesConfig, SledRole, TimeSync, VpcFirewallRule, + ZoneBundleMetadata, Zpool, }; use crate::probe_manager::ProbeManager; use crate::services::{self, ServiceManager}; @@ -67,6 +68,7 @@ use sled_hardware::{underlay, HardwareManager}; use sled_hardware_types::underlay::BootstrapInterface; use sled_hardware_types::Baseboard; use sled_storage::manager::StorageHandle; +use sled_storage::resources::DatasetsManagementResult; use sled_storage::resources::DisksManagementResult; use slog::Logger; use std::collections::BTreeMap; @@ -803,6 +805,25 @@ impl SledAgent { self.inner.zone_bundler.cleanup().await.map_err(Error::from) } + pub async fn datasets_list(&self) -> Result { + Ok(self.storage().datasets_list().await?) + } + + pub async fn datasets_ensure( + &self, + config: DatasetsConfig, + ) -> Result { + info!(self.log, "datasets ensure"); + let datasets_result = self.storage().datasets_ensure(config).await?; + info!(self.log, "datasets ensure: Updated storage"); + + // TODO: See omicron_physical_disks_ensure, below - do we similarly + // need to ensure that old datasets are no longer in-use before we + // return here? + + Ok(datasets_result) + } + /// Requests the set of physical disks currently managed by the Sled Agent. /// /// This should be contrasted by the set of disks in the inventory, which @@ -891,7 +912,7 @@ impl SledAgent { &self, requested_zones: OmicronZonesConfig, ) -> Result<(), Error> { - // TODO: + // TODO(https://github.com/oxidecomputer/omicron/issues/6043): // - If these are the set of filesystems, we should also consider // removing the ones which are not listed here. 
// - It's probably worth sending a bulk request to the storage system, diff --git a/sled-storage/src/dataset.rs b/sled-storage/src/dataset.rs index 7846826ee8..c1267f81b6 100644 --- a/sled-storage/src/dataset.rs +++ b/sled-storage/src/dataset.rs @@ -129,7 +129,16 @@ impl ExpectedDataset { /// The type of a dataset, and an auxiliary information necessary /// to successfully launch a zone managing the associated data. #[derive( - Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, + Clone, + Debug, + Deserialize, + Serialize, + JsonSchema, + PartialEq, + Eq, + Hash, + Ord, + PartialOrd, )] #[serde(tag = "type", rename_all = "snake_case")] pub enum DatasetKind { @@ -198,7 +207,16 @@ impl std::fmt::Display for DatasetKind { } #[derive( - Debug, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, JsonSchema, + Debug, + PartialEq, + Eq, + Hash, + Serialize, + Deserialize, + Clone, + JsonSchema, + PartialOrd, + Ord, )] pub struct DatasetName { // A unique identifier for the Zpool on which the dataset is stored. @@ -412,7 +430,8 @@ pub(crate) async fn ensure_zpool_has_datasets( let encryption_details = None; let size_details = Some(SizeDetails { quota: dataset.quota, - compression: dataset.compression, + reservation: None, + compression: dataset.compression.map(|s| s.to_string()), }); Zfs::ensure_filesystem( name, diff --git a/sled-storage/src/disk.rs b/sled-storage/src/disk.rs index c67cce0dfc..982e2bee26 100644 --- a/sled-storage/src/disk.rs +++ b/sled-storage/src/disk.rs @@ -12,6 +12,7 @@ use omicron_common::api::external::Generation; use omicron_common::disk::DiskIdentity; use omicron_common::ledger::Ledgerable; use omicron_common::zpool_name::{ZpoolKind, ZpoolName}; +use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::ZpoolUuid; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -43,6 +44,68 @@ pub struct OmicronPhysicalDiskConfig { pub pool_id: ZpoolUuid, } +/// Configuration information necessary to request a single dataset +#[derive( + Clone, + Debug, + Deserialize, + Serialize, + JsonSchema, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, +)] +pub struct DatasetConfig { + /// The UUID of the dataset being requested + pub id: DatasetUuid, + + /// The dataset's name + pub name: dataset::DatasetName, + + /// The compression mode to be supplied, if any + pub compression: Option, + + /// The upper bound on the amount of storage used by this dataset + pub quota: Option, + + /// The lower bound on the amount of storage usable by this dataset + pub reservation: Option, +} + +#[derive( + Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, +)] +pub struct DatasetsConfig { + /// generation number of this configuration + /// + /// This generation number is owned by the control plane (i.e., RSS or + /// Nexus, depending on whether RSS-to-Nexus handoff has happened). It + /// should not be bumped within Sled Agent. + /// + /// Sled Agent rejects attempts to set the configuration to a generation + /// older than the one it's currently running. + pub generation: Generation, + + pub datasets: Vec, +} + +impl Default for DatasetsConfig { + fn default() -> Self { + Self { generation: Generation::new(), datasets: vec![] } + } +} + +impl Ledgerable for DatasetsConfig { + fn is_newer_than(&self, other: &Self) -> bool { + self.generation > other.generation + } + + // No need to do this, the generation number is provided externally. 
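+    //
+    // (`Ledgerable` would normally bump the generation on every commit; since
+    // the control plane owns it, the bump is a no-op and `is_newer_than` above
+    // decides which on-disk copy wins.)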
+ fn generation_bump(&mut self) {} +} + #[derive( Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, )] diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index e081bc5034..21da17a8ad 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -8,9 +8,14 @@ use std::collections::HashSet; use crate::config::MountConfig; use crate::dataset::{DatasetName, CONFIG_DATASET}; -use crate::disk::{OmicronPhysicalDisksConfig, RawDisk}; +use crate::disk::{ + DatasetConfig, DatasetsConfig, OmicronPhysicalDisksConfig, RawDisk, +}; use crate::error::Error; -use crate::resources::{AllDisks, DisksManagementResult, StorageResources}; +use crate::resources::{ + AllDisks, DatasetManagementStatus, DatasetsManagementResult, + DisksManagementResult, StorageResources, +}; use camino::Utf8PathBuf; use debug_ignore::DebugIgnore; use futures::future::FutureExt; @@ -19,6 +24,8 @@ use illumos_utils::zpool::ZpoolName; use key_manager::StorageKeyRequester; use omicron_common::disk::DiskIdentity; use omicron_common::ledger::Ledger; +use omicron_uuid_kinds::DatasetUuid; +use omicron_uuid_kinds::GenericUuid; use sled_hardware::DiskVariant; use slog::{info, o, warn, Logger}; use std::future::Future; @@ -112,6 +119,16 @@ pub(crate) enum StorageRequest { tx: DebugIgnore>>, }, + DatasetsEnsure { + config: DatasetsConfig, + tx: DebugIgnore< + oneshot::Sender>, + >, + }, + DatasetsList { + tx: DebugIgnore>>, + }, + // Requests to explicitly manage or stop managing a set of devices OmicronPhysicalDisksEnsure { config: OmicronPhysicalDisksConfig, @@ -238,6 +255,31 @@ impl StorageHandle { rx.map(|result| result.unwrap()) } + pub async fn datasets_ensure( + &self, + config: DatasetsConfig, + ) -> Result { + let (tx, rx) = oneshot::channel(); + self.tx + .send(StorageRequest::DatasetsEnsure { config, tx: tx.into() }) + .await + .unwrap(); + + rx.await.unwrap() + } + + /// Reads the last value written to storage by + /// [Self::datasets_ensure]. + pub async fn datasets_list(&self) -> Result { + let (tx, rx) = oneshot::channel(); + self.tx + .send(StorageRequest::DatasetsList { tx: tx.into() }) + .await + .unwrap(); + + rx.await.unwrap() + } + pub async fn omicron_physical_disks_ensure( &self, config: OmicronPhysicalDisksConfig, @@ -320,6 +362,10 @@ impl StorageHandle { rx.await.unwrap() } + // TODO(https://github.com/oxidecomputer/omicron/issues/6043): + // + // Deprecate usage of this function, prefer to call "datasets_ensure" + // and ask for the set of all datasets from Nexus. pub async fn upsert_filesystem( &self, dataset_id: Uuid, @@ -426,6 +472,12 @@ impl StorageManager { self.ensure_using_exactly_these_disks(raw_disks).await; let _ = tx.0.send(Ok(())); } + StorageRequest::DatasetsEnsure { config, tx } => { + let _ = tx.0.send(self.datasets_ensure(config).await); + } + StorageRequest::DatasetsList { tx } => { + let _ = tx.0.send(self.datasets_list().await); + } StorageRequest::OmicronPhysicalDisksEnsure { config, tx } => { let _ = tx.0.send(self.omicron_physical_disks_ensure(config).await); @@ -592,6 +644,103 @@ impl StorageManager { Ok(()) } + async fn datasets_ensure( + &mut self, + mut config: DatasetsConfig, + ) -> Result { + let log = self.log.new(o!("request" => "datasets_ensure")); + + // Ensure that the datasets arrive in a consistent order + config.datasets.sort_by(|a, b| a.id.partial_cmp(&b.id).unwrap()); + + // We rely on the schema being stable across reboots -- observe + // "test_datasets_schema" below for that property guarantee. 
+ let ledger_paths = self.all_omicron_disk_ledgers().await; + let maybe_ledger = + Ledger::::new(&log, ledger_paths.clone()).await; + + let mut ledger = match maybe_ledger { + Some(ledger) => { + info!( + log, + "Comparing 'requested datasets' to ledger on internal storage" + ); + let ledger_data = ledger.data(); + if config.generation < ledger_data.generation { + warn!( + log, + "Request looks out-of-date compared to prior request" + ); + return Err(Error::PhysicalDiskConfigurationOutdated { + requested: config.generation, + current: ledger_data.generation, + }); + } + + // TODO: If the generation is equal, check that the values are + // also equal. + + info!(log, "Request looks newer than prior requests"); + ledger + } + None => { + info!(log, "No previously-stored 'requested datasets', creating new ledger"); + Ledger::::new_with( + &log, + ledger_paths.clone(), + DatasetsConfig::default(), + ) + } + }; + + let result = self.datasets_ensure_internal(&log, &config).await; + + let ledger_data = ledger.data_mut(); + if *ledger_data == config { + return Ok(result); + } + *ledger_data = config; + ledger.commit().await?; + + Ok(result) + } + + async fn datasets_ensure_internal( + &mut self, + log: &Logger, + config: &DatasetsConfig, + ) -> DatasetsManagementResult { + let mut status = vec![]; + for dataset in &config.datasets { + status.push(self.dataset_ensure_internal(log, dataset).await); + } + DatasetsManagementResult { status } + } + + async fn dataset_ensure_internal( + &mut self, + log: &Logger, + config: &DatasetConfig, + ) -> DatasetManagementStatus { + info!(log, "Ensuring dataset"; "name" => config.name.full_name()); + let mut status = DatasetManagementStatus { + dataset_name: config.name.clone(), + err: None, + }; + + if let Err(err) = self.ensure_dataset(config).await { + status.err = Some(err.to_string()); + }; + + status + } + + async fn datasets_list(&mut self) -> Result { + let log = self.log.new(o!("request" => "datasets_list")); + + todo!(); + } + // Makes an U.2 disk managed by the control plane within [`StorageResources`]. async fn omicron_physical_disks_ensure( &mut self, @@ -763,6 +912,60 @@ impl StorageManager { } } + // Ensures a dataset exists within a zpool, according to `config`. + async fn ensure_dataset( + &mut self, + config: &DatasetConfig, + ) -> Result<(), Error> { + info!(self.log, "ensure_dataset"; "config" => ?config); + if !self + .resources + .disks() + .iter_managed() + .any(|(_, disk)| disk.zpool_name() == config.name.pool()) + { + return Err(Error::ZpoolNotFound(format!( + "{}", + config.name.pool(), + ))); + } + + let zoned = true; + let fs_name = &config.name.full_name(); + let do_format = true; + let encryption_details = None; + let size_details = Some(illumos_utils::zfs::SizeDetails { + quota: config.quota, + reservation: config.reservation, + compression: config.compression.clone(), + }); + Zfs::ensure_filesystem( + fs_name, + Mountpoint::Path(Utf8PathBuf::from("/data")), + zoned, + do_format, + encryption_details, + size_details, + None, + )?; + // Ensure the dataset has a usable UUID. + if let Ok(id_str) = Zfs::get_oxide_value(&fs_name, "uuid") { + if let Ok(id) = id_str.parse::() { + if id != config.id { + return Err(Error::UuidMismatch { + name: Box::new(config.name.clone()), + old: id.into_untyped_uuid(), + new: config.id.into_untyped_uuid(), + }); + } + return Ok(()); + } + } + Zfs::set_oxide_value(&fs_name, "uuid", &config.id.to_string())?; + + Ok(()) + } + // Attempts to add a dataset within a zpool, according to `request`. 
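+    //
+    // Unlike `ensure_dataset` above, which is driven by the control-plane-owned
+    // `DatasetsConfig`, this is the older request-based path (see the TODO on
+    // `StorageHandle::upsert_filesystem`, which prefers `datasets_ensure` going
+    // forward).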
async fn add_dataset( &mut self, @@ -1320,4 +1523,13 @@ mod test { &serde_json::to_string_pretty(&schema).unwrap(), ); } + + #[test] + fn test_datasets_schema() { + let schema = schemars::schema_for!(DatasetsConfig); + expectorate::assert_contents( + "../schema/datasets.json", + &serde_json::to_string_pretty(&schema).unwrap(), + ); + } } diff --git a/sled-storage/src/resources.rs b/sled-storage/src/resources.rs index f02f62e0a6..a13e816d11 100644 --- a/sled-storage/src/resources.rs +++ b/sled-storage/src/resources.rs @@ -57,6 +57,23 @@ impl DiskManagementError { } } +/// Identifies how a single dataset management operation may have succeeded or +/// failed. +#[derive(Debug, JsonSchema, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub struct DatasetManagementStatus { + pub dataset_name: crate::dataset::DatasetName, + pub err: Option, +} + +/// The result from attempting to manage datasets. +#[derive(Default, Debug, JsonSchema, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[must_use = "this `DatasetManagementResult` may contain errors, which should be handled"] +pub struct DatasetsManagementResult { + pub status: Vec, +} + /// Identifies how a single disk management operation may have succeeded or /// failed. #[derive(Debug, JsonSchema, Serialize, Deserialize)] diff --git a/uuid-kinds/src/lib.rs b/uuid-kinds/src/lib.rs index 53acc9c1ed..ebc505d7e3 100644 --- a/uuid-kinds/src/lib.rs +++ b/uuid-kinds/src/lib.rs @@ -50,6 +50,7 @@ macro_rules! impl_typed_uuid_kind { impl_typed_uuid_kind! { Collection => "collection", + Dataset => "dataset", Downstairs => "downstairs", DownstairsRegion => "downstairs_region", ExternalIp => "external_ip", From a80313dc82fe3f7fbf7d0b6a86c93ce79ee9bc96 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 22 Jul 2024 11:55:44 -0700 Subject: [PATCH 02/84] list implementation --- sled-storage/src/error.rs | 6 ++++ sled-storage/src/manager.rs | 66 ++++++++++++++++++++++++++++++++----- 2 files changed, 64 insertions(+), 8 deletions(-) diff --git a/sled-storage/src/error.rs b/sled-storage/src/error.rs index 4c5582fd79..c10095ad6d 100644 --- a/sled-storage/src/error.rs +++ b/sled-storage/src/error.rs @@ -83,6 +83,12 @@ pub enum Error { current: Generation, }, + #[error("Dataset configuration out-of-date (asked for {requested}, but latest is {current})")] + DatasetConfigurationOutdated { + requested: Generation, + current: Generation, + }, + #[error("Failed to update ledger in internal storage")] Ledger(#[from] omicron_common::ledger::Error), diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index 21da17a8ad..f77e703829 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -67,6 +67,9 @@ const SYNCHRONIZE_INTERVAL: Duration = Duration::from_secs(10); // The filename of the ledger storing physical disk info const DISKS_LEDGER_FILENAME: &str = "omicron-physical-disks.json"; +// The filename of the ledger storing dataset info +const DATASETS_LEDGER_FILENAME: &str = "omicron-datasets.json"; + #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum StorageManagerState { // We know that any attempts to manage disks will fail, as the key manager @@ -535,6 +538,10 @@ impl StorageManager { ); } + // Sled Agents can remember which disks they need to manage by reading + // a configuration file from the M.2s. + // + // This function returns the paths to those configuration files. 
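+    //
+    // (One candidate path per M.2, ending in DISKS_LEDGER_FILENAME, i.e.
+    // "omicron-physical-disks.json"; `all_omicron_dataset_ledgers` below does
+    // the same with "omicron-datasets.json".)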
async fn all_omicron_disk_ledgers(&self) -> Vec { self.resources .disks() @@ -544,6 +551,19 @@ impl StorageManager { .collect() } + // Sled Agents can remember which datasets they need to manage by reading + // a configuration file from the M.2s. + // + // This function returns the paths to those configuration files. + async fn all_omicron_dataset_ledgers(&self) -> Vec { + self.resources + .disks() + .all_m2_mountpoints(CONFIG_DATASET) + .into_iter() + .map(|p| p.join(DATASETS_LEDGER_FILENAME)) + .collect() + } + // Manages a newly detected disk that has been attached to this sled. // // For U.2s: we update our inventory. @@ -595,9 +615,9 @@ impl StorageManager { self.resources.insert_or_update_disk(raw_disk).await } - async fn load_ledger(&self) -> Option> { + async fn load_disks_ledger(&self) -> Option> { let ledger_paths = self.all_omicron_disk_ledgers().await; - let log = self.log.new(o!("request" => "load_ledger")); + let log = self.log.new(o!("request" => "load_disks_ledger")); let maybe_ledger = Ledger::::new( &log, ledger_paths.clone(), @@ -629,7 +649,7 @@ impl StorageManager { // Now that we're actually able to unpack U.2s, attempt to load the // set of disks which we previously stored in the ledger, if one // existed. - let ledger = self.load_ledger().await; + let ledger = self.load_disks_ledger().await; if let Some(ledger) = ledger { info!(self.log, "Setting StorageResources state to match ledger"); @@ -641,6 +661,12 @@ impl StorageManager { info!(self.log, "KeyManager ready, but no ledger detected"); } + // We don't load any configuration for datasets, since we aren't + // currently storing any dataset information in-memory. + // + // If we ever wanted to do so, however, we could load that information + // here. + Ok(()) } @@ -655,7 +681,7 @@ impl StorageManager { // We rely on the schema being stable across reboots -- observe // "test_datasets_schema" below for that property guarantee. - let ledger_paths = self.all_omicron_disk_ledgers().await; + let ledger_paths = self.all_omicron_dataset_ledgers().await; let maybe_ledger = Ledger::::new(&log, ledger_paths.clone()).await; @@ -671,7 +697,7 @@ impl StorageManager { log, "Request looks out-of-date compared to prior request" ); - return Err(Error::PhysicalDiskConfigurationOutdated { + return Err(Error::DatasetConfigurationOutdated { requested: config.generation, current: ledger_data.generation, }); @@ -722,13 +748,15 @@ impl StorageManager { log: &Logger, config: &DatasetConfig, ) -> DatasetManagementStatus { - info!(log, "Ensuring dataset"; "name" => config.name.full_name()); + let log = log.new(o!("name" => config.name.full_name())); + info!(log, "Ensuring dataset"); let mut status = DatasetManagementStatus { dataset_name: config.name.clone(), err: None, }; if let Err(err) = self.ensure_dataset(config).await { + warn!(log, "Failed to ensure dataset"; "err" => ?err); status.err = Some(err.to_string()); }; @@ -738,7 +766,23 @@ impl StorageManager { async fn datasets_list(&mut self) -> Result { let log = self.log.new(o!("request" => "datasets_list")); - todo!(); + let ledger_paths = self.all_omicron_dataset_ledgers().await; + let maybe_ledger = Ledger::::new( + &log, + ledger_paths.clone(), + ) + .await; + + match maybe_ledger { + Some(ledger) => { + info!(log, "Found ledger on internal storage"); + return Ok(ledger.data().clone()); + } + None => { + info!(log, "No ledger detected on internal storage"); + return Err(Error::LedgerNotFound); + } + } } // Makes an U.2 disk managed by the control plane within [`StorageResources`]. 
@@ -918,6 +962,10 @@ impl StorageManager { config: &DatasetConfig, ) -> Result<(), Error> { info!(self.log, "ensure_dataset"; "config" => ?config); + + // We can only place datasets within managed disks. + // If a disk is attached to this sled, but not a part of the Control + // Plane, it is treated as "not found" for dataset placement. if !self .resources .disks() @@ -930,6 +978,8 @@ impl StorageManager { ))); } + // TODO: Revisit these args, they might need more configuration + // tweaking. let zoned = true; let fs_name = &config.name.full_name(); let do_format = true; @@ -1528,7 +1578,7 @@ mod test { fn test_datasets_schema() { let schema = schemars::schema_for!(DatasetsConfig); expectorate::assert_contents( - "../schema/datasets.json", + "../schema/omicron-datasets.json", &serde_json::to_string_pretty(&schema).unwrap(), ); } From 7193d1b65667dfd11fe60e6cf920bd1e3ea21c28 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 22 Jul 2024 13:16:59 -0700 Subject: [PATCH 03/84] Tests --- schema/omicron-datasets.json | 198 ++++++++++++++++++++++++++++++++++ sled-storage/src/disk.rs | 4 + sled-storage/src/error.rs | 8 +- sled-storage/src/manager.rs | 110 +++++++++++++++++-- sled-storage/src/resources.rs | 11 ++ 5 files changed, 316 insertions(+), 15 deletions(-) create mode 100644 schema/omicron-datasets.json diff --git a/schema/omicron-datasets.json b/schema/omicron-datasets.json new file mode 100644 index 0000000000..6d0617b5b3 --- /dev/null +++ b/schema/omicron-datasets.json @@ -0,0 +1,198 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "DatasetsConfig", + "type": "object", + "required": [ + "datasets", + "generation" + ], + "properties": { + "datasets": { + "type": "array", + "items": { + "$ref": "#/definitions/DatasetConfig" + } + }, + "generation": { + "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). 
It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.", + "allOf": [ + { + "$ref": "#/definitions/Generation" + } + ] + } + }, + "definitions": { + "DatasetConfig": { + "description": "Configuration information necessary to request a single dataset", + "type": "object", + "required": [ + "id", + "name" + ], + "properties": { + "compression": { + "description": "The compression mode to be supplied, if any", + "type": [ + "string", + "null" + ] + }, + "id": { + "description": "The UUID of the dataset being requested", + "allOf": [ + { + "$ref": "#/definitions/TypedUuidForDatasetKind" + } + ] + }, + "name": { + "description": "The dataset's name", + "allOf": [ + { + "$ref": "#/definitions/DatasetName" + } + ] + }, + "quota": { + "description": "The upper bound on the amount of storage used by this dataset", + "type": [ + "integer", + "null" + ], + "format": "uint", + "minimum": 0.0 + }, + "reservation": { + "description": "The lower bound on the amount of storage usable by this dataset", + "type": [ + "integer", + "null" + ], + "format": "uint", + "minimum": 0.0 + } + } + }, + "DatasetKind": { + "description": "The type of a dataset, and an auxiliary information necessary to successfully launch a zone managing the associated data.", + "oneOf": [ + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "cockroach_db" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "crucible" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "clickhouse" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "clickhouse_keeper" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "external_dns" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "internal_dns" + ] + } + } + } + ] + }, + "DatasetName": { + "type": "object", + "required": [ + "kind", + "pool_name" + ], + "properties": { + "kind": { + "$ref": "#/definitions/DatasetKind" + }, + "pool_name": { + "$ref": "#/definitions/ZpoolName" + } + } + }, + "Generation": { + "description": "Generation numbers stored in the database, used for optimistic concurrency control", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "TypedUuidForDatasetKind": { + "type": "string", + "format": "uuid" + }, + "ZpoolName": { + "title": "The name of a Zpool", + "description": "Zpool names are of the format ox{i,p}_. They are either Internal or External, and should be unique", + "type": "string", + "pattern": "^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + } + } +} \ No newline at end of file diff --git a/sled-storage/src/disk.rs b/sled-storage/src/disk.rs index 982e2bee26..7736bfe7ca 100644 --- a/sled-storage/src/disk.rs +++ b/sled-storage/src/disk.rs @@ -86,6 +86,10 @@ pub struct DatasetsConfig { /// /// Sled Agent rejects attempts to set the configuration to a generation /// older than the one it's currently running. + /// + /// Note that "Generation::new()", AKA, the first generation number, + /// is reserved for "no datasets". 
This is the default configuration + /// for a sled before any requests have been made. pub generation: Generation, pub datasets: Vec, diff --git a/sled-storage/src/error.rs b/sled-storage/src/error.rs index c10095ad6d..3b30df9e63 100644 --- a/sled-storage/src/error.rs +++ b/sled-storage/src/error.rs @@ -84,10 +84,10 @@ pub enum Error { }, #[error("Dataset configuration out-of-date (asked for {requested}, but latest is {current})")] - DatasetConfigurationOutdated { - requested: Generation, - current: Generation, - }, + DatasetConfigurationOutdated { requested: Generation, current: Generation }, + + #[error("Dataset configuration changed for the same generation number: {generation}")] + DatasetConfigurationChanged { generation: Generation }, #[error("Failed to update ledger in internal storage")] Ledger(#[from] omicron_common::ledger::Error), diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index f77e703829..06a6c5706f 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -27,7 +27,7 @@ use omicron_common::ledger::Ledger; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use sled_hardware::DiskVariant; -use slog::{info, o, warn, Logger}; +use slog::{error, info, o, warn, Logger}; use std::future::Future; use tokio::sync::{mpsc, oneshot, watch}; use tokio::time::{interval, Duration, MissedTickBehavior}; @@ -615,7 +615,9 @@ impl StorageManager { self.resources.insert_or_update_disk(raw_disk).await } - async fn load_disks_ledger(&self) -> Option> { + async fn load_disks_ledger( + &self, + ) -> Option> { let ledger_paths = self.all_omicron_disk_ledgers().await; let log = self.log.new(o!("request" => "load_disks_ledger")); let maybe_ledger = Ledger::::new( @@ -701,12 +703,24 @@ impl StorageManager { requested: config.generation, current: ledger_data.generation, }); - } + } else if config.generation == ledger_data.generation { + info!( + log, + "Requested geenration number matches prior request", + ); - // TODO: If the generation is equal, check that the values are - // also equal. + if ledger_data != &config { + error!(log, "Requested configuration changed (with the same generation)"); + return Err(Error::DatasetConfigurationChanged { + generation: config.generation, + }); + } + } - info!(log, "Request looks newer than prior requests"); + info!( + log, + "Request looks newer than (or identical to) prior requests" + ); ledger } None => { @@ -767,11 +781,8 @@ impl StorageManager { let log = self.log.new(o!("request" => "datasets_list")); let ledger_paths = self.all_omicron_dataset_ledgers().await; - let maybe_ledger = Ledger::::new( - &log, - ledger_paths.clone(), - ) - .await; + let maybe_ledger = + Ledger::::new(&log, ledger_paths.clone()).await; match maybe_ledger { Some(ledger) => { @@ -1082,6 +1093,7 @@ mod tests { use super::*; use camino_tempfile::tempdir_in; + use omicron_common::api::external::Generation; use omicron_common::ledger; use omicron_test_utils::dev::test_setup_log; use sled_hardware::DiskFirmware; @@ -1560,6 +1572,82 @@ mod tests { harness.cleanup().await; logctx.cleanup_successful(); } + + #[tokio::test] + async fn ensure_datasets() { + illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); + let logctx = test_setup_log("ensure_datasets"); + let mut harness = StorageManagerTestHarness::new(&logctx.log).await; + + // Test setup: Add a U.2 and M.2, adopt them into the "control plane" + // for usage. 
+ harness.handle().key_manager_ready().await; + let raw_disks = + harness.add_vdevs(&["u2_under_test.vdev", "m2_helping.vdev"]).await; + let config = harness.make_config(1, &raw_disks); + let result = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await + .expect("Ensuring disks should work after key manager is ready"); + assert!(!result.has_error(), "{:?}", result); + + // Create a dataset on the newly formatted U.2 + let id = DatasetUuid::new_v4(); + let zpool_name = ZpoolName::new_external(config.disks[0].pool_id); + let name = DatasetName::new(zpool_name.clone(), DatasetKind::Crucible); + let datasets = vec![DatasetConfig { + id, + name, + compression: None, + quota: None, + reservation: None, + }]; + // "Generation = 1" is reserved as "no requests seen yet", so we jump + // past it. + let generation = Generation::new().next(); + let mut config = DatasetsConfig { generation, datasets }; + + let status = + harness.handle().datasets_ensure(config.clone()).await.unwrap(); + assert!(!status.has_error()); + + // List datasets, expect to see what we just created + let observed_config = harness.handle().datasets_list().await.unwrap(); + assert_eq!(config, observed_config); + + // Calling "datasets_ensure" with the same input should succeed. + let status = + harness.handle().datasets_ensure(config.clone()).await.unwrap(); + assert!(!status.has_error()); + + let current_config_generation = config.generation; + let next_config_generation = config.generation.next(); + + // Calling "datasets_ensure" with an old generation should fail + config.generation = Generation::new(); + let err = + harness.handle().datasets_ensure(config.clone()).await.unwrap_err(); + assert!(matches!(err, Error::DatasetConfigurationOutdated { .. })); + + // However, calling it with a different input and the same generation + // number should fail. + config.generation = current_config_generation; + config.datasets[0].reservation = Some(1024); + let err = + harness.handle().datasets_ensure(config.clone()).await.unwrap_err(); + assert!(matches!(err, Error::DatasetConfigurationChanged { .. })); + + // If we bump the generation number while making a change, updated + // configs will work. + config.generation = next_config_generation; + let status = + harness.handle().datasets_ensure(config.clone()).await.unwrap(); + assert!(!status.has_error()); + + harness.cleanup().await; + logctx.cleanup_successful(); + } } #[cfg(test)] diff --git a/sled-storage/src/resources.rs b/sled-storage/src/resources.rs index a13e816d11..19313738af 100644 --- a/sled-storage/src/resources.rs +++ b/sled-storage/src/resources.rs @@ -74,6 +74,17 @@ pub struct DatasetsManagementResult { pub status: Vec, } +impl DatasetsManagementResult { + pub fn has_error(&self) -> bool { + for status in &self.status { + if status.err.is_some() { + return true; + } + } + false + } +} + /// Identifies how a single disk management operation may have succeeded or /// failed. 
#[derive(Debug, JsonSchema, Serialize, Deserialize)] From 9464cc1cdab1330d22666dde0cf89ceb60b18f50 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 22 Jul 2024 14:26:54 -0700 Subject: [PATCH 04/84] schemas n stuff --- openapi/sled-agent.json | 261 +++++++++++++++++++++++++++++++++++ schema/omicron-datasets.json | 2 +- 2 files changed, 262 insertions(+), 1 deletion(-) diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index 1323769da2..ac49552c76 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -177,6 +177,60 @@ } } }, + "/datasets": { + "get": { + "operationId": "datasets_get", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DatasetsConfig" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "operationId": "datasets_put", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DatasetsConfig" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DatasetsManagementResult" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/disks/{disk_id}": { "put": { "operationId": "disk_put", @@ -2103,6 +2157,209 @@ "target" ] }, + "DatasetConfig": { + "description": "Configuration information necessary to request a single dataset", + "type": "object", + "properties": { + "compression": { + "nullable": true, + "description": "The compression mode to be supplied, if any", + "type": "string" + }, + "id": { + "description": "The UUID of the dataset being requested", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + ] + }, + "name": { + "description": "The dataset's name", + "allOf": [ + { + "$ref": "#/components/schemas/DatasetName" + } + ] + }, + "quota": { + "nullable": true, + "description": "The upper bound on the amount of storage used by this dataset", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "reservation": { + "nullable": true, + "description": "The lower bound on the amount of storage usable by this dataset", + "type": "integer", + "format": "uint", + "minimum": 0 + } + }, + "required": [ + "id", + "name" + ] + }, + "DatasetKind": { + "description": "The type of a dataset, and an auxiliary information necessary to successfully launch a zone managing the associated data.", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "cockroach_db" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "crucible" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "clickhouse" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "clickhouse_keeper" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "external_dns" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "internal_dns" + ] + } + }, + 
"required": [ + "type" + ] + } + ] + }, + "DatasetManagementStatus": { + "description": "Identifies how a single dataset management operation may have succeeded or failed.", + "type": "object", + "properties": { + "dataset_name": { + "$ref": "#/components/schemas/DatasetName" + }, + "err": { + "nullable": true, + "type": "string" + } + }, + "required": [ + "dataset_name" + ] + }, + "DatasetName": { + "type": "object", + "properties": { + "kind": { + "$ref": "#/components/schemas/DatasetKind" + }, + "pool_name": { + "$ref": "#/components/schemas/ZpoolName" + } + }, + "required": [ + "kind", + "pool_name" + ] + }, + "DatasetsConfig": { + "type": "object", + "properties": { + "datasets": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DatasetConfig" + } + }, + "generation": { + "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.\n\nNote that \"Generation::new()\", AKA, the first generation number, is reserved for \"no datasets\". This is the default configuration for a sled before any requests have been made.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + } + }, + "required": [ + "datasets", + "generation" + ] + }, + "DatasetsManagementResult": { + "description": "The result from attempting to manage datasets.", + "type": "object", + "properties": { + "status": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DatasetManagementStatus" + } + } + }, + "required": [ + "status" + ] + }, "DhcpConfig": { "description": "DHCP configuration for a port\n\nNot present here: Hostname (DHCPv4 option 12; used in DHCPv6 option 39); we use `InstanceRuntimeState::hostname` for this value.", "type": "object", @@ -4856,6 +5113,10 @@ "sync" ] }, + "TypedUuidForDatasetKind": { + "type": "string", + "format": "uuid" + }, "TypedUuidForPropolisKind": { "type": "string", "format": "uuid" diff --git a/schema/omicron-datasets.json b/schema/omicron-datasets.json index 6d0617b5b3..35da5de627 100644 --- a/schema/omicron-datasets.json +++ b/schema/omicron-datasets.json @@ -14,7 +14,7 @@ } }, "generation": { - "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.", + "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.\n\nNote that \"Generation::new()\", AKA, the first generation number, is reserved for \"no datasets\". 
This is the default configuration for a sled before any requests have been made.", "allOf": [ { "$ref": "#/definitions/Generation" From 817a39729c2cce1b4eaba7c1a652580b2115afb5 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 24 Jul 2024 10:53:52 -0700 Subject: [PATCH 05/84] The sled agent side of datasets in inventory --- illumos-utils/src/zfs.rs | 214 +++++++++++++++++++++++++++++++ sled-agent/src/params.rs | 45 +++++++ sled-agent/src/sim/sled_agent.rs | 2 + sled-agent/src/sled_agent.rs | 44 +++++++ 4 files changed, 305 insertions(+) diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index 21de2a50da..7dfd574c97 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -5,9 +5,12 @@ //! Utilities for poking at ZFS. use crate::{execute, PFEXEC}; +use anyhow::Context; use camino::{Utf8Path, Utf8PathBuf}; use omicron_common::disk::DiskIdentity; +use omicron_uuid_kinds::DatasetUuid; use std::fmt; +use std::str::FromStr; // These locations in the ramdisk must only be used by the switch zone. // @@ -207,9 +210,71 @@ pub struct SizeDetails { pub compression: Option, } +#[derive(Debug)] +pub struct DatasetProperties { + /// The Uuid of the dataset + pub id: Option, + /// The full name of the dataset. + pub name: String, + /// Remaining space in the dataset and descendents. + pub avail: u64, + /// Space used by dataset and descendents. + pub used: u64, + /// Maximum space usable by dataset and descendents. + pub quota: Option, + /// Minimum space guaranteed to dataset and descendents. + pub reservation: Option, + /// The compression algorithm used for this dataset. + pub compression: String, +} + +impl FromStr for DatasetProperties { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + let mut iter = s.split_whitespace(); + + let id = match iter.next().context("Missing UUID")? { + "-" => None, + anything_else => Some(anything_else.parse::()?), + }; + + let name = iter.next().context("Missing 'name'")?.to_string(); + let avail = iter.next().context("Missing 'avail'")?.parse::()?; + let used = iter.next().context("Missing 'used'")?.parse::()?; + let quota = + match iter.next().context("Missing 'quota'")?.parse::()? { + 0 => None, + q => Some(q), + }; + let reservation = match iter + .next() + .context("Missing 'reservation'")? + .parse::()? + { + 0 => None, + r => Some(r), + }; + let compression = + iter.next().context("Missing 'compression'")?.to_string(); + + Ok(DatasetProperties { + id, + name, + avail, + used, + quota, + reservation, + compression, + }) + } +} + #[cfg_attr(any(test, feature = "testing"), mockall::automock, allow(dead_code))] impl Zfs { /// Lists all datasets within a pool or existing dataset. + /// + /// Strips the input `name` from the output dataset names. pub fn list_datasets(name: &str) -> Result, ListDatasetsError> { let mut command = std::process::Command::new(ZFS); let cmd = command.args(&["list", "-d", "1", "-rHpo", "name", name]); @@ -228,6 +293,38 @@ impl Zfs { Ok(filesystems) } + /// Get information about datasets within a list of zpools / datasets. + /// + /// This function is similar to [Zfs::list_datasets], but provides a more + /// substantial results about the datasets found. + /// + /// Sorts results and de-duplicates them by name. 
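+    ///
+    /// Roughly speaking (a sketch, not the exact invocation), this shells out to
+    /// `zfs list -d 1 -rHpo oxide:uuid,name,avail,used,quota,reservation,compression <dataset>...`
+    /// and parses each whitespace-separated row, e.g.
+    /// `- oxp_<uuid>/crucible 1234 5678 0 0 off`, into a [`DatasetProperties`];
+    /// a `-` uuid parses as `None`, and a quota or reservation of `0` is
+    /// treated as unset.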
+ pub fn get_dataset_properties( + datasets: &[String], + ) -> Result, anyhow::Error> { + let mut command = std::process::Command::new(ZFS); + let cmd = command.args(&["list", "-d", "1", "-rHpo"]); + + // Note: this is tightly coupled with the layout of DatasetProperties + cmd.arg("oxide:uuid,name,avail,used,quota,reservation,compression"); + cmd.args(datasets); + + let output = execute(cmd).with_context(|| { + format!("Failed to get dataset properties for {datasets:?}") + })?; + let stdout = String::from_utf8_lossy(&output.stdout); + let mut datasets = stdout + .trim() + .split('\n') + .map(|row| row.parse::()) + .collect::, _>>()?; + + datasets.sort_by(|d1, d2| d1.name.partial_cmp(&d2.name).unwrap()); + datasets.dedup_by(|d1, d2| d1.name.eq(&d2.name)); + + Ok(datasets) + } + /// Return the name of a dataset for a ZFS object. /// /// The object can either be a dataset name, or a path, in which case it @@ -679,3 +776,120 @@ pub fn get_all_omicron_datasets_for_delete() -> anyhow::Result> { Ok(datasets) } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn parse_dataset_props() { + let input = + "- dataset_name 1234 5678 0 0 off"; + let props = DatasetProperties::from_str(&input) + .expect("Should have parsed data"); + + assert_eq!(props.id, None); + assert_eq!(props.name, "dataset_name"); + assert_eq!(props.avail, 1234); + assert_eq!(props.used, 5678); + assert_eq!(props.quota, None); + assert_eq!(props.reservation, None); + assert_eq!(props.compression, "off"); + } + + #[test] + fn parse_dataset_props_with_optionals() { + let input = "d4e1e554-7b98-4413-809e-4a42561c3d0c dataset_name 1234 5678 111 222 off"; + let props = DatasetProperties::from_str(&input) + .expect("Should have parsed data"); + + assert_eq!( + props.id, + Some("d4e1e554-7b98-4413-809e-4a42561c3d0c".parse().unwrap()) + ); + assert_eq!(props.name, "dataset_name"); + assert_eq!(props.avail, 1234); + assert_eq!(props.used, 5678); + assert_eq!(props.quota, Some(111)); + assert_eq!(props.reservation, Some(222)); + assert_eq!(props.compression, "off"); + } + + #[test] + fn parse_dataset_bad_uuid() { + let input = "bad dataset_name 1234 5678 111 222 off"; + let err = DatasetProperties::from_str(&input) + .expect_err("Should have failed to parse"); + assert!( + err.to_string().contains("error parsing UUID (dataset)"), + "{err}" + ); + } + + #[test] + fn parse_dataset_bad_avail() { + let input = "- dataset_name BADAVAIL 5678 111 222 off"; + let err = DatasetProperties::from_str(&input) + .expect_err("Should have failed to parse"); + assert!( + err.to_string().contains("invalid digit found in string"), + "{err}" + ); + } + + #[test] + fn parse_dataset_bad_usage() { + let input = "- dataset_name 1234 BADUSAGE 111 222 off"; + let err = DatasetProperties::from_str(&input) + .expect_err("Should have failed to parse"); + assert!( + err.to_string().contains("invalid digit found in string"), + "{err}" + ); + } + + #[test] + fn parse_dataset_bad_quota() { + let input = "- dataset_name 1234 5678 BADQUOTA 222 off"; + let err = DatasetProperties::from_str(&input) + .expect_err("Should have failed to parse"); + assert!( + err.to_string().contains("invalid digit found in string"), + "{err}" + ); + } + + #[test] + fn parse_dataset_bad_reservation() { + let input = "- dataset_name 1234 5678 111 BADRES off"; + let err = DatasetProperties::from_str(&input) + .expect_err("Should have failed to parse"); + assert!( + err.to_string().contains("invalid digit found in string"), + "{err}" + ); + } + + #[test] + fn parse_dataset_missing_fields() { 
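+        // Each input below drops one more trailing column; parsing should fail
+        // with an error naming the first field that is absent.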
+ let expect_missing = |input: &str, what: &str| { + let err = DatasetProperties::from_str(input) + .expect_err("Should have failed to parse"); + assert!(err.to_string().contains(&format!("Missing {what}"))); + }; + + expect_missing( + "- dataset_name 1234 5678 111 222", + "'compression'", + ); + expect_missing( + "- dataset_name 1234 5678 111", + "'reservation'", + ); + expect_missing("- dataset_name 1234 5678", "'quota'"); + expect_missing("- dataset_name 1234", "'used'"); + expect_missing("- dataset_name", "'avail'"); + expect_missing("-", "'name'"); + expect_missing("", "UUID"); + } +} diff --git a/sled-agent/src/params.rs b/sled-agent/src/params.rs index a421bda3a6..e0b74e11b0 100644 --- a/sled-agent/src/params.rs +++ b/sled-agent/src/params.rs @@ -19,6 +19,7 @@ use omicron_common::api::internal::nexus::{ use omicron_common::api::internal::shared::{ NetworkInterface, SourceNatConfig, }; +use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::PropolisUuid; use omicron_uuid_kinds::ZpoolUuid; use schemars::JsonSchema; @@ -763,6 +764,49 @@ pub struct InventoryZpool { pub total_size: ByteCount, } +/// Identifies information about datasets within Oxide-managed zpools +#[derive(Clone, Debug, Deserialize, JsonSchema, Serialize)] +pub struct InventoryDataset { + /// Although datasets mandated by the control plane will have UUIDs, + /// datasets can be created (and have been created) without UUIDs. + pub id: Option, + + /// This name is the full path of the dataset. + // This is akin to [sled_storage::dataset::DatasetName::full_name], + // and it's also what you'd see when running "zfs list". + pub name: String, + + /// The amount of remaining space usable by the dataset (and children) + /// assuming there is no other activity within the pool. + pub available: u64, + + /// The amount of space consumed by this dataset and descendents. + pub used: u64, + + /// The maximum amount of space usable by a dataset and all descendents. + pub quota: Option, + + /// The minimum amount of space guaranteed to a dataset and descendents. + pub reservation: Option, + + /// The compression algorithm used for this dataset, if any. + pub compression: String, +} + +impl From for InventoryDataset { + fn from(props: illumos_utils::zfs::DatasetProperties) -> Self { + Self { + id: props.id, + name: props.name, + available: props.avail, + used: props.used, + quota: props.quota, + reservation: props.reservation, + compression: props.compression, + } + } +} + /// Identity and basic status information about this sled agent #[derive(Clone, Debug, Deserialize, JsonSchema, Serialize)] pub struct Inventory { @@ -775,6 +819,7 @@ pub struct Inventory { pub reservoir_size: ByteCount, pub disks: Vec, pub zpools: Vec, + pub datasets: Vec, } #[derive(Clone, Debug, Deserialize, JsonSchema, Serialize)] diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index f23b14c377..43d4fd310f 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -866,6 +866,8 @@ impl SledAgent { }) }) .collect::, anyhow::Error>>()?, + // TODO: Make this more real? 
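+            // (The simulated sled agent does not track dataset state yet, so
+            // inventory reports an empty list here.)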
+ datasets: vec![], }) } diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index 6b212c96ce..6669e8e4ca 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -66,6 +66,7 @@ use sled_agent_types::early_networking::EarlyNetworkConfig; use sled_hardware::{underlay, HardwareManager}; use sled_hardware_types::underlay::BootstrapInterface; use sled_hardware_types::Baseboard; +use sled_storage::dataset::{CRYPT_DATASET, ZONE_DATASET}; use sled_storage::manager::StorageHandle; use sled_storage::resources::DatasetsManagementResult; use sled_storage::resources::DisksManagementResult; @@ -1250,6 +1251,7 @@ impl SledAgent { let mut disks = vec![]; let mut zpools = vec![]; + let mut datasets = vec![]; let all_disks = self.storage().get_latest_disks().await; for (identity, variant, slot, _firmware) in all_disks.iter_all() { disks.push(crate::params::InventoryDisk { @@ -1278,6 +1280,47 @@ impl SledAgent { id: zpool.id(), total_size: ByteCount::try_from(info.size())?, }); + + // We do care about the total space usage within zpools, but mapping + // the layering back to "datasets we care about" is a little + // awkward. + // + // We could query for all datasets within a pool, but the sled agent + // doesn't really care about the children of datasets that it + // allocates. As an example: Sled Agent might provision a "crucible" + // dataset, but how region allocation occurs within that dataset + // is a detail for Crucible to care about, not the Sled Agent. + // + // To balance this effort, we ask for information about datasets + // that the Sled Agent is directly resopnsible for managing. + let datasets_of_interest = [ + // We care about the zpool itself, and all direct children. + zpool.to_string(), + // Likewise, we care about the encrypted dataset, and all + // direct children. + format!("{zpool}/{CRYPT_DATASET}"), + // The zone dataset gives us additional context on "what zones + // have datasets provisioned". 
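+                //
+                // (Note that `get_dataset_properties` only reports each listed
+                // entry plus its direct children ("-d 1"), which is why each
+                // layer of interest is spelled out explicitly here.)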
+ format!("{zpool}/{ZONE_DATASET}"), + ]; + let inv_props = + match illumos_utils::zfs::Zfs::get_dataset_properties( + datasets_of_interest.as_slice(), + ) { + Ok(props) => props.into_iter().map(|prop| { + crate::params::InventoryDataset::from(prop) + }), + Err(err) => { + warn!( + self.log, + "Failed to access dataset info within zpool"; + "zpool" => %zpool, + "err" => %err + ); + continue; + } + }; + datasets.extend(inv_props); } Ok(Inventory { @@ -1290,6 +1333,7 @@ impl SledAgent { reservoir_size, disks, zpools, + datasets, }) } } From 54b465832ab38648433c258bd56352b35de17a99 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 24 Jul 2024 17:54:26 -0700 Subject: [PATCH 06/84] Plumb inventory into nexus, omdb --- clients/sled-agent-client/src/lib.rs | 1 + dev-tools/omdb/src/bin/omdb/db.rs | 38 ++++++++++ dev-tools/omdb/src/bin/omdb/sled_agent.rs | 33 +++++++++ dev-tools/omdb/tests/usage_errors.out | 1 + illumos-utils/src/zfs.rs | 37 ++++++---- nexus/db-model/src/inventory.rs | 53 +++++++++++++- nexus/db-model/src/schema.rs | 15 ++++ .../db-queries/src/db/datastore/inventory.rs | 68 ++++++++++++++++++ .../src/db/datastore/physical_disk.rs | 1 + nexus/inventory/src/builder.rs | 5 ++ nexus/inventory/src/examples.rs | 7 ++ nexus/reconfigurator/planning/src/system.rs | 6 +- nexus/types/src/inventory.rs | 43 ++++++++++++ openapi/sled-agent.json | 70 +++++++++++++++++++ schema/crdb/dbinit.sql | 26 +++++++ sled-agent/src/params.rs | 8 +-- sled-agent/src/rack_setup/service.rs | 1 + 17 files changed, 392 insertions(+), 21 deletions(-) diff --git a/clients/sled-agent-client/src/lib.rs b/clients/sled-agent-client/src/lib.rs index 8a63cecd4f..7cb1121988 100644 --- a/clients/sled-agent-client/src/lib.rs +++ b/clients/sled-agent-client/src/lib.rs @@ -58,6 +58,7 @@ progenitor::generate_api!( RouterVersion = omicron_common::api::internal::shared::RouterVersion, SourceNatConfig = omicron_common::api::internal::shared::SourceNatConfig, SwitchLocation = omicron_common::api::external::SwitchLocation, + TypedUuidForDatasetKind = omicron_uuid_kinds::DatasetUuid, TypedUuidForInstanceKind = omicron_uuid_kinds::InstanceUuid, TypedUuidForPropolisKind = omicron_uuid_kinds::PropolisUuid, TypedUuidForZpoolKind = omicron_uuid_kinds::ZpoolUuid, diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 98669ddc06..516a0f5028 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -3720,6 +3720,44 @@ fn inv_collection_print_sleds(collection: &Collection) { sled.reservoir_size.to_whole_gibibytes() ); + if !sled.zpools.is_empty() { + println!(" physical disks:"); + } + for disk in &sled.disks { + let nexus_types::inventory::PhysicalDisk { + identity, + variant, + slot, + } = disk; + println!(" {variant:?}: {identity:?} in {slot}"); + } + + if !sled.zpools.is_empty() { + println!(" zpools"); + } + for zpool in &sled.zpools { + let nexus_types::inventory::Zpool { id, total_size, .. 
} = zpool; + println!(" {id}: total size: {total_size}"); + } + + if !sled.datasets.is_empty() { + println!(" datasets:"); + } + for dataset in &sled.datasets { + let nexus_types::inventory::Dataset { + id, + name, + available, + used, + quota, + reservation, + compression, + } = dataset; + println!(" {name} - id: {id:?}, compression: {compression}"); + println!(" available: {available}, used: {used}"); + println!(" reservation: {reservation:?}, quota: {quota:?}"); + } + if let Some(zones) = collection.omicron_zones.get(&sled.sled_id) { println!( " zones collected from {} at {}", diff --git a/dev-tools/omdb/src/bin/omdb/sled_agent.rs b/dev-tools/omdb/src/bin/omdb/sled_agent.rs index 9a9a17eff4..b97fb35e8c 100644 --- a/dev-tools/omdb/src/bin/omdb/sled_agent.rs +++ b/dev-tools/omdb/src/bin/omdb/sled_agent.rs @@ -38,6 +38,10 @@ enum SledAgentCommands { #[clap(subcommand)] Zpools(ZpoolCommands), + /// print information about datasets + #[clap(subcommand)] + Datasets(DatasetCommands), + /// print information about the local bootstore node #[clap(subcommand)] Bootstore(BootstoreCommands), @@ -55,6 +59,12 @@ enum ZpoolCommands { List, } +#[derive(Debug, Subcommand)] +enum DatasetCommands { + /// Print list of all datasets managed by the sled agent + List, +} + #[derive(Debug, Subcommand)] enum BootstoreCommands { /// Show the internal state of the local bootstore node @@ -86,6 +96,9 @@ impl SledAgentArgs { SledAgentCommands::Zpools(ZpoolCommands::List) => { cmd_zpools_list(&client).await } + SledAgentCommands::Datasets(DatasetCommands::List) => { + cmd_datasets_list(&client).await + } SledAgentCommands::Bootstore(BootstoreCommands::Status) => { cmd_bootstore_status(&client).await } @@ -130,6 +143,26 @@ async fn cmd_zpools_list( Ok(()) } +/// Runs `omdb sled-agent datasets list` +async fn cmd_datasets_list( + client: &sled_agent_client::Client, +) -> Result<(), anyhow::Error> { + let response = client.datasets_get().await.context("listing datasets")?; + let response = response.into_inner(); + + println!("dataset configuration @ generation {}:", response.generation); + let datasets = response.datasets; + + if datasets.is_empty() { + println!(" "); + } + for dataset in &datasets { + println!(" {:?}", dataset); + } + + Ok(()) +} + /// Runs `omdb sled-agent bootstore status` async fn cmd_bootstore_status( client: &sled_agent_client::Client, diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index 3d6f2af112..8b240958ba 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -556,6 +556,7 @@ Usage: omdb sled-agent [OPTIONS] Commands: zones print information about zones zpools print information about zpools + datasets print information about datasets bootstore print information about the local bootstore node help Print this message or the help of the given subcommand(s) diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index 7dfd574c97..1eaf946911 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -7,6 +7,7 @@ use crate::{execute, PFEXEC}; use anyhow::Context; use camino::{Utf8Path, Utf8PathBuf}; +use omicron_common::api::external::ByteCount; use omicron_common::disk::DiskIdentity; use omicron_uuid_kinds::DatasetUuid; use std::fmt; @@ -217,13 +218,13 @@ pub struct DatasetProperties { /// The full name of the dataset. pub name: String, /// Remaining space in the dataset and descendents. - pub avail: u64, + pub avail: ByteCount, /// Space used by dataset and descendents. 
- pub used: u64, + pub used: ByteCount, /// Maximum space usable by dataset and descendents. - pub quota: Option, + pub quota: Option, /// Minimum space guaranteed to dataset and descendents. - pub reservation: Option, + pub reservation: Option, /// The compression algorithm used for this dataset. pub compression: String, } @@ -240,12 +241,20 @@ impl FromStr for DatasetProperties { }; let name = iter.next().context("Missing 'name'")?.to_string(); - let avail = iter.next().context("Missing 'avail'")?.parse::()?; - let used = iter.next().context("Missing 'used'")?.parse::()?; + let avail = iter + .next() + .context("Missing 'avail'")? + .parse::()? + .try_into()?; + let used = iter + .next() + .context("Missing 'used'")? + .parse::()? + .try_into()?; let quota = match iter.next().context("Missing 'quota'")?.parse::()? { 0 => None, - q => Some(q), + q => Some(q.try_into()?), }; let reservation = match iter .next() @@ -253,7 +262,7 @@ impl FromStr for DatasetProperties { .parse::()? { 0 => None, - r => Some(r), + r => Some(r.try_into()?), }; let compression = iter.next().context("Missing 'compression'")?.to_string(); @@ -790,8 +799,8 @@ mod test { assert_eq!(props.id, None); assert_eq!(props.name, "dataset_name"); - assert_eq!(props.avail, 1234); - assert_eq!(props.used, 5678); + assert_eq!(props.avail.to_bytes(), 1234); + assert_eq!(props.used.to_bytes(), 5678); assert_eq!(props.quota, None); assert_eq!(props.reservation, None); assert_eq!(props.compression, "off"); @@ -808,10 +817,10 @@ mod test { Some("d4e1e554-7b98-4413-809e-4a42561c3d0c".parse().unwrap()) ); assert_eq!(props.name, "dataset_name"); - assert_eq!(props.avail, 1234); - assert_eq!(props.used, 5678); - assert_eq!(props.quota, Some(111)); - assert_eq!(props.reservation, Some(222)); + assert_eq!(props.avail.to_bytes(), 1234); + assert_eq!(props.used.to_bytes(), 5678); + assert_eq!(props.quota.map(|q| q.to_bytes()), Some(111)); + assert_eq!(props.reservation.map(|r| r.to_bytes()), Some(222)); assert_eq!(props.compression, "off"); } diff --git a/nexus/db-model/src/inventory.rs b/nexus/db-model/src/inventory.rs index 14c4684e1e..cd259701e2 100644 --- a/nexus/db-model/src/inventory.rs +++ b/nexus/db-model/src/inventory.rs @@ -7,7 +7,7 @@ use crate::omicron_zone_config::{OmicronZone, OmicronZoneNic}; use crate::schema::{ hw_baseboard_id, inv_caboose, inv_collection, inv_collection_error, - inv_omicron_zone, inv_omicron_zone_nic, inv_physical_disk, + inv_dataset, inv_omicron_zone, inv_omicron_zone_nic, inv_physical_disk, inv_root_of_trust, inv_root_of_trust_page, inv_service_processor, inv_sled_agent, inv_sled_omicron_zones, inv_zpool, sw_caboose, sw_root_of_trust_page, @@ -34,6 +34,7 @@ use nexus_types::inventory::{ use omicron_common::api::internal::shared::NetworkInterface; use omicron_uuid_kinds::CollectionKind; use omicron_uuid_kinds::CollectionUuid; +use omicron_uuid_kinds::DatasetKind; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SledKind; use omicron_uuid_kinds::SledUuid; @@ -921,6 +922,56 @@ impl From for nexus_types::inventory::Zpool { } } +/// See [`nexus_types::inventory::Dataset`]. 
+#[derive(Queryable, Clone, Debug, Selectable, Insertable)] +#[diesel(table_name = inv_dataset)] +pub struct InvDataset { + pub inv_collection_id: DbTypedUuid, + pub sled_id: DbTypedUuid, + pub id: Option>, + pub name: String, + pub available: ByteCount, + pub used: ByteCount, + pub quota: Option, + pub reservation: Option, + pub compression: String, +} + +impl InvDataset { + pub fn new( + inv_collection_id: CollectionUuid, + sled_id: SledUuid, + dataset: &nexus_types::inventory::Dataset, + ) -> Self { + Self { + inv_collection_id: inv_collection_id.into(), + sled_id: sled_id.into(), + + id: dataset.id.map(|id| id.into()), + name: dataset.name.clone(), + available: dataset.available.into(), + used: dataset.used.into(), + quota: dataset.quota.map(|q| q.into()), + reservation: dataset.reservation.map(|r| r.into()), + compression: dataset.compression.clone(), + } + } +} + +impl From for nexus_types::inventory::Dataset { + fn from(dataset: InvDataset) -> Self { + Self { + id: dataset.id.map(|id| id.0), + name: dataset.name, + available: *dataset.available, + used: *dataset.used, + quota: dataset.quota.map(|q| *q), + reservation: dataset.reservation.map(|r| *r), + compression: dataset.compression, + } + } +} + /// See [`nexus_types::inventory::OmicronZonesFound`]. #[derive(Queryable, Clone, Debug, Selectable, Insertable)] #[diesel(table_name = inv_sled_omicron_zones)] diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index dc57de9263..4412f642b5 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1442,6 +1442,21 @@ table! { } } +table! { + inv_dataset (inv_collection_id, sled_id, name) { + inv_collection_id -> Uuid, + sled_id -> Uuid, + + id -> Nullable, + name -> Text, + available -> Int8, + used -> Int8, + quota -> Nullable, + reservation -> Nullable, + compression -> Text, + } +} + table! { inv_sled_omicron_zones (inv_collection_id, sled_id) { inv_collection_id -> Uuid, diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs index 289e443213..4840af9076 100644 --- a/nexus/db-queries/src/db/datastore/inventory.rs +++ b/nexus/db-queries/src/db/datastore/inventory.rs @@ -38,6 +38,7 @@ use nexus_db_model::HwRotSlotEnum; use nexus_db_model::InvCaboose; use nexus_db_model::InvCollection; use nexus_db_model::InvCollectionError; +use nexus_db_model::InvDataset; use nexus_db_model::InvOmicronZone; use nexus_db_model::InvOmicronZoneNic; use nexus_db_model::InvPhysicalDisk; @@ -157,6 +158,17 @@ impl DataStore { }) .collect(); + // Pull datasets out of all sled agents + let datasets: Vec<_> = collection + .sled_agents + .iter() + .flat_map(|(sled_id, sled_agent)| { + sled_agent.datasets.iter().map(|dataset| { + InvDataset::new(collection_id, *sled_id, dataset) + }) + }) + .collect(); + // Partition the sled agents into those with an associated baseboard id // and those without one. We handle these pretty differently. let (sled_agents_baseboards, sled_agents_no_baseboards): ( @@ -745,6 +757,25 @@ impl DataStore { } } + // Insert rows for all the datasets we found. + { + use db::schema::inv_dataset::dsl; + + let batch_size = SQL_BATCH_SIZE.get().try_into().unwrap(); + let mut datasets = datasets.into_iter(); + loop { + let some_datasets = + datasets.by_ref().take(batch_size).collect::>(); + if some_datasets.is_empty() { + break; + } + let _ = diesel::insert_into(dsl::inv_dataset) + .values(some_datasets) + .execute_async(&conn) + .await?; + } + } + // Insert rows for the sled agents that we found. 
In practice, we'd // expect these to all have baseboards (if using Oxide hardware) or // none have baseboards (if not). @@ -1601,6 +1632,39 @@ impl DataStore { zpools }; + // Mapping of "Sled ID" -> "All datasets reported by that sled" + let datasets: BTreeMap> = { + use db::schema::inv_dataset::dsl; + + let mut datasets = + BTreeMap::>::new(); + let mut paginator = Paginator::new(batch_size); + while let Some(p) = paginator.next() { + let batch = paginated_multicolumn( + dsl::inv_dataset, + (dsl::sled_id, dsl::name), + &p.current_pagparams(), + ) + .filter(dsl::inv_collection_id.eq(db_id)) + .select(InvDataset::as_select()) + .load_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })?; + paginator = p.found_batch(&batch, &|row| { + (row.sled_id, row.name.clone()) + }); + for dataset in batch { + datasets + .entry(dataset.sled_id.into_untyped_uuid()) + .or_default() + .push(dataset.into()); + } + } + datasets + }; + // Collect the unique baseboard ids referenced by SPs, RoTs, and Sled // Agents. let baseboard_id_ids: BTreeSet<_> = sps @@ -1709,6 +1773,10 @@ impl DataStore { .get(sled_id.as_untyped_uuid()) .map(|zpools| zpools.to_vec()) .unwrap_or_default(), + datasets: datasets + .get(sled_id.as_untyped_uuid()) + .map(|datasets| datasets.to_vec()) + .unwrap_or_default(), }; Ok((sled_id, sled_agent)) }) diff --git a/nexus/db-queries/src/db/datastore/physical_disk.rs b/nexus/db-queries/src/db/datastore/physical_disk.rs index 5e3b51f228..e63f476f9d 100644 --- a/nexus/db-queries/src/db/datastore/physical_disk.rs +++ b/nexus/db-queries/src/db/datastore/physical_disk.rs @@ -696,6 +696,7 @@ mod test { usable_physical_ram: ByteCount::from(1024 * 1024), disks, zpools: vec![], + datasets: vec![], }, ) .unwrap(); diff --git a/nexus/inventory/src/builder.rs b/nexus/inventory/src/builder.rs index 65bdae63ce..0f747c93d9 100644 --- a/nexus/inventory/src/builder.rs +++ b/nexus/inventory/src/builder.rs @@ -526,6 +526,11 @@ impl CollectionBuilder { .into_iter() .map(|z| Zpool::new(time_collected, z)) .collect(), + datasets: inventory + .datasets + .into_iter() + .map(|d| d.into()) + .collect(), }; if let Some(previous) = self.sleds.get(&sled_id) { diff --git a/nexus/inventory/src/examples.rs b/nexus/inventory/src/examples.rs index c2e283a640..f22b5fd8db 100644 --- a/nexus/inventory/src/examples.rs +++ b/nexus/inventory/src/examples.rs @@ -315,6 +315,7 @@ pub fn representative() -> Representative { }, ]; let zpools = vec![]; + let datasets = vec![]; builder .found_sled_inventory( @@ -329,6 +330,7 @@ pub fn representative() -> Representative { sled_agent_client::types::SledRole::Gimlet, disks, zpools, + datasets, ), ) .unwrap(); @@ -355,6 +357,7 @@ pub fn representative() -> Representative { sled_agent_client::types::SledRole::Scrimlet, vec![], vec![], + vec![], ), ) .unwrap(); @@ -376,6 +379,7 @@ pub fn representative() -> Representative { sled_agent_client::types::SledRole::Gimlet, vec![], vec![], + vec![], ), ) .unwrap(); @@ -395,6 +399,7 @@ pub fn representative() -> Representative { sled_agent_client::types::SledRole::Gimlet, vec![], vec![], + vec![], ), ) .unwrap(); @@ -505,6 +510,7 @@ pub fn sled_agent( sled_role: sled_agent_client::types::SledRole, disks: Vec, zpools: Vec, + datasets: Vec, ) -> sled_agent_client::types::Inventory { sled_agent_client::types::Inventory { baseboard, @@ -516,5 +522,6 @@ pub fn sled_agent( usable_physical_ram: ByteCount::from(1024 * 1024), disks, zpools, + datasets, } } diff --git a/nexus/reconfigurator/planning/src/system.rs 
b/nexus/reconfigurator/planning/src/system.rs index 5f00ea8172..ed961f330a 100644 --- a/nexus/reconfigurator/planning/src/system.rs +++ b/nexus/reconfigurator/planning/src/system.rs @@ -551,9 +551,10 @@ impl Sled { slot: i64::try_from(i).unwrap(), }) .collect(), - // Zpools won't necessarily show up until our first request - // to provision storage, so we omit them. + // Zpools & Datasets won't necessarily show up until our first + // request to provision storage, so we omit them. zpools: vec![], + datasets: vec![], } }; @@ -689,6 +690,7 @@ impl Sled { usable_physical_ram: inv_sled_agent.usable_physical_ram, disks: vec![], zpools: vec![], + datasets: vec![], }; Sled { diff --git a/nexus/types/src/inventory.rs b/nexus/types/src/inventory.rs index 661c4c088d..7f197057bb 100644 --- a/nexus/types/src/inventory.rs +++ b/nexus/types/src/inventory.rs @@ -24,6 +24,7 @@ pub use omicron_common::api::internal::shared::NetworkInterfaceKind; pub use omicron_common::api::internal::shared::SourceNatConfig; pub use omicron_common::zpool_name::ZpoolName; use omicron_uuid_kinds::CollectionUuid; +use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; use serde::{Deserialize, Serialize}; @@ -396,6 +397,47 @@ impl Zpool { } } +/// A dataset reported by a sled agent. +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub struct Dataset { + /// Although datasets mandated by the control plane will have UUIDs, + /// datasets can be created (and have been created) without UUIDs. + pub id: Option, + + /// This name is the full path of the dataset. + pub name: String, + + /// The amount of remaining space usable by the dataset (and children) + /// assuming there is no other activity within the pool. + pub available: ByteCount, + + /// The amount of space consumed by this dataset and descendents. + pub used: ByteCount, + + /// The maximum amount of space usable by a dataset and all descendents. + pub quota: Option, + + /// The minimum amount of space guaranteed to a dataset and descendents. + pub reservation: Option, + + /// The compression algorithm used for this dataset, if any. + pub compression: String, +} + +impl From for Dataset { + fn from(disk: sled_agent_client::types::InventoryDataset) -> Self { + Self { + id: disk.id, + name: disk.name, + available: disk.available, + used: disk.used, + quota: disk.quota, + reservation: disk.reservation, + compression: disk.compression, + } + } +} + /// Inventory reported by sled agent /// /// This is a software notion of a sled, distinct from an underlying baseboard. 
@@ -415,6 +457,7 @@ pub struct SledAgent { pub reservoir_size: ByteCount, pub disks: Vec, pub zpools: Vec, + pub datasets: Vec, } #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index ac49552c76..97d0a60f9e 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -3577,6 +3577,12 @@ "baseboard": { "$ref": "#/components/schemas/Baseboard" }, + "datasets": { + "type": "array", + "items": { + "$ref": "#/components/schemas/InventoryDataset" + } + }, "disks": { "type": "array", "items": { @@ -3613,6 +3619,7 @@ }, "required": [ "baseboard", + "datasets", "disks", "reservoir_size", "sled_agent_address", @@ -3623,6 +3630,69 @@ "zpools" ] }, + "InventoryDataset": { + "description": "Identifies information about datasets within Oxide-managed zpools", + "type": "object", + "properties": { + "available": { + "description": "The amount of remaining space usable by the dataset (and children) assuming there is no other activity within the pool.", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "compression": { + "description": "The compression algorithm used for this dataset, if any.", + "type": "string" + }, + "id": { + "nullable": true, + "description": "Although datasets mandated by the control plane will have UUIDs, datasets can be created (and have been created) without UUIDs.", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + ] + }, + "name": { + "description": "This name is the full path of the dataset.", + "type": "string" + }, + "quota": { + "nullable": true, + "description": "The maximum amount of space usable by a dataset and all descendents.", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "reservation": { + "nullable": true, + "description": "The minimum amount of space guaranteed to a dataset and descendents.", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "used": { + "description": "The amount of space consumed by this dataset and descendents.", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + } + }, + "required": [ + "available", + "compression", + "name", + "used" + ] + }, "InventoryDisk": { "description": "Identifies information about disks which may be attached to Sleds.", "type": "object", diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 7fc83ad5d0..ecef64d197 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3184,6 +3184,32 @@ CREATE TABLE IF NOT EXISTS omicron.public.inv_zpool ( -- Allow looking up the most recent Zpool by ID CREATE INDEX IF NOT EXISTS inv_zpool_by_id_and_time ON omicron.public.inv_zpool (id, time_collected DESC); +CREATE TABLE IF NOT EXISTS omicron.public.inv_dataset ( + -- where this observation came from + -- (foreign key into `inv_collection` table) + inv_collection_id UUID NOT NULL, + sled_id UUID NOT NULL, + + -- The control plane ID of the zpool. + -- This is nullable because datasets have been historically + -- self-managed by the Sled Agent, and some don't have explicit UUIDs. 
+ id UUID, + + name TEXT NOT NULL, + available INT8 NOT NULL, + used INT8 NOT NULL, + quota INT8, + reservation INT8, + compression TEXT NOT NULL, + + -- PK consisting of: + -- - Which collection this was + -- - The sled reporting the disk + -- - The name of this dataset + -- - The slot in which this disk was found + PRIMARY KEY (inv_collection_id, sled_id, name) +); + CREATE TABLE IF NOT EXISTS omicron.public.inv_sled_omicron_zones ( -- where this observation came from -- (foreign key into `inv_collection` table) diff --git a/sled-agent/src/params.rs b/sled-agent/src/params.rs index e0b74e11b0..7de7e996f4 100644 --- a/sled-agent/src/params.rs +++ b/sled-agent/src/params.rs @@ -778,16 +778,16 @@ pub struct InventoryDataset { /// The amount of remaining space usable by the dataset (and children) /// assuming there is no other activity within the pool. - pub available: u64, + pub available: ByteCount, /// The amount of space consumed by this dataset and descendents. - pub used: u64, + pub used: ByteCount, /// The maximum amount of space usable by a dataset and all descendents. - pub quota: Option, + pub quota: Option, /// The minimum amount of space guaranteed to a dataset and descendents. - pub reservation: Option, + pub reservation: Option, /// The compression algorithm used for this dataset, if any. pub compression: String, diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index c8e56ae9f4..b23e31e1b4 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -1591,6 +1591,7 @@ mod test { }) .collect(), zpools: vec![], + datasets: vec![], }, true, ) From 24d93a8b058ff1eec93b6ac1da4e4daf9304907d Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 26 Jul 2024 12:52:59 -0700 Subject: [PATCH 07/84] Fix mismerge --- common/src/api/internal/shared.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index 149d6020f5..99a8037393 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -718,7 +718,8 @@ pub struct ResolvedVpcRouteSet { )] #[serde(tag = "type", rename_all = "snake_case")] pub enum DatasetKind { - CockroachDb, + #[serde(rename = "cockroach_db")] + Cockroach, Crucible, Clickhouse, ClickhouseKeeper, @@ -744,7 +745,7 @@ impl fmt::Display for DatasetKind { use DatasetKind::*; let s = match self { Crucible => "crucible", - CockroachDb => "cockroach_db", + Cockroach => "cockroach_db", Clickhouse => "clickhouse", ClickhouseKeeper => "clickhouse_keeper", ExternalDns => "external_dns", @@ -767,7 +768,7 @@ impl FromStr for DatasetKind { use DatasetKind::*; let kind = match s { "crucible" => Crucible, - "cockroach" | "cockroachdb" | "cockroach_db" => CockroachDb, + "cockroach" | "cockroachdb" | "cockroach_db" => Cockroach, "clickhouse" => Clickhouse, "clickhouse_keeper" => ClickhouseKeeper, "external_dns" => ExternalDns, From 178e20e4a5fe4727752e63edb3f026476032826e Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 26 Jul 2024 13:35:36 -0700 Subject: [PATCH 08/84] Clippy and helios --- sled-agent/src/rack_setup/plan/service.rs | 2 +- sled-storage/src/manager.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index ec19863bef..6f763dddbf 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -925,7 +925,7 @@ impl SledInfo { // 
enumerates the valid zpool indexes. let allocator = self .u2_zpool_allocators - .entry(kind.clone()) + .entry(kind) .or_insert_with(|| Box::new(0..self.u2_zpools.len())); match allocator.next() { None => Err(PlanError::NotEnoughSleds), diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index fd28b8a67d..59c1dd8bf5 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -1086,7 +1086,6 @@ impl StorageManager { /// systems. #[cfg(all(test, target_os = "illumos"))] mod tests { - use crate::dataset::DatasetType; use crate::disk::RawSyntheticDisk; use crate::manager_test_harness::StorageManagerTestHarness; use crate::resources::DiskManagementError; @@ -1094,6 +1093,7 @@ mod tests { use super::*; use camino_tempfile::tempdir_in; use omicron_common::api::external::Generation; + use omicron_common::disk::DatasetKind; use omicron_common::ledger; use omicron_test_utils::dev::test_setup_log; use sled_hardware::DiskFirmware; @@ -1562,7 +1562,7 @@ mod tests { let dataset_id = Uuid::new_v4(); let zpool_name = ZpoolName::new_external(config.disks[0].pool_id); let dataset_name = - DatasetName::new(zpool_name.clone(), DatasetType::Crucible); + DatasetName::new(zpool_name.clone(), DatasetKind::Crucible); harness .handle() .upsert_filesystem(dataset_id, dataset_name) From cf3f35c95d215bf7396b46f9c5842f64e8f9cb07 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 26 Jul 2024 14:26:03 -0700 Subject: [PATCH 09/84] openapi --- openapi/nexus-internal.json | 93 +++++++++++++++++++++++++++++++++---- openapi/sled-agent.json | 93 +++++++++++++++++++++++++++++++++---- 2 files changed, 170 insertions(+), 16 deletions(-) diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 4e1a11aff1..a50af1990d 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -2592,14 +2592,91 @@ }, "DatasetKind": { "description": "Describes the purpose of the dataset.", - "type": "string", - "enum": [ - "crucible", - "cockroach", - "clickhouse", - "clickhouse_keeper", - "external_dns", - "internal_dns" + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "cockroach_db" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "crucible" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "clickhouse" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "clickhouse_keeper" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "external_dns" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "internal_dns" + ] + } + }, + "required": [ + "type" + ] + } ] }, "DatasetPutRequest": { diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index d0a1ddd8ad..bba3812b94 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -2204,14 +2204,91 @@ }, "DatasetKind": { "description": "Describes the purpose of the dataset.", - "type": "string", - "enum": [ - "crucible", - "cockroach", - "clickhouse", - "clickhouse_keeper", - "external_dns", - "internal_dns" + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "cockroach_db" + ] + } + }, + "required": [ + 
"type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "crucible" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "clickhouse" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "clickhouse_keeper" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "external_dns" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "internal_dns" + ] + } + }, + "required": [ + "type" + ] + } ] }, "DatasetManagementStatus": { From 67bf6faa844ad02c507b8dc4fc0fa0edea527db3 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 26 Jul 2024 15:02:08 -0700 Subject: [PATCH 10/84] schema upgrade --- nexus/db-model/src/schema_versions.rs | 3 ++- schema/crdb/dbinit.sql | 3 +-- schema/crdb/inv-dataset/up01.sql | 16 ++++++++++++++++ 3 files changed, 19 insertions(+), 3 deletions(-) create mode 100644 schema/crdb/inv-dataset/up01.sql diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index cc34a3581c..2e7226305a 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -17,7 +17,7 @@ use std::collections::BTreeMap; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(83, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(84, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -29,6 +29,7 @@ static KNOWN_VERSIONS: Lazy> = Lazy::new(|| { // | leaving the first copy as an example for the next person. 
// v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(84, "inv-dataset"), KnownVersion::new(83, "dataset-address-optional"), KnownVersion::new(82, "region-port"), KnownVersion::new(81, "add-nullable-filesystem-pool"), diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index ecef64d197..957de216f7 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3206,7 +3206,6 @@ CREATE TABLE IF NOT EXISTS omicron.public.inv_dataset ( -- - Which collection this was -- - The sled reporting the disk -- - The name of this dataset - -- - The slot in which this disk was found PRIMARY KEY (inv_collection_id, sled_id, name) ); @@ -4171,7 +4170,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '83.0.0', NULL) + (TRUE, NOW(), NOW(), '84.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/inv-dataset/up01.sql b/schema/crdb/inv-dataset/up01.sql new file mode 100644 index 0000000000..4504768c40 --- /dev/null +++ b/schema/crdb/inv-dataset/up01.sql @@ -0,0 +1,16 @@ +CREATE TABLE IF NOT EXISTS omicron.public.inv_dataset ( + inv_collection_id UUID NOT NULL, + sled_id UUID NOT NULL, + + id UUID, + + name TEXT NOT NULL, + available INT8 NOT NULL, + used INT8 NOT NULL, + quota INT8, + reservation INT8, + compression TEXT NOT NULL, + + PRIMARY KEY (inv_collection_id, sled_id, name) +); + From 11f49fbc49d208e95583b41ab3dfb7e0700400a9 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 29 Jul 2024 18:37:16 -0700 Subject: [PATCH 11/84] More broad support for datasets --- common/src/api/internal/shared.rs | 51 +++++++++++++++++-- nexus/db-model/src/dataset.rs | 3 ++ nexus/db-model/src/dataset_kind.rs | 6 +++ nexus/db-model/src/schema.rs | 1 + nexus/db-queries/src/db/datastore/dataset.rs | 4 ++ nexus/db-queries/src/db/datastore/mod.rs | 4 ++ .../reconfigurator/execution/src/datasets.rs | 3 +- .../execution/src/omicron_physical_disks.rs | 1 + .../tasks/decommissioned_disk_cleaner.rs | 1 + nexus/src/app/rack.rs | 3 +- .../src/app/sagas/region_replacement_start.rs | 4 ++ nexus/src/app/sled.rs | 8 +-- nexus/src/lib.rs | 7 +-- openapi/nexus-internal.json | 46 +++++++++++++++++ openapi/sled-agent.json | 46 +++++++++++++++++ schema/crdb/dbinit.sql | 13 ++++- schema/omicron-datasets.json | 46 +++++++++++++++++ sled-agent/src/rack_setup/plan/service.rs | 2 +- sled-agent/src/rack_setup/service.rs | 2 +- sled-agent/src/sled_agent.rs | 10 ++-- sled-storage/src/manager.rs | 41 ++++++++++----- 21 files changed, 269 insertions(+), 33 deletions(-) diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index 99a8037393..673378c1d3 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -709,7 +709,6 @@ pub struct ResolvedVpcRouteSet { Deserialize, JsonSchema, Clone, - Copy, PartialEq, Eq, Ord, @@ -718,6 +717,11 @@ pub struct ResolvedVpcRouteSet { )] #[serde(tag = "type", rename_all = "snake_case")] pub enum DatasetKind { + // Durable datasets for zones + + // This renaming exists for backwards compatibility -- this enum variant + // was serialized to "all-zones-request" as "cockroach_db" and should + // stay that way, unless we perform an explicit schema change. 
#[serde(rename = "cockroach_db")] Cockroach, Crucible, @@ -725,6 +729,15 @@ pub enum DatasetKind { ClickhouseKeeper, ExternalDns, InternalDns, + + // Zone filesystems + ZoneRoot, + Zone { + name: String, + }, + + // Other datasets + Debug, } impl DatasetKind { @@ -738,18 +751,50 @@ impl DatasetKind { _ => true, } } + + /// Returns true if this dataset is delegated to a non-global zone. + pub fn zoned(&self) -> bool { + use DatasetKind::*; + match self { + Cockroach | Crucible | Clickhouse | ClickhouseKeeper + | ExternalDns | InternalDns => true, + ZoneRoot | Zone { .. } | Debug => false, + } + } + + /// Returns the zone name, if this is dataset for a zone filesystem. + /// + /// Otherwise, returns "None". + pub fn zone_name(&self) -> Option { + if let DatasetKind::Zone { name } = self { + Some(name.clone()) + } else { + None + } + } } +// Be cautious updating this implementation: +// +// - It should align with [DatasetKind::FromStr], below +// - The strings here are used here comprise the dataset name, stored durably +// on-disk impl fmt::Display for DatasetKind { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use DatasetKind::*; let s = match self { Crucible => "crucible", - Cockroach => "cockroach_db", + Cockroach => "cockroachdb", Clickhouse => "clickhouse", ClickhouseKeeper => "clickhouse_keeper", ExternalDns => "external_dns", InternalDns => "internal_dns", + ZoneRoot => "zone", + Zone { name } => { + write!(f, "zone/{}", name)?; + return Ok(()); + } + Debug => "debug", }; write!(f, "{}", s) } @@ -768,7 +813,7 @@ impl FromStr for DatasetKind { use DatasetKind::*; let kind = match s { "crucible" => Crucible, - "cockroach" | "cockroachdb" | "cockroach_db" => Cockroach, + "cockroachdb" | "cockroach_db" => Cockroach, "clickhouse" => Clickhouse, "clickhouse_keeper" => ClickhouseKeeper, "external_dns" => ExternalDns, diff --git a/nexus/db-model/src/dataset.rs b/nexus/db-model/src/dataset.rs index a9dee990b9..d525b80241 100644 --- a/nexus/db-model/src/dataset.rs +++ b/nexus/db-model/src/dataset.rs @@ -41,6 +41,7 @@ pub struct Dataset { pub kind: DatasetKind, pub size_used: Option, + zone_name: Option, } impl Dataset { @@ -49,6 +50,7 @@ impl Dataset { pool_id: Uuid, addr: Option, kind: DatasetKind, + zone_name: Option, ) -> Self { let size_used = match kind { DatasetKind::Crucible => Some(0), @@ -63,6 +65,7 @@ impl Dataset { port: addr.map(|addr| addr.port().into()), kind, size_used, + zone_name, } } diff --git a/nexus/db-model/src/dataset_kind.rs b/nexus/db-model/src/dataset_kind.rs index 395d01353e..2e71b96a41 100644 --- a/nexus/db-model/src/dataset_kind.rs +++ b/nexus/db-model/src/dataset_kind.rs @@ -22,6 +22,9 @@ impl_enum_type!( ClickhouseKeeper => b"clickhouse_keeper" ExternalDns => b"external_dns" InternalDns => b"internal_dns" + ZoneRoot => b"zone_root" + Zone => b"zone" + Debug => b"debug" ); impl From for DatasetKind { @@ -41,6 +44,9 @@ impl From for DatasetKind { internal::shared::DatasetKind::InternalDns => { DatasetKind::InternalDns } + internal::shared::DatasetKind::ZoneRoot => DatasetKind::ZoneRoot, + internal::shared::DatasetKind::Zone { .. } => DatasetKind::Zone, + internal::shared::DatasetKind::Debug => DatasetKind::Debug, } } } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index dc57de9263..6e94f3458a 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1027,6 +1027,7 @@ table! 
{ kind -> crate::DatasetKindEnum, size_used -> Nullable, + zone_name -> Nullable, } } diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index a08e346fe8..8a814aea80 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -292,6 +292,7 @@ mod test { zpool_id, Some("[::1]:0".parse().unwrap()), DatasetKind::Crucible, + None, )) .await .expect("failed to insert dataset") @@ -325,6 +326,7 @@ mod test { zpool_id, Some("[::1]:12345".parse().unwrap()), DatasetKind::Cockroach, + None, )) .await .expect("failed to do-nothing insert dataset"); @@ -341,6 +343,7 @@ mod test { zpool_id, Some("[::1]:0".parse().unwrap()), DatasetKind::Cockroach, + None, )) .await .expect("failed to upsert dataset"); @@ -373,6 +376,7 @@ mod test { zpool_id, Some("[::1]:12345".parse().unwrap()), DatasetKind::Cockroach, + None, )) .await .expect("failed to do-nothing insert dataset"); diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 2540790477..364ae3e970 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -907,6 +907,7 @@ mod test { zpool.pool_id, bogus_addr, DatasetKind::Crucible, + None, ); let datastore = datastore.clone(); @@ -1279,6 +1280,7 @@ mod test { zpool_id, bogus_addr, DatasetKind::Crucible, + None, ); let datastore = datastore.clone(); async move { @@ -1379,6 +1381,7 @@ mod test { zpool_id, bogus_addr, DatasetKind::Crucible, + None, ); let datastore = datastore.clone(); async move { @@ -1454,6 +1457,7 @@ mod test { zpool_id, bogus_addr, DatasetKind::Crucible, + None, ); datastore.dataset_upsert(dataset).await.unwrap(); physical_disk_ids.push(physical_disk_id); diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index 6444934ba6..003861519e 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -67,7 +67,8 @@ pub(crate) async fn ensure_dataset_records_exist( id.into_untyped_uuid(), pool_id.into_untyped_uuid(), Some(address), - kind.into(), + kind.clone().into(), + kind.zone_name(), ); let maybe_inserted = datastore .dataset_insert_if_not_exists(dataset) diff --git a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs index 9ae72d2049..4d1fe67bc5 100644 --- a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs +++ b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs @@ -435,6 +435,7 @@ mod test { 0, )), DatasetKind::Crucible, + None, )) .await .unwrap(); diff --git a/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs b/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs index cb3ef9a569..982909b644 100644 --- a/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs +++ b/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs @@ -246,6 +246,7 @@ mod tests { 0, )), DatasetKind::Crucible, + None, )) .await .unwrap(); diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 13b30fd47a..f4162f55ab 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -146,7 +146,8 @@ impl super::Nexus { dataset.dataset_id, dataset.zpool_id, Some(dataset.request.address), - dataset.request.kind.into(), + dataset.request.kind.clone().into(), + dataset.request.kind.zone_name(), ) }) .collect(); diff --git a/nexus/src/app/sagas/region_replacement_start.rs 
b/nexus/src/app/sagas/region_replacement_start.rs index 1297158b24..73842d03f1 100644 --- a/nexus/src/app/sagas/region_replacement_start.rs +++ b/nexus/src/app/sagas/region_replacement_start.rs @@ -896,24 +896,28 @@ pub(crate) mod test { Uuid::new_v4(), Some("[fd00:1122:3344:101::1]:12345".parse().unwrap()), DatasetKind::Crucible, + None, ), Dataset::new( Uuid::new_v4(), Uuid::new_v4(), Some("[fd00:1122:3344:102::1]:12345".parse().unwrap()), DatasetKind::Crucible, + None, ), Dataset::new( Uuid::new_v4(), Uuid::new_v4(), Some("[fd00:1122:3344:103::1]:12345".parse().unwrap()), DatasetKind::Crucible, + None, ), Dataset::new( Uuid::new_v4(), Uuid::new_v4(), Some("[fd00:1122:3344:104::1]:12345".parse().unwrap()), DatasetKind::Crucible, + None, ), ]; diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs index 261045670e..a0e1cc3526 100644 --- a/nexus/src/app/sled.rs +++ b/nexus/src/app/sled.rs @@ -292,13 +292,12 @@ impl super::Nexus { // Datasets (contained within zpools) - /// Upserts a dataset into the database, updating it if it already exists. - pub(crate) async fn upsert_dataset( + /// Upserts a crucible dataset into the database, updating it if it already exists. + pub(crate) async fn upsert_crucible_dataset( &self, id: Uuid, zpool_id: Uuid, address: SocketAddrV6, - kind: DatasetKind, ) -> Result<(), Error> { info!( self.log, @@ -307,8 +306,9 @@ impl super::Nexus { "dataset_id" => id.to_string(), "address" => address.to_string() ); + let kind = DatasetKind::Crucible; let dataset = - db::model::Dataset::new(id, zpool_id, Some(address), kind); + db::model::Dataset::new(id, zpool_id, Some(address), kind, None); self.db_datastore.dataset_upsert(dataset).await?; Ok(()) } diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index d5c853b15b..284e8de2ea 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -384,12 +384,7 @@ impl nexus_test_interface::NexusServer for Server { self.apictx .context .nexus - .upsert_dataset( - dataset_id, - zpool_id, - address, - nexus_db_queries::db::model::DatasetKind::Crucible, - ) + .upsert_crucible_dataset(dataset_id, zpool_id, address) .await .unwrap(); } diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index a50af1990d..49a96934e4 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -2676,6 +2676,52 @@ "required": [ "type" ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "zone_root" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "zone" + ] + } + }, + "required": [ + "name", + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "debug" + ] + } + }, + "required": [ + "type" + ] } ] }, diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index bba3812b94..d42ffd8fae 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -2288,6 +2288,52 @@ "required": [ "type" ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "zone_root" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "zone" + ] + } + }, + "required": [ + "name", + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "debug" + ] + } + }, + "required": [ + "type" + ] } ] }, diff --git 
a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 7fc83ad5d0..e6eec9b7b6 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -507,7 +507,10 @@ CREATE TYPE IF NOT EXISTS omicron.public.dataset_kind AS ENUM ( 'clickhouse', 'clickhouse_keeper', 'external_dns', - 'internal_dns' + 'internal_dns', + 'zone_root', + 'zone', + 'debug' ); /* @@ -533,6 +536,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.dataset ( /* An upper bound on the amount of space that might be in-use */ size_used INT, + /* Only valid if kind = zone -- the name of this zone */ + zone_name TEXT, + /* Crucible must make use of 'size_used'; other datasets manage their own storage */ CONSTRAINT size_used_column_set_for_crucible CHECK ( (kind != 'crucible') OR @@ -542,6 +548,11 @@ CREATE TABLE IF NOT EXISTS omicron.public.dataset ( CONSTRAINT ip_and_port_set_for_crucible CHECK ( (kind != 'crucible') OR (kind = 'crucible' AND ip IS NOT NULL and port IS NOT NULL) + ), + + CONSTRAINT zone_name_for_zone_kind CHECK ( + (kind != 'zone') OR + (kind = 'zone' AND zone_name IS NOT NULL) ) ); diff --git a/schema/omicron-datasets.json b/schema/omicron-datasets.json index 5a5cc2ea18..1821eddac7 100644 --- a/schema/omicron-datasets.json +++ b/schema/omicron-datasets.json @@ -160,6 +160,52 @@ ] } } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "zone_root" + ] + } + } + }, + { + "type": "object", + "required": [ + "name", + "type" + ], + "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "zone" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "debug" + ] + } + } } ] }, diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index 6f763dddbf..ec19863bef 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -925,7 +925,7 @@ impl SledInfo { // enumerates the valid zpool indexes. let allocator = self .u2_zpool_allocators - .entry(kind) + .entry(kind.clone()) .or_insert_with(|| Box::new(0..self.u2_zpools.len())); match allocator.next() { None => Err(PlanError::NotEnoughSleds), diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index 0f6a77fd80..07c8794a73 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -725,7 +725,7 @@ impl ServiceInner { dataset_id: zone.id, request: NexusTypes::DatasetPutRequest { address: dataset_address.to_string(), - kind: *dataset_name.dataset(), + kind: dataset_name.dataset().clone(), }, }) } diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index 9ea3a0fb3c..44fc46987b 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -818,9 +818,13 @@ impl SledAgent { let datasets_result = self.storage().datasets_ensure(config).await?; info!(self.log, "datasets ensure: Updated storage"); - // TODO: See omicron_physical_disks_ensure, below - do we similarly - // need to ensure that old datasets are no longer in-use before we - // return here? + // TODO(https://github.com/oxidecomputer/omicron/issues/6177): + // At the moment, we don't actually remove any datasets -- this function + // just adds new datasets. + // + // Once we start removing old datasets, we should probably ensure that + // they are not longer in-use before returning (similar to + // omicron_physical_disks_ensure). 
Ok(datasets_result) } diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index 59c1dd8bf5..7f61097b6d 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -706,21 +706,22 @@ impl StorageManager { } else if config.generation == ledger_data.generation { info!( log, - "Requested geenration number matches prior request", + "Requested generation number matches prior request", ); if ledger_data != &config { - error!(log, "Requested configuration changed (with the same generation)"); + error!( + log, + "Requested configuration changed (with the same generation)"; + "generation" => ?config.generation + ); return Err(Error::DatasetConfigurationChanged { generation: config.generation, }); } + } else { + info!(log, "Request looks newer than prior requests"); } - - info!( - log, - "Request looks newer than (or identical to) prior requests" - ); ledger } None => { @@ -745,6 +746,11 @@ impl StorageManager { Ok(result) } + // Attempts to ensure that each dataset exist. + // + // Does not return an error, because the [DatasetsManagementResult] type + // includes details about all possible errors that may occur on + // a per-dataset granularity. async fn datasets_ensure_internal( &mut self, log: &Logger, @@ -770,7 +776,7 @@ impl StorageManager { }; if let Err(err) = self.ensure_dataset(config).await { - warn!(log, "Failed to ensure dataset"; "err" => ?err); + warn!(log, "Failed to ensure dataset"; "dataset" => ?status.dataset_name, "err" => ?err); status.err = Some(err.to_string()); }; @@ -989,11 +995,22 @@ impl StorageManager { ))); } - // TODO: Revisit these args, they might need more configuration - // tweaking. - let zoned = true; + let zoned = config.name.dataset().zoned(); + let mountpoint_path = if zoned { + Utf8PathBuf::from("/data") + } else { + config.name.pool().dataset_mountpoint( + &Utf8PathBuf::from("/"), + &config.name.dataset().to_string(), + ) + }; + let mountpoint = Mountpoint::Path(mountpoint_path); + let fs_name = &config.name.full_name(); let do_format = true; + + // The "crypt" dataset needs these details, but should already exist + // by the time we're creating datasets inside. 
let encryption_details = None; let size_details = Some(illumos_utils::zfs::SizeDetails { quota: config.quota, @@ -1002,7 +1019,7 @@ impl StorageManager { }); Zfs::ensure_filesystem( fs_name, - Mountpoint::Path(Utf8PathBuf::from("/data")), + mountpoint, zoned, do_format, encryption_details, From b999bf709050e76c3983ddbff33075a02d9c5689 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 29 Jul 2024 18:43:52 -0700 Subject: [PATCH 12/84] make it a schema change --- nexus/db-model/src/schema_versions.rs | 3 ++- schema/crdb/dataset-kinds-zone-and-debug/up01.sql | 1 + schema/crdb/dataset-kinds-zone-and-debug/up02.sql | 1 + schema/crdb/dataset-kinds-zone-and-debug/up03.sql | 1 + schema/crdb/dataset-kinds-zone-and-debug/up04.sql | 1 + schema/crdb/dataset-kinds-zone-and-debug/up05.sql | 4 ++++ schema/crdb/dbinit.sql | 2 +- 7 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 schema/crdb/dataset-kinds-zone-and-debug/up01.sql create mode 100644 schema/crdb/dataset-kinds-zone-and-debug/up02.sql create mode 100644 schema/crdb/dataset-kinds-zone-and-debug/up03.sql create mode 100644 schema/crdb/dataset-kinds-zone-and-debug/up04.sql create mode 100644 schema/crdb/dataset-kinds-zone-and-debug/up05.sql diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index cc34a3581c..8b4ddce45a 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -17,7 +17,7 @@ use std::collections::BTreeMap; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(83, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(84, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -29,6 +29,7 @@ static KNOWN_VERSIONS: Lazy> = Lazy::new(|| { // | leaving the first copy as an example for the next person. 
// v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(84, "dataset-kinds-zone-and-debug"), KnownVersion::new(83, "dataset-address-optional"), KnownVersion::new(82, "region-port"), KnownVersion::new(81, "add-nullable-filesystem-pool"), diff --git a/schema/crdb/dataset-kinds-zone-and-debug/up01.sql b/schema/crdb/dataset-kinds-zone-and-debug/up01.sql new file mode 100644 index 0000000000..1cfe718d00 --- /dev/null +++ b/schema/crdb/dataset-kinds-zone-and-debug/up01.sql @@ -0,0 +1 @@ +ALTER TYPE omicron.public.dataset_kind ADD VALUE IF NOT EXISTS 'zone_root' AFTER 'internal_dns'; diff --git a/schema/crdb/dataset-kinds-zone-and-debug/up02.sql b/schema/crdb/dataset-kinds-zone-and-debug/up02.sql new file mode 100644 index 0000000000..93178e3685 --- /dev/null +++ b/schema/crdb/dataset-kinds-zone-and-debug/up02.sql @@ -0,0 +1 @@ +ALTER TYPE omicron.public.dataset_kind ADD VALUE IF NOT EXISTS 'zone' AFTER 'zone_root'; diff --git a/schema/crdb/dataset-kinds-zone-and-debug/up03.sql b/schema/crdb/dataset-kinds-zone-and-debug/up03.sql new file mode 100644 index 0000000000..58d215d177 --- /dev/null +++ b/schema/crdb/dataset-kinds-zone-and-debug/up03.sql @@ -0,0 +1 @@ +ALTER TYPE omicron.public.dataset_kind ADD VALUE IF NOT EXISTS 'debug' AFTER 'zone'; diff --git a/schema/crdb/dataset-kinds-zone-and-debug/up04.sql b/schema/crdb/dataset-kinds-zone-and-debug/up04.sql new file mode 100644 index 0000000000..b92bce1b6c --- /dev/null +++ b/schema/crdb/dataset-kinds-zone-and-debug/up04.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.dataset ADD COLUMN IF NOT EXISTS zone_name TEXT; diff --git a/schema/crdb/dataset-kinds-zone-and-debug/up05.sql b/schema/crdb/dataset-kinds-zone-and-debug/up05.sql new file mode 100644 index 0000000000..3f33b79c72 --- /dev/null +++ b/schema/crdb/dataset-kinds-zone-and-debug/up05.sql @@ -0,0 +1,4 @@ +ALTER TABLE omicron.public.dataset ADD CONSTRAINT IF NOT EXISTS zone_name_for_zone_kind CHECK ( + (kind != 'zone') OR + (kind = 'zone' AND zone_name IS NOT NULL) +) diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index e6eec9b7b6..49d338851a 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -4156,7 +4156,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '83.0.0', NULL) + (TRUE, NOW(), NOW(), '84.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; From 1baf7dd6262ce5766e141aee12647bd52efe27fe Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 29 Jul 2024 22:18:33 -0700 Subject: [PATCH 13/84] welp I guess I changed some db queries --- .../tests/output/region_allocate_distinct_sleds.sql | 5 ++++- .../db-queries/tests/output/region_allocate_random_sleds.sql | 5 ++++- .../output/region_allocate_with_snapshot_distinct_sleds.sql | 5 ++++- .../output/region_allocate_with_snapshot_random_sleds.sql | 5 ++++- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql b/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql index 6331770ef5..4e7dde244b 100644 --- a/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql +++ b/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql @@ -270,7 +270,8 @@ WITH dataset.ip, dataset.port, dataset.kind, - dataset.size_used + dataset.size_used, + dataset.zone_name ) ( SELECT @@ -284,6 +285,7 @@ WITH dataset.port, dataset.kind, dataset.size_used, + dataset.zone_name, old_regions.id, old_regions.time_created, old_regions.time_modified, @@ -310,6 +312,7 @@ 
UNION updated_datasets.port, updated_datasets.kind, updated_datasets.size_used, + updated_datasets.zone_name, inserted_regions.id, inserted_regions.time_created, inserted_regions.time_modified, diff --git a/nexus/db-queries/tests/output/region_allocate_random_sleds.sql b/nexus/db-queries/tests/output/region_allocate_random_sleds.sql index e713121d34..b2c164a6d9 100644 --- a/nexus/db-queries/tests/output/region_allocate_random_sleds.sql +++ b/nexus/db-queries/tests/output/region_allocate_random_sleds.sql @@ -268,7 +268,8 @@ WITH dataset.ip, dataset.port, dataset.kind, - dataset.size_used + dataset.size_used, + dataset.zone_name ) ( SELECT @@ -282,6 +283,7 @@ WITH dataset.port, dataset.kind, dataset.size_used, + dataset.zone_name, old_regions.id, old_regions.time_created, old_regions.time_modified, @@ -308,6 +310,7 @@ UNION updated_datasets.port, updated_datasets.kind, updated_datasets.size_used, + updated_datasets.zone_name, inserted_regions.id, inserted_regions.time_created, inserted_regions.time_modified, diff --git a/nexus/db-queries/tests/output/region_allocate_with_snapshot_distinct_sleds.sql b/nexus/db-queries/tests/output/region_allocate_with_snapshot_distinct_sleds.sql index 0b8dc4fca6..97ee23f82e 100644 --- a/nexus/db-queries/tests/output/region_allocate_with_snapshot_distinct_sleds.sql +++ b/nexus/db-queries/tests/output/region_allocate_with_snapshot_distinct_sleds.sql @@ -281,7 +281,8 @@ WITH dataset.ip, dataset.port, dataset.kind, - dataset.size_used + dataset.size_used, + dataset.zone_name ) ( SELECT @@ -295,6 +296,7 @@ WITH dataset.port, dataset.kind, dataset.size_used, + dataset.zone_name, old_regions.id, old_regions.time_created, old_regions.time_modified, @@ -321,6 +323,7 @@ UNION updated_datasets.port, updated_datasets.kind, updated_datasets.size_used, + updated_datasets.zone_name, inserted_regions.id, inserted_regions.time_created, inserted_regions.time_modified, diff --git a/nexus/db-queries/tests/output/region_allocate_with_snapshot_random_sleds.sql b/nexus/db-queries/tests/output/region_allocate_with_snapshot_random_sleds.sql index 9ac945f71d..a1cc103594 100644 --- a/nexus/db-queries/tests/output/region_allocate_with_snapshot_random_sleds.sql +++ b/nexus/db-queries/tests/output/region_allocate_with_snapshot_random_sleds.sql @@ -279,7 +279,8 @@ WITH dataset.ip, dataset.port, dataset.kind, - dataset.size_used + dataset.size_used, + dataset.zone_name ) ( SELECT @@ -293,6 +294,7 @@ WITH dataset.port, dataset.kind, dataset.size_used, + dataset.zone_name, old_regions.id, old_regions.time_created, old_regions.time_modified, @@ -319,6 +321,7 @@ UNION updated_datasets.port, updated_datasets.kind, updated_datasets.size_used, + updated_datasets.zone_name, inserted_regions.id, inserted_regions.time_created, inserted_regions.time_modified, From fdf0644aa0f825d0fb9fb88799087365bbf24b43 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 29 Jul 2024 22:23:04 -0700 Subject: [PATCH 14/84] queries --- .../tests/output/region_allocate_distinct_sleds.sql | 5 ++++- .../db-queries/tests/output/region_allocate_random_sleds.sql | 5 ++++- .../output/region_allocate_with_snapshot_distinct_sleds.sql | 5 ++++- .../output/region_allocate_with_snapshot_random_sleds.sql | 5 ++++- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql b/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql index 6331770ef5..4e7dde244b 100644 --- a/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql +++ 
b/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql @@ -270,7 +270,8 @@ WITH dataset.ip, dataset.port, dataset.kind, - dataset.size_used + dataset.size_used, + dataset.zone_name ) ( SELECT @@ -284,6 +285,7 @@ WITH dataset.port, dataset.kind, dataset.size_used, + dataset.zone_name, old_regions.id, old_regions.time_created, old_regions.time_modified, @@ -310,6 +312,7 @@ UNION updated_datasets.port, updated_datasets.kind, updated_datasets.size_used, + updated_datasets.zone_name, inserted_regions.id, inserted_regions.time_created, inserted_regions.time_modified, diff --git a/nexus/db-queries/tests/output/region_allocate_random_sleds.sql b/nexus/db-queries/tests/output/region_allocate_random_sleds.sql index e713121d34..b2c164a6d9 100644 --- a/nexus/db-queries/tests/output/region_allocate_random_sleds.sql +++ b/nexus/db-queries/tests/output/region_allocate_random_sleds.sql @@ -268,7 +268,8 @@ WITH dataset.ip, dataset.port, dataset.kind, - dataset.size_used + dataset.size_used, + dataset.zone_name ) ( SELECT @@ -282,6 +283,7 @@ WITH dataset.port, dataset.kind, dataset.size_used, + dataset.zone_name, old_regions.id, old_regions.time_created, old_regions.time_modified, @@ -308,6 +310,7 @@ UNION updated_datasets.port, updated_datasets.kind, updated_datasets.size_used, + updated_datasets.zone_name, inserted_regions.id, inserted_regions.time_created, inserted_regions.time_modified, diff --git a/nexus/db-queries/tests/output/region_allocate_with_snapshot_distinct_sleds.sql b/nexus/db-queries/tests/output/region_allocate_with_snapshot_distinct_sleds.sql index 0b8dc4fca6..97ee23f82e 100644 --- a/nexus/db-queries/tests/output/region_allocate_with_snapshot_distinct_sleds.sql +++ b/nexus/db-queries/tests/output/region_allocate_with_snapshot_distinct_sleds.sql @@ -281,7 +281,8 @@ WITH dataset.ip, dataset.port, dataset.kind, - dataset.size_used + dataset.size_used, + dataset.zone_name ) ( SELECT @@ -295,6 +296,7 @@ WITH dataset.port, dataset.kind, dataset.size_used, + dataset.zone_name, old_regions.id, old_regions.time_created, old_regions.time_modified, @@ -321,6 +323,7 @@ UNION updated_datasets.port, updated_datasets.kind, updated_datasets.size_used, + updated_datasets.zone_name, inserted_regions.id, inserted_regions.time_created, inserted_regions.time_modified, diff --git a/nexus/db-queries/tests/output/region_allocate_with_snapshot_random_sleds.sql b/nexus/db-queries/tests/output/region_allocate_with_snapshot_random_sleds.sql index 9ac945f71d..a1cc103594 100644 --- a/nexus/db-queries/tests/output/region_allocate_with_snapshot_random_sleds.sql +++ b/nexus/db-queries/tests/output/region_allocate_with_snapshot_random_sleds.sql @@ -279,7 +279,8 @@ WITH dataset.ip, dataset.port, dataset.kind, - dataset.size_used + dataset.size_used, + dataset.zone_name ) ( SELECT @@ -293,6 +294,7 @@ WITH dataset.port, dataset.kind, dataset.size_used, + dataset.zone_name, old_regions.id, old_regions.time_created, old_regions.time_modified, @@ -319,6 +321,7 @@ UNION updated_datasets.port, updated_datasets.kind, updated_datasets.size_used, + updated_datasets.zone_name, inserted_regions.id, inserted_regions.time_created, inserted_regions.time_modified, From 07511e926494c6ffb12aed5fc266094d45b92dba Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 2 Aug 2024 13:04:01 -0700 Subject: [PATCH 15/84] Neck deep in reconfigurator --- common/src/disk.rs | 10 +- illumos-utils/src/zfs.rs | 8 +- nexus/db-model/src/dataset.rs | 32 +- nexus/db-model/src/dataset_kind.rs | 22 ++ nexus/db-model/src/deployment.rs | 65 +++- 
nexus/db-model/src/schema.rs | 29 ++ .../db-queries/src/db/datastore/deployment.rs | 73 +++- nexus/db-queries/src/db/datastore/rack.rs | 5 + nexus/reconfigurator/execution/src/dns.rs | 4 + .../execution/src/omicron_physical_disks.rs | 1 + .../execution/src/omicron_zones.rs | 1 + .../planning/src/blueprint_builder/builder.rs | 364 ++++++++++++++++++ .../planning/src/blueprint_builder/zones.rs | 22 +- nexus/reconfigurator/planning/src/planner.rs | 51 ++- nexus/reconfigurator/planning/src/system.rs | 10 +- nexus/reconfigurator/preparation/src/lib.rs | 28 +- .../background/tasks/blueprint_execution.rs | 6 +- .../app/background/tasks/blueprint_load.rs | 1 + nexus/src/app/deployment.rs | 3 + nexus/test-utils/src/lib.rs | 1 + nexus/types/src/deployment.rs | 24 ++ nexus/types/src/deployment/planning_input.rs | 20 +- schema/crdb/dbinit.sql | 38 ++ sled-agent/src/backing_fs.rs | 6 +- sled-agent/src/rack_setup/plan/service.rs | 7 +- sled-agent/src/rack_setup/service.rs | 29 +- sled-agent/src/sim/server.rs | 7 +- sled-agent/src/sim/sled_agent.rs | 8 +- sled-storage/src/dataset.rs | 10 +- 29 files changed, 823 insertions(+), 62 deletions(-) diff --git a/common/src/disk.rs b/common/src/disk.rs index b9a259574e..2f74cd0005 100644 --- a/common/src/disk.rs +++ b/common/src/disk.rs @@ -152,14 +152,14 @@ pub struct DatasetConfig { /// The dataset's name pub name: DatasetName, - /// The compression mode to be supplied, if any - pub compression: Option, - /// The upper bound on the amount of storage used by this dataset - pub quota: Option, + pub quota: Option, /// The lower bound on the amount of storage usable by this dataset - pub reservation: Option, + pub reservation: Option, + + /// The compression mode to be supplied, if any + pub compression: Option, } #[derive( diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index 1eaf946911..5e1f445576 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -206,8 +206,8 @@ pub struct EncryptionDetails { #[derive(Debug, Default)] pub struct SizeDetails { - pub quota: Option, - pub reservation: Option, + pub quota: Option, + pub reservation: Option, pub compression: Option, } @@ -485,8 +485,8 @@ impl Zfs { fn apply_properties( name: &str, mountpoint: &Mountpoint, - quota: Option, - reservation: Option, + quota: Option, + reservation: Option, compression: Option, ) -> Result<(), EnsureFilesystemError> { if let Some(quota) = quota { diff --git a/nexus/db-model/src/dataset.rs b/nexus/db-model/src/dataset.rs index d525b80241..0886b51528 100644 --- a/nexus/db-model/src/dataset.rs +++ b/nexus/db-model/src/dataset.rs @@ -2,12 +2,16 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-use super::{DatasetKind, Generation, Region, SqlU16}; +use super::{ByteCount, DatasetKind, Generation, Region, SqlU16}; use crate::collection::DatastoreCollectionConfig; use crate::ipv6; use crate::schema::{dataset, region}; use chrono::{DateTime, Utc}; use db_macros::Asset; +use omicron_common::api::external::Error; +use omicron_uuid_kinds::DatasetUuid; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::ZpoolUuid; use serde::{Deserialize, Serialize}; use std::net::{Ipv6Addr, SocketAddrV6}; use uuid::Uuid; @@ -42,6 +46,10 @@ pub struct Dataset { pub kind: DatasetKind, pub size_used: Option, zone_name: Option, + + quota: Option, + reservation: Option, + compression: Option, } impl Dataset { @@ -66,6 +74,9 @@ impl Dataset { kind, size_used, zone_name, + quota: None, + reservation: None, + compression: None, } } @@ -78,6 +89,25 @@ impl Dataset { } } +impl TryFrom for omicron_common::disk::DatasetConfig { + type Error = Error; + + fn try_from(dataset: Dataset) -> Result { + Ok(Self { + id: DatasetUuid::from_untyped_uuid(dataset.identity.id), + name: omicron_common::disk::DatasetName::new( + omicron_common::zpool_name::ZpoolName::new_external( + ZpoolUuid::from_untyped_uuid(dataset.pool_id), + ), + dataset.kind.try_into_api(dataset.zone_name)?, + ), + quota: dataset.quota.map(|q| q.to_bytes()), + reservation: dataset.reservation.map(|r| r.to_bytes()), + compression: dataset.compression, + }) + } +} + // Datasets contain regions impl DatastoreCollectionConfig for Dataset { type CollectionId = Uuid; diff --git a/nexus/db-model/src/dataset_kind.rs b/nexus/db-model/src/dataset_kind.rs index 2e71b96a41..d3b45f83d9 100644 --- a/nexus/db-model/src/dataset_kind.rs +++ b/nexus/db-model/src/dataset_kind.rs @@ -4,6 +4,7 @@ use super::impl_enum_type; use omicron_common::api::internal; +use omicron_common::api::external::Error; use serde::{Deserialize, Serialize}; impl_enum_type!( @@ -27,6 +28,27 @@ impl_enum_type!( Debug => b"debug" ); +impl DatasetKind { + pub fn try_into_api(self, zone_name: Option) -> Result { + use internal::shared::DatasetKind as ApiKind; + let k = match (self, zone_name) { + (Self::Crucible, None) => ApiKind::Crucible, + (Self::Cockroach, None) => ApiKind::Cockroach, + (Self::Clickhouse, None) => ApiKind::Clickhouse, + (Self::ClickhouseKeeper, None) => ApiKind::ClickhouseKeeper, + (Self::ExternalDns, None) => ApiKind::ExternalDns, + (Self::InternalDns, None) => ApiKind::InternalDns, + (Self::ZoneRoot, None) => ApiKind::ZoneRoot, + (Self::Zone, Some(name)) => ApiKind::Zone { name }, + (Self::Debug, None) => ApiKind::Debug, + (Self::Zone, None) => return Err(Error::internal_error("Zone kind needs name")), + (_, Some(_)) => return Err(Error::internal_error("Only zone kind needs name")), + }; + + Ok(k) + } +} + impl From for DatasetKind { fn from(k: internal::shared::DatasetKind) -> Self { match k { diff --git a/nexus/db-model/src/deployment.rs b/nexus/db-model/src/deployment.rs index 6bef893a5b..34c59ef938 100644 --- a/nexus/db-model/src/deployment.rs +++ b/nexus/db-model/src/deployment.rs @@ -8,17 +8,19 @@ use crate::inventory::ZoneType; use crate::omicron_zone_config::{OmicronZone, OmicronZoneNic}; use crate::schema::{ - blueprint, bp_omicron_physical_disk, bp_omicron_zone, bp_omicron_zone_nic, + blueprint, bp_omicron_dataset, bp_omicron_physical_disk, bp_omicron_zone, + bp_omicron_zone_nic, bp_sled_omicron_datasets, bp_sled_omicron_physical_disks, bp_sled_omicron_zones, bp_sled_state, bp_target, }; use crate::typed_uuid::DbTypedUuid; use crate::{ - impl_enum_type, ipv6, 
Generation, MacAddr, Name, SledState, SqlU16, SqlU32, - SqlU8, + impl_enum_type, ipv6, ByteCount, Generation, MacAddr, Name, SledState, + SqlU16, SqlU32, SqlU8, }; use chrono::{DateTime, Utc}; use ipnetwork::IpNetwork; +use nexus_types::deployment::BlueprintDatasetConfig; use nexus_types::deployment::BlueprintPhysicalDiskConfig; use nexus_types::deployment::BlueprintPhysicalDisksConfig; use nexus_types::deployment::BlueprintTarget; @@ -31,7 +33,7 @@ use omicron_common::disk::DiskIdentity; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; -use omicron_uuid_kinds::{ExternalIpKind, SledKind, ZpoolKind}; +use omicron_uuid_kinds::{DatasetKind, ExternalIpKind, SledKind, ZpoolKind}; use uuid::Uuid; /// See [`nexus_types::deployment::Blueprint`]. @@ -197,6 +199,61 @@ impl From for BlueprintPhysicalDiskConfig { } } +#[derive(Queryable, Clone, Debug, Selectable, Insertable)] +#[diesel(table_name = bp_sled_omicron_datasets)] +pub struct BpSledOmicronDatasets { + pub blueprint_id: Uuid, + pub sled_id: DbTypedUuid, + pub generation: Generation, +} + +impl BpSledOmicronDatasets { +// pub fn new( +// blueprint_id: Uuid, +// sled_id: Uuid, +// ) { +// } +} + +/// DB representation of [BlueprintDatasetConfig] +#[derive(Queryable, Clone, Debug, Selectable, Insertable)] +#[diesel(table_name = bp_omicron_dataset)] +pub struct BpOmicronDataset { + pub blueprint_id: Uuid, + pub sled_id: DbTypedUuid, + pub id: DbTypedUuid, + + pub pool_id: DbTypedUuid, + pub kind: crate::DatasetKind, + zone_name: Option, + + pub quota: Option, + pub reservation: Option, + pub compression: Option, +} + +impl BpOmicronDataset { + // TODO: Needs constructor? +} + +impl TryFrom for BlueprintDatasetConfig { + type Error = anyhow::Error; + + fn try_from(dataset: BpOmicronDataset) -> Result { + Ok(Self { + id: dataset.id.into(), + pool: omicron_common::zpool_name::ZpoolName::new_external( + dataset.pool_id.into(), + ), + kind: crate::DatasetKind::try_into_api(dataset.kind, dataset.zone_name)?, + quota: dataset.quota.map(|b| b.into()), + reservation: dataset.reservation.map(|b| b.into()), + compression: dataset.compression, + }) + } +} + + /// See [`nexus_types::deployment::BlueprintZonesConfig`]. #[derive(Queryable, Clone, Debug, Selectable, Insertable)] #[diesel(table_name = bp_sled_omicron_zones)] diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index d76d380afb..b6f8ed5063 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1028,6 +1028,10 @@ table! { kind -> crate::DatasetKindEnum, size_used -> Nullable, zone_name -> Nullable, + + quota -> Nullable, + reservation -> Nullable, + compression -> Nullable, } } @@ -1577,6 +1581,31 @@ table! { } } +table! { + bp_sled_omicron_datasets (blueprint_id, sled_id) { + blueprint_id -> Uuid, + sled_id -> Uuid, + + generation -> Int8, + } +} + +table! { + bp_omicron_dataset (blueprint_id, id) { + blueprint_id -> Uuid, + sled_id -> Uuid, + id -> Uuid, + + pool_id -> Uuid, + kind -> crate::DatasetKindEnum, + zone_name -> Nullable, + + quota -> Nullable, + reservation -> Nullable, + compression -> Nullable, + } +} + table! 
{ bp_sled_omicron_zones (blueprint_id, sled_id) { blueprint_id -> Uuid, diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 617413f172..ffaf480ea3 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -38,11 +38,13 @@ use nexus_db_model::Blueprint as DbBlueprint; use nexus_db_model::BpOmicronPhysicalDisk; use nexus_db_model::BpOmicronZone; use nexus_db_model::BpOmicronZoneNic; +use nexus_db_model::BpSledOmicronDatasets; use nexus_db_model::BpSledOmicronPhysicalDisks; use nexus_db_model::BpSledOmicronZones; use nexus_db_model::BpSledState; use nexus_db_model::BpTarget; use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintDatasetsConfig; use nexus_types::deployment::BlueprintMetadata; use nexus_types::deployment::BlueprintPhysicalDisksConfig; use nexus_types::deployment::BlueprintTarget; @@ -450,6 +452,50 @@ impl DataStore { blueprint_physical_disks }; + // Do the same thing we just did for zones, but for datasets too. + let mut blueprint_datasets: BTreeMap< + SledUuid, + BlueprintDatasetsConfig, + > = { + use db::schema::bp_sled_omicron_datasets::dsl; + + let mut blueprint_datasets = BTreeMap::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = paginated( + dsl::bp_sled_omicron_datasets, + dsl::sled_id, + &p.current_pagparams(), + ) + .filter(dsl::blueprint_id.eq(blueprint_id)) + .select(BpSledOmicronDatasets::as_select()) + .load_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })?; + + paginator = p.found_batch(&batch, &|s| s.sled_id); + + for s in batch { + let old = blueprint_datasets.insert( + s.sled_id.into(), + BlueprintDatasetsConfig { + generation: *s.generation, + datasets: Vec::new(), + }, + ); + bail_unless!( + old.is_none(), + "found duplicate sled ID in bp_sled_omicron_datasets: {}", + s.sled_id + ); + } + } + + blueprint_datasets + }; + // Assemble a mutable map of all the NICs found, by NIC id. As we // match these up with the corresponding zone below, we'll remove items // from this set. That way we can tell if the same NIC was used twice @@ -617,11 +663,16 @@ impl DataStore { for (_, disks_config) in blueprint_disks.iter_mut() { disks_config.disks.sort_unstable_by_key(|d| d.id); } + // Sort all datasets to match what blueprint builders do. 
+ for (_, datasets_config) in blueprint_datasets.iter_mut() { + datasets_config.datasets.sort_unstable_by_key(|d| d.id); + } Ok(Blueprint { id: blueprint_id, blueprint_zones, blueprint_disks, + blueprint_datasets, sled_state, parent_blueprint_id, internal_dns_version, @@ -1418,16 +1469,20 @@ mod tests { .map(|i| { ( ZpoolUuid::new_v4(), - SledDisk { - disk_identity: DiskIdentity { - vendor: String::from("v"), - serial: format!("s-{i}"), - model: String::from("m"), + ( + SledDisk { + disk_identity: DiskIdentity { + vendor: String::from("v"), + serial: format!("s-{i}"), + model: String::from("m"), + }, + disk_id: PhysicalDiskUuid::new_v4(), + policy: PhysicalDiskPolicy::InService, + state: PhysicalDiskState::Active, }, - disk_id: PhysicalDiskUuid::new_v4(), - policy: PhysicalDiskPolicy::InService, - state: PhysicalDiskState::Active, - }, + // Datasets + vec![] + ) ) }) .collect(); diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index c9fb61b15a..17a1fcf15f 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -1058,6 +1058,7 @@ mod test { id: Uuid::new_v4(), blueprint_zones: BTreeMap::new(), blueprint_disks: BTreeMap::new(), + blueprint_datasets: BTreeMap::new(), sled_state: BTreeMap::new(), cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, @@ -1545,6 +1546,7 @@ mod test { sled_state: sled_states_active(blueprint_zones.keys().copied()), blueprint_zones, blueprint_disks: BTreeMap::new(), + blueprint_datasets: BTreeMap::new(), cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, parent_blueprint_id: None, @@ -1806,6 +1808,7 @@ mod test { sled_state: sled_states_active(blueprint_zones.keys().copied()), blueprint_zones, blueprint_disks: BTreeMap::new(), + blueprint_datasets: BTreeMap::new(), cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, parent_blueprint_id: None, @@ -2020,6 +2023,7 @@ mod test { sled_state: sled_states_active(blueprint_zones.keys().copied()), blueprint_zones, blueprint_disks: BTreeMap::new(), + blueprint_datasets: BTreeMap::new(), cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, parent_blueprint_id: None, @@ -2163,6 +2167,7 @@ mod test { sled_state: sled_states_active(blueprint_zones.keys().copied()), blueprint_zones, blueprint_disks: BTreeMap::new(), + blueprint_datasets: BTreeMap::new(), cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, parent_blueprint_id: None, diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index 3504d41e4d..8379c69cfc 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -604,6 +604,7 @@ mod test { id: Uuid::new_v4(), blueprint_zones, blueprint_disks: BTreeMap::new(), + blueprint_datasets: BTreeMap::new(), sled_state, cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, @@ -1235,6 +1236,8 @@ mod test { .unwrap(); let zpool_rows = datastore.zpool_list_all_external_batched(&opctx).await.unwrap(); + let dataset_rows = + datastore.dataset_list_all_batched(&opctx, None).await.unwrap(); let ip_pool_range_rows = { let (authz_service_ip_pool, _) = datastore.ip_pools_service_lookup(&opctx).await.unwrap(); @@ -1247,6 +1250,7 @@ mod test { let mut builder = PlanningInputFromDb { sled_rows: &sled_rows, zpool_rows: &zpool_rows, + dataset_rows: &dataset_rows, 
ip_pool_range_rows: &ip_pool_range_rows, internal_dns_version: Generation::from( u32::try_from(dns_initial_internal.generation).unwrap(), diff --git a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs index 9dcaa098d5..73dacf1c91 100644 --- a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs +++ b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs @@ -169,6 +169,7 @@ mod test { id, blueprint_zones: BTreeMap::new(), blueprint_disks, + blueprint_datasets: BTreeMap::new(), sled_state: BTreeMap::new(), cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, diff --git a/nexus/reconfigurator/execution/src/omicron_zones.rs b/nexus/reconfigurator/execution/src/omicron_zones.rs index acbb7a6b33..c8c02531a0 100644 --- a/nexus/reconfigurator/execution/src/omicron_zones.rs +++ b/nexus/reconfigurator/execution/src/omicron_zones.rs @@ -343,6 +343,7 @@ mod test { id, blueprint_zones, blueprint_disks: BTreeMap::new(), + blueprint_datasets: BTreeMap::new(), sled_state: BTreeMap::new(), cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 09ae4132f3..090d67b250 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -16,6 +16,8 @@ use nexus_sled_agent_shared::inventory::OmicronZoneDataset; use nexus_sled_agent_shared::inventory::ZoneKind; use nexus_types::deployment::blueprint_zone_type; use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintDatasetConfig; +use nexus_types::deployment::BlueprintDatasetsConfig; use nexus_types::deployment::BlueprintPhysicalDiskConfig; use nexus_types::deployment::BlueprintPhysicalDisksConfig; use nexus_types::deployment::BlueprintZoneConfig; @@ -39,10 +41,14 @@ use omicron_common::address::get_switch_zone_address; use omicron_common::address::CP_SERVICES_RESERVED_ADDRESSES; use omicron_common::address::NTP_PORT; use omicron_common::address::SLED_RESERVED_ADDRESSES; +use omicron_common::disk::DatasetConfig; +use omicron_common::disk::DatasetName; use omicron_common::api::external::Generation; use omicron_common::api::external::Vni; +use omicron_common::api::internal::shared::DatasetKind; use omicron_common::api::internal::shared::NetworkInterface; use omicron_common::api::internal::shared::NetworkInterfaceKind; +use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::ExternalIpKind; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneKind; @@ -57,6 +63,7 @@ use slog::error; use slog::info; use slog::o; use slog::Logger; +use std::borrow::Cow; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::collections::HashSet; @@ -134,6 +141,7 @@ pub enum EnsureMultiple { pub(crate) enum Operation { AddZone { sled_id: SledUuid, kind: ZoneKind }, UpdateDisks { sled_id: SledUuid, added: usize, removed: usize }, + UpdateDatasets { sled_id: SledUuid, added: usize, removed: usize }, ZoneExpunged { sled_id: SledUuid, reason: ZoneExpungeReason, count: usize }, } @@ -146,6 +154,9 @@ impl fmt::Display for Operation { Self::UpdateDisks { sled_id, added, removed } => { write!(f, "sled {sled_id}: added {added} disks, removed {removed} disks") } + Self::UpdateDatasets { sled_id, added, removed } => { + write!(f, "sled {sled_id}: added {added} 
datasets, removed {removed} datasets") + } Self::ZoneExpunged { sled_id, reason, count } => { let reason = match reason { ZoneExpungeReason::DiskExpunged => { @@ -198,6 +209,7 @@ pub struct BlueprintBuilder<'a> { // corresponding fields in `Blueprint`. pub(super) zones: BlueprintZonesBuilder<'a>, disks: BlueprintDisksBuilder<'a>, + datasets: BlueprintDatasetsBuilder<'a>, sled_state: BTreeMap, cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade, @@ -259,6 +271,7 @@ impl<'a> BlueprintBuilder<'a> { id: rng.blueprint_rng.next(), blueprint_zones, blueprint_disks: BTreeMap::new(), + blueprint_datasets: BTreeMap::new(), sled_state, parent_blueprint_id: None, internal_dns_version: Generation::new(), @@ -321,6 +334,7 @@ impl<'a> BlueprintBuilder<'a> { external_networking, zones: BlueprintZonesBuilder::new(parent_blueprint), disks: BlueprintDisksBuilder::new(parent_blueprint), + datasets: BlueprintDatasetsBuilder::new(parent_blueprint), sled_state, cockroachdb_setting_preserve_downgrade: parent_blueprint .cockroachdb_setting_preserve_downgrade, @@ -355,10 +369,14 @@ impl<'a> BlueprintBuilder<'a> { let blueprint_disks = self .disks .into_disks_map(self.input.all_sled_ids(SledFilter::InService)); + let blueprint_datasets = self + .datasets + .into_datasets_map(self.input.all_sled_ids(SledFilter::InService)); Blueprint { id: self.rng.blueprint_rng.next(), blueprint_zones, blueprint_disks, + blueprint_datasets, sled_state: self.sled_state, parent_blueprint_id: Some(self.parent_blueprint.id), internal_dns_version: self.input.internal_dns_version(), @@ -613,6 +631,272 @@ impl<'a> BlueprintBuilder<'a> { Ok(EnsureMultiple::Changed { added, removed }) } + /// Ensures that a zpool has the following datasets, as recorded in the + /// blueprint: + /// - Debug + /// - Zone Root + /// + /// If these datasets do not exist: + /// - We return them from "database_datasets", if they exist there + /// - Otherwise, we create them + /// + /// This function returns all new datasets that should be added to the + /// blueprint. 
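+    ///
+    /// For example, calling this for a zpool with no datasets recorded in
+    /// either the blueprint or the database yields two new entries, keyed by
+    /// freshly generated `DatasetUuid`s: a Debug dataset with a 100 GiB quota
+    /// and a ZoneRoot dataset with no quota, each paired with `zpool_id`.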
+ pub fn zpool_ensure_fundamental_datasets( + zpool_id: ZpoolUuid, + blueprint_datasets: &Vec<&BlueprintDatasetConfig>, + database_datasets: &Vec, + ) -> BTreeMap { + let mut new_datasets = BTreeMap::new(); + let mut bp_already_has_debug = false; + let mut bp_already_has_zone_root = false; + for dataset in blueprint_datasets { + match dataset.kind { + DatasetKind::Debug => bp_already_has_debug = true, + DatasetKind::ZoneRoot => bp_already_has_zone_root = true, + _ => (), + } + } + + let mut db_debug = None; + let mut db_zone_root = None; + for dataset in database_datasets { + match dataset.name.dataset() { + DatasetKind::Debug => db_debug = Some(dataset), + DatasetKind::ZoneRoot => db_zone_root = Some(dataset), + _ => (), + } + }; + + if !bp_already_has_debug { + if let Some(db_debug) = db_debug { + new_datasets.insert(db_debug.id, (db_debug.name.pool().id(), db_debug.clone())); + } else { + let id = DatasetUuid::new_v4(); + new_datasets.insert( + id, + ( + zpool_id, + DatasetConfig { + id, + name: DatasetName::new(ZpoolName::new_external( + zpool_id, + ), DatasetKind::Debug), + quota: Some(100 * (1 << 30)), + reservation: None, + compression: None, + } + ) + ); + } + } + + if !bp_already_has_zone_root { + if let Some(db_zone_root) = db_zone_root { + new_datasets.insert(db_zone_root.id, (db_zone_root.name.pool().id(), db_zone_root.clone())); + } else { + let id = DatasetUuid::new_v4(); + new_datasets.insert( + id, + ( + zpool_id, + DatasetConfig { + id, + name: DatasetName::new(ZpoolName::new_external( + zpool_id, + ), DatasetKind::ZoneRoot), + quota: None, + reservation: None, + compression: None, + } + ) + ); + } + } + new_datasets + } + + pub fn sled_ensure_datasets( + &mut self, + sled_id: SledUuid, + resources: &SledResources, + ) -> Result { + let (additions, removals) = { + // All blueprint datasets, known to this blueprint or the last. + // + // Indexed by dataset ID. + let blueprint_datasets: BTreeMap<_, _> = self + .datasets + .current_sled_datasets(sled_id) + .map(|dataset| { + (dataset.id, dataset) + }) + .collect(); + // Blueprint datasets, indexed by zpool ID. + let mut blueprint_datasets_by_zpool = BTreeMap::<_, Vec<&BlueprintDatasetConfig>>::new(); + for dataset in blueprint_datasets.values() { + blueprint_datasets_by_zpool.entry(dataset.pool.id()) + .and_modify(|values: &mut Vec<_>| values.push(dataset)) + .or_insert_with(|| vec![dataset]); + } + // All blueprint zpools, regardless of whether or not they + // currently contain datasets or not. + let blueprint_zpools: BTreeSet<_> = self + .disks + .current_sled_disks(sled_id) + .map(|disk| disk.pool_id) + .collect(); + + // All DB datasets, indexed by zpool ID + let database_datasets_by_zpool: BTreeMap<_, &Vec<_>> = resources + .all_datasets(ZpoolFilter::InService) + .collect(); + // All DB datasets, indexed by dataset ID + let database_datasets: BTreeMap<_, _> = database_datasets_by_zpool + .clone() + .into_iter() + .flat_map(|(zpool, datasets)| { + let zpool = *zpool; + datasets.iter().map(move |dataset| { + (dataset.id, (zpool, dataset)) + }) + }) + .collect(); + + // New datasets which we plan on adding to the blueprint. + // + // During execution, datasets added to the blueprint will be added + // into the DB, if they don't already exist there. + let mut new_datasets = BTreeMap::new(); + + // Datasets that should exist on every zpool. + // + // Ensure these exist in the blueprint, but check for them in the DB + // before deciding to make new datasets. 
+ for zpool_id in &blueprint_zpools { + let bp = blueprint_datasets_by_zpool.get(zpool_id) + .map(Cow::Borrowed) + .unwrap_or_else(|| Cow::Owned(vec![])); + let db = database_datasets_by_zpool.get(zpool_id) + .map(|v| Cow::Borrowed(*v)) + .unwrap_or_else(|| Cow::Owned(vec![])); + + let mut added_datasets = Self::zpool_ensure_fundamental_datasets(*zpool_id, &bp, &db); + new_datasets.append( + &mut added_datasets + ); + } + + // Datasets that should exist because our zones need them + for (zone, _zone_state) in self.zones.current_sled_zones(sled_id) { + if !zone.disposition.matches(BlueprintZoneFilter::ShouldBeRunning) { + continue; + } + + // TODO: check if the dataset(s) already exist? + + // Dataset for transient zone filesystem + if let Some(fs_zpool) = &zone.filesystem_pool { + let name = format!( + "oxp_{}_{}", + zone.zone_type.kind().zone_prefix(), zone.id, + ); + let dataset_kind = DatasetKind::Zone { name }; + let dataset_name = DatasetName::new( + fs_zpool.clone(), + dataset_kind, + ); + + let id = DatasetUuid::new_v4(); + new_datasets.insert( + id, + ( + fs_zpool.id(), + DatasetConfig { + id, + name: dataset_name, + quota: None, + reservation: None, + compression: None, + } + ) + ); + } + + // Dataset for durable dataset co-located with zone + if let Some(dataset) = zone.zone_type.durable_dataset() { + let zpool = &dataset.dataset.pool_name; + let dataset_name = DatasetName::new( + zpool.clone(), + dataset.kind, + ); + + let id = DatasetUuid::new_v4(); + new_datasets.insert( + id, + ( + zpool.id(), + DatasetConfig { + id, + name: dataset_name, + quota: None, + reservation: None, + compression: None, + } + ) + ); + } + } + + // TODO: Note that we also have datasets in "zone/" for propolis + // zones, but these are not currently being tracked by blueprints. + + // TODO: Ensure zone datasets exist too + // TODO: upsert dataset records during execution + // NOTE: we add dataset records for durable datasets during + // the execution phase? 
need a different addition/removal criteria + + // TODO: For each in-service disk, ensure that we add zone root + debug + // TODO: Iterate over all zones, ensure they have the zones needed + // (transient + durable) + + // Add any disks that appear in the database, but not the blueprint + let additions = database_datasets + .iter() + .filter_map(|(dataset_id, (zpool, dataset))| { + if !blueprint_datasets.contains_key(dataset_id) { + Some(BlueprintDatasetConfig { + id: *dataset_id, + pool: ZpoolName::new_external(*zpool), + kind: dataset.name.dataset().clone(), + quota: dataset.quota.map(|q| q.try_into().unwrap()), + reservation: dataset.reservation.map(|r| r.try_into().unwrap()), + compression: dataset.compression.clone(), + }) + } else { + None + } + }) + .collect::>(); + + // Remove any datasets that appear in the blueprint, but not the database + let removals: HashSet = blueprint_datasets + .keys() + .filter_map(|dataset_id| { + if !database_datasets.contains_key(dataset_id) { + Some(*dataset_id) + } else { + None + } + }) + .collect(); + + (additions, removals) + }; + + todo!(); + } + pub fn sled_ensure_zone_ntp( &mut self, sled_id: SledUuid, @@ -1266,6 +1550,86 @@ impl<'a> BlueprintDisksBuilder<'a> { } } +/// Helper for working with sets of datasets on each sled +struct BlueprintDatasetsBuilder<'a> { + changed_datasets: BTreeMap, + parent_datasets: &'a BTreeMap, +} + +impl<'a> BlueprintDatasetsBuilder<'a> { + pub fn new(parent_blueprint: &'a Blueprint) -> BlueprintDatasetsBuilder { + BlueprintDatasetsBuilder { + changed_datasets: BTreeMap::new(), + parent_datasets: &parent_blueprint.blueprint_datasets, + } + } + + pub fn change_sled_datasets( + &mut self, + sled_id: SledUuid, + ) -> &mut BlueprintDatasetsConfig { + self.changed_datasets.entry(sled_id).or_insert_with(|| { + if let Some(old_sled_datasets) = self.parent_datasets.get(&sled_id) { + BlueprintDatasetsConfig { + generation: old_sled_datasets.generation.next(), + datasets: old_sled_datasets.datasets.clone(), + } + } else { + BlueprintDatasetsConfig { + generation: Generation::new(), + datasets: vec![], + } + } + }) + } + + /// Iterates over the list of Omicron datasets currently configured for this + /// sled in the blueprint that's being built + pub fn current_sled_datasets( + &self, + sled_id: SledUuid, + ) -> Box + '_> { + if let Some(sled_datasets) = self + .changed_datasets + .get(&sled_id) + .or_else(|| self.parent_datasets.get(&sled_id)) + { + Box::new(sled_datasets.datasets.iter()) + } else { + Box::new(std::iter::empty()) + } + } + + /// Produces an owned map of datasets for the requested sleds + pub fn into_datasets_map( + mut self, + sled_ids: impl Iterator, + ) -> BTreeMap { + sled_ids + .map(|sled_id| { + // Start with self.changed_datasets, which contains entries for any + // sled whose datasets config is changing in this blueprint. + let mut datasets = self + .changed_datasets + .remove(&sled_id) + // If it's not there, use the config from the parent + // blueprint. + .or_else(|| self.parent_datasets.get(&sled_id).cloned()) + // If it's not there either, then this must be a new sled + // and we haven't added any datasets to it yet. Use the + // standard initial config. 
+ .unwrap_or_else(|| BlueprintDatasetsConfig { + generation: Generation::new(), + datasets: vec![], + }); + datasets.datasets.sort_unstable_by_key(|d| d.id); + + (sled_id, datasets) + }) + .collect() + } +} + #[cfg(test)] pub mod test { use super::*; diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs b/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs index 6cb76539ec..c707702be4 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs @@ -242,16 +242,20 @@ mod tests { ), zpools: BTreeMap::from([( ZpoolUuid::new_v4(), - SledDisk { - disk_identity: DiskIdentity { - vendor: String::from("fake-vendor"), - serial: String::from("fake-serial"), - model: String::from("fake-model"), + ( + SledDisk { + disk_identity: DiskIdentity { + vendor: String::from("fake-vendor"), + serial: String::from("fake-serial"), + model: String::from("fake-model"), + }, + disk_id: PhysicalDiskUuid::new_v4(), + policy: PhysicalDiskPolicy::InService, + state: PhysicalDiskState::Active, }, - disk_id: PhysicalDiskUuid::new_v4(), - policy: PhysicalDiskPolicy::InService, - state: PhysicalDiskState::Active, - }, + // Datasets: Leave empty + vec![] + ), )]), }, }, diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 509c6722cb..e656efdb0f 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -249,6 +249,15 @@ impl<'a> Planner<'a> { // we send this request first. } + // TODO: Ensure the "baseline" datasets exist (zone root, debug) + // TODO: Ensure all durable datasets exist (with zone allocation) + // TODO: Ensure all transient datasets exist (with zone allocation) + // + // NOTE: Make sure this works even if the zone was already + // provisioned? + // + // TODO: Ensure that all these datasets get deleted eventually? + // Check for an NTP zone. Every sled should have one. If it's not // there, all we can do is provision that one zone. We have to wait // for that to succeed and synchronize the clock before we can @@ -339,7 +348,32 @@ impl<'a> Planner<'a> { } } - self.do_plan_add_discretionary_zones(&sleds_waiting_for_ntp_zone) + self.do_plan_add_discretionary_zones(&sleds_waiting_for_ntp_zone)?; + + // Now that we've added all the disks and zones we plan on adding, + // ensure that all sleds have the datasets they need to have. + self.do_plan_datasets()?; + + Ok(()) + } + + fn do_plan_datasets(&mut self) -> Result<(), Error> { + for (sled_id, sled_resources) in self.input.all_sled_resources(SledFilter::InService) { + if let EnsureMultiple::Changed { added, removed } = + self.blueprint.sled_ensure_datasets(sled_id, &sled_resources)? 
{ + info!( + &self.log, + "altered datasets"; + "sled_id" => %sled_id + ); + self.blueprint.record_operation(Operation::UpdateDatasets { + sled_id, + added, + removed, + }); + } + } + Ok(()) } fn do_plan_add_discretionary_zones( @@ -1173,13 +1207,20 @@ mod test { for _ in 0..NEW_IN_SERVICE_DISKS { sled_details.resources.zpools.insert( ZpoolUuid::from(zpool_rng.next()), - new_sled_disk(PhysicalDiskPolicy::InService), + ( + new_sled_disk(PhysicalDiskPolicy::InService), + vec![], + ) + ); } for _ in 0..NEW_EXPUNGED_DISKS { sled_details.resources.zpools.insert( ZpoolUuid::from(zpool_rng.next()), - new_sled_disk(PhysicalDiskPolicy::Expunged), + ( + new_sled_disk(PhysicalDiskPolicy::Expunged), + vec![], + ) ); } @@ -1247,7 +1288,7 @@ mod test { } } let (_, sled_details) = builder.sleds_mut().iter_mut().next().unwrap(); - let (_, disk) = sled_details + let (_, (disk, _datasets)) = sled_details .resources .zpools .iter_mut() @@ -1362,7 +1403,7 @@ mod test { // For that pool, find the physical disk behind it, and mark it // expunged. let (_, sled_details) = builder.sleds_mut().iter_mut().next().unwrap(); - let disk = sled_details + let (disk, _datasets) = sled_details .resources .zpools .get_mut(&pool_to_expunge.id()) diff --git a/nexus/reconfigurator/planning/src/system.rs b/nexus/reconfigurator/planning/src/system.rs index cef0c81b6f..483d9f1468 100644 --- a/nexus/reconfigurator/planning/src/system.rs +++ b/nexus/reconfigurator/planning/src/system.rs @@ -38,6 +38,7 @@ use omicron_common::address::RACK_PREFIX; use omicron_common::address::SLED_PREFIX; use omicron_common::api::external::ByteCount; use omicron_common::api::external::Generation; +use omicron_common::disk::DatasetConfig; use omicron_common::disk::DiskIdentity; use omicron_common::disk::DiskVariant; use omicron_uuid_kinds::GenericUuid; @@ -448,7 +449,7 @@ struct Sled { sled_subnet: Ipv6Subnet, inventory_sp: Option<(u16, SpState)>, inventory_sled_agent: Inventory, - zpools: BTreeMap, + zpools: BTreeMap)>, policy: SledPolicy, } @@ -485,7 +486,8 @@ impl Sled { policy: PhysicalDiskPolicy::InService, state: PhysicalDiskState::Active, }; - (zpool, disk) + let datasets = vec![]; + (zpool, (disk, datasets)) }) .collect(); let inventory_sp = match hardware { @@ -547,8 +549,8 @@ impl Sled { disks: zpools .values() .enumerate() - .map(|(i, d)| InventoryDisk { - identity: d.disk_identity.clone(), + .map(|(i, (disk, _datasets))| InventoryDisk { + identity: disk.disk_identity.clone(), variant: DiskVariant::U2, slot: i64::try_from(i).unwrap(), }) diff --git a/nexus/reconfigurator/preparation/src/lib.rs b/nexus/reconfigurator/preparation/src/lib.rs index 68971ec3e1..188ded9406 100644 --- a/nexus/reconfigurator/preparation/src/lib.rs +++ b/nexus/reconfigurator/preparation/src/lib.rs @@ -38,6 +38,7 @@ use omicron_common::address::NEXUS_REDUNDANCY; use omicron_common::address::SLED_PREFIX; use omicron_common::api::external::Error; use omicron_common::api::external::LookupType; +use omicron_common::disk::DatasetConfig; use omicron_common::disk::DiskIdentity; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneUuid; @@ -57,6 +58,7 @@ pub struct PlanningInputFromDb<'a> { pub sled_rows: &'a [nexus_db_model::Sled], pub zpool_rows: &'a [(nexus_db_model::Zpool, nexus_db_model::PhysicalDisk)], + pub dataset_rows: &'a [nexus_db_model::Dataset], pub ip_pool_range_rows: &'a [nexus_db_model::IpPoolRange], pub external_ip_rows: &'a [nexus_db_model::ExternalIp], pub service_nic_rows: &'a [nexus_db_model::ServiceNetworkInterface], @@ -88,6 +90,24 @@ 
impl PlanningInputFromDb<'_> { ); let mut zpools_by_sled_id = { + // Gather all the datasets first, by Zpool ID + let mut datasets: Vec<_> = self.dataset_rows.iter() + .map(|dataset| { + ( + ZpoolUuid::from_untyped_uuid(dataset.pool_id), + dataset.clone(), + ) + }) + .collect(); + datasets.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); + let mut datasets_by_zpool: BTreeMap<_, Vec<_>> = BTreeMap::new(); + for (zpool_id, dataset) in datasets { + datasets_by_zpool.entry(zpool_id) + .or_default() + .push(DatasetConfig::try_from(dataset)?); + } + + // Iterate over all Zpools, identifying their disks and datasets let mut zpools = BTreeMap::new(); for (zpool, disk) in self.zpool_rows { let sled_zpool_names = @@ -104,7 +124,8 @@ impl PlanningInputFromDb<'_> { state: disk.disk_state.into(), }; - sled_zpool_names.insert(zpool_id, disk); + let datasets = datasets_by_zpool.remove(&zpool_id).unwrap_or_else(|| vec![]); + sled_zpool_names.insert(zpool_id, (disk, datasets)); } zpools }; @@ -199,6 +220,10 @@ pub async fn reconfigurator_state_load( .zpool_list_all_external_batched(opctx) .await .context("listing zpools")?; + let dataset_rows = datastore + .dataset_list_all_batched(opctx, None) + .await + .context("listing datasets")?; let ip_pool_range_rows = { let (authz_service_ip_pool, _) = datastore .ip_pools_service_lookup(opctx) @@ -235,6 +260,7 @@ pub async fn reconfigurator_state_load( let planning_input = PlanningInputFromDb { sled_rows: &sled_rows, zpool_rows: &zpool_rows, + dataset_rows: &dataset_rows, ip_pool_range_rows: &ip_pool_range_rows, target_nexus_zone_count: NEXUS_REDUNDANCY, target_cockroachdb_zone_count: COCKROACHDB_REDUNDANCY, diff --git a/nexus/src/app/background/tasks/blueprint_execution.rs b/nexus/src/app/background/tasks/blueprint_execution.rs index 460d74360d..d1769f2765 100644 --- a/nexus/src/app/background/tasks/blueprint_execution.rs +++ b/nexus/src/app/background/tasks/blueprint_execution.rs @@ -128,7 +128,7 @@ mod test { use nexus_test_utils_macros::nexus_test; use nexus_types::deployment::BlueprintZoneFilter; use nexus_types::deployment::{ - blueprint_zone_type, Blueprint, BlueprintPhysicalDisksConfig, + blueprint_zone_type, Blueprint, BlueprintDatasetsConfig, BlueprintPhysicalDisksConfig, BlueprintTarget, BlueprintZoneConfig, BlueprintZoneDisposition, BlueprintZoneType, BlueprintZonesConfig, CockroachDbPreserveDowngrade, }; @@ -153,6 +153,7 @@ mod test { fn create_blueprint( blueprint_zones: BTreeMap, blueprint_disks: BTreeMap, + blueprint_datasets: BTreeMap, dns_version: Generation, ) -> (BlueprintTarget, Blueprint) { let id = Uuid::new_v4(); @@ -172,6 +173,7 @@ mod test { id, blueprint_zones, blueprint_disks, + blueprint_datasets, sled_state, cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, @@ -254,6 +256,7 @@ mod test { // complete and report a successful (empty) summary. 
let generation = Generation::new(); let blueprint = Arc::new(create_blueprint( + BTreeMap::new(), BTreeMap::new(), BTreeMap::new(), generation, @@ -305,6 +308,7 @@ mod test { (sled_id2, make_zones(BlueprintZoneDisposition::Quiesced)), ]), BTreeMap::new(), + BTreeMap::new(), generation, ); diff --git a/nexus/src/app/background/tasks/blueprint_load.rs b/nexus/src/app/background/tasks/blueprint_load.rs index 31bc00441d..290c1b8c57 100644 --- a/nexus/src/app/background/tasks/blueprint_load.rs +++ b/nexus/src/app/background/tasks/blueprint_load.rs @@ -213,6 +213,7 @@ mod test { id, blueprint_zones: BTreeMap::new(), blueprint_disks: BTreeMap::new(), + blueprint_datasets: BTreeMap::new(), sled_state: BTreeMap::new(), cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, diff --git a/nexus/src/app/deployment.rs b/nexus/src/app/deployment.rs index ca4635b13e..61849ec238 100644 --- a/nexus/src/app/deployment.rs +++ b/nexus/src/app/deployment.rs @@ -137,6 +137,8 @@ impl super::Nexus { .await?; let zpool_rows = datastore.zpool_list_all_external_batched(opctx).await?; + let dataset_rows = + datastore.dataset_list_all_batched(opctx, None).await?; let ip_pool_range_rows = { let (authz_service_ip_pool, _) = datastore.ip_pools_service_lookup(opctx).await?; @@ -172,6 +174,7 @@ impl super::Nexus { let planning_input = PlanningInputFromDb { sled_rows: &sled_rows, zpool_rows: &zpool_rows, + dataset_rows: &dataset_rows, ip_pool_range_rows: &ip_pool_range_rows, external_ip_rows: &external_ip_rows, service_nic_rows: &service_nic_rows, diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 7c190974a1..edbaa6a786 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -797,6 +797,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { // // However, for now, this isn't necessary. blueprint_disks: BTreeMap::new(), + blueprint_datasets: BTreeMap::new(), sled_state, parent_blueprint_id: None, internal_dns_version: dns_config diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 4342adb02b..96e12dc5c3 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -23,11 +23,14 @@ use nexus_sled_agent_shared::inventory::OmicronZoneConfig; use nexus_sled_agent_shared::inventory::OmicronZoneType; use nexus_sled_agent_shared::inventory::OmicronZonesConfig; use nexus_sled_agent_shared::inventory::ZoneKind; +use omicron_common::api::external::ByteCount; use omicron_common::api::external::Generation; +use omicron_common::api::internal::shared::DatasetKind; use omicron_common::disk::DiskIdentity; use omicron_common::disk::OmicronPhysicalDisksConfig; use omicron_uuid_kinds::CollectionUuid; use omicron_uuid_kinds::ExternalIpUuid; +use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::SledUuid; use schemars::JsonSchema; @@ -146,6 +149,9 @@ pub struct Blueprint { /// A map of sled id -> disks in use on each sled. pub blueprint_disks: BTreeMap, + /// A map of sled id -> datasets in use on each sled + pub blueprint_datasets: BTreeMap, + /// which blueprint this blueprint is based on pub parent_blueprint_id: Option, @@ -922,6 +928,24 @@ pub type BlueprintPhysicalDisksConfig = pub type BlueprintPhysicalDiskConfig = omicron_common::disk::OmicronPhysicalDiskConfig; +/// Information about Omicron datasets as recorded in a blueprint. 
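+///
+/// One of these is stored per sled in `Blueprint::blueprint_datasets`,
+/// alongside the existing per-sled zone and physical-disk configs; its
+/// generation is bumped whenever the builder changes a sled's datasets.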
+#[derive(Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize)] +pub struct BlueprintDatasetsConfig { + pub generation: Generation, + pub datasets: Vec, +} + +/// Information about a dataset as recorded in a blueprint +#[derive(Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize)] +pub struct BlueprintDatasetConfig { + pub id: DatasetUuid, + pub pool: ZpoolName, + pub kind: DatasetKind, + pub quota: Option, + pub reservation: Option, + pub compression: Option, +} + /// Describe high-level metadata about a blueprint // These fields are a subset of [`Blueprint`], and include only the data we can // quickly fetch from the main blueprint table (e.g., when listing all diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs index a5feff067a..389845f9df 100644 --- a/nexus/types/src/deployment/planning_input.rs +++ b/nexus/types/src/deployment/planning_input.rs @@ -21,6 +21,7 @@ use omicron_common::address::Ipv6Subnet; use omicron_common::address::SLED_PREFIX; use omicron_common::api::external::Generation; use omicron_common::api::internal::shared::SourceNatConfigError; +use omicron_common::disk::DatasetConfig; use omicron_common::disk::DiskIdentity; use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::PhysicalDiskUuid; @@ -453,7 +454,7 @@ pub struct SledResources { /// storage) // NOTE: I'd really like to make this private, to make it harder to // accidentally pick a zpool that is not in-service. - pub zpools: BTreeMap, + pub zpools: BTreeMap)>, /// the IPv6 subnet of this sled on the underlay network /// @@ -465,7 +466,7 @@ pub struct SledResources { impl SledResources { /// Returns if the zpool is provisionable (known, in-service, and active). pub fn zpool_is_provisionable(&self, zpool: &ZpoolUuid) -> bool { - let Some(disk) = self.zpools.get(zpool) else { return false }; + let Some((disk, _datasets)) = self.zpools.get(zpool) else { return false }; disk.provisionable() } @@ -474,7 +475,7 @@ impl SledResources { &self, filter: ZpoolFilter, ) -> impl Iterator + '_ { - self.zpools.iter().filter_map(move |(zpool, disk)| { + self.zpools.iter().filter_map(move |(zpool, (disk, _datasets))| { filter .matches_policy_and_state(disk.policy, disk.state) .then_some(zpool) @@ -485,12 +486,23 @@ impl SledResources { &self, filter: DiskFilter, ) -> impl Iterator + '_ { - self.zpools.iter().filter_map(move |(zpool, disk)| { + self.zpools.iter().filter_map(move |(zpool, (disk, _datasets))| { filter .matches_policy_and_state(disk.policy, disk.state) .then_some((zpool, disk)) }) } + + pub fn all_datasets( + &self, + filter: ZpoolFilter, + ) -> impl Iterator)> + '_ { + self.zpools.iter().filter_map(move |(zpool, (disk, datasets))| { + filter + .matches_policy_and_state(disk.policy, disk.state) + .then_some((zpool, datasets)) + }) + } } /// Filters that apply to sleds. 
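Not part of the patch itself, but to make the shape of the new `SledResources::all_datasets` accessor concrete: a consumer that wants database-provided datasets indexed by ID (the way `sled_ensure_datasets` builds its `database_datasets` map above) could flatten the iterator as in the sketch below. The function name and import paths are illustrative assumptions; only `all_datasets`, `ZpoolFilter`, and `DatasetConfig` come from the changes in this series.

use std::collections::BTreeMap;

use nexus_types::deployment::{SledResources, ZpoolFilter};
use omicron_common::disk::DatasetConfig;
use omicron_uuid_kinds::{DatasetUuid, ZpoolUuid};

/// Sketch: index every in-service dataset by its ID, remembering the zpool
/// it lives on.
fn datasets_by_id(
    resources: &SledResources,
) -> BTreeMap<DatasetUuid, (ZpoolUuid, &DatasetConfig)> {
    resources
        .all_datasets(ZpoolFilter::InService)
        .flat_map(|(zpool, datasets)| {
            datasets.iter().map(move |dataset| (dataset.id, (*zpool, dataset)))
        })
        .collect()
}
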
diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index c62c2fc72b..3cc9fa0014 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -539,6 +539,10 @@ CREATE TABLE IF NOT EXISTS omicron.public.dataset ( /* Only valid if kind = zone -- the name of this zone */ zone_name TEXT, + quota INT8, + reservation INT8, + compression TEXT, + /* Crucible must make use of 'size_used'; other datasets manage their own storage */ CONSTRAINT size_used_column_set_for_crucible CHECK ( (kind != 'crucible') OR @@ -3477,6 +3481,40 @@ CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_physical_disk ( PRIMARY KEY (blueprint_id, id) ); +-- description of a collection of omicron datasets stored in a blueprint +CREATE TABLE IF NOT EXISTS omicron.public.bp_sled_omicron_datasets ( + -- foreign key into the `blueprint` table + blueprint_id UUID NOT NULL, + sled_id UUID NOT NULL, + generation INT8 NOT NULL, + + PRIMARY KEY (blueprint_id, sled_id) +); + +-- description of an omicron dataset specified in a blueprint. +CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_dataset ( + -- foreign key into the `blueprint` table + blueprint_id UUID NOT NULL, + sled_id UUID NOT NULL, + id UUID NOT NULL, + + pool_id UUID NOT NULL, + kind omicron.public.dataset_kind NOT NULL, + -- Only valid if kind = zone + zone_name TEXT, + + quota INT8, + reservation INT8, + compression TEXT, + + CONSTRAINT zone_name_for_zone_kind CHECK ( + (kind != 'zone') OR + (kind = 'zone' AND zone_name IS NOT NULL) + ), + + PRIMARY KEY (blueprint_id, id) +); + -- see inv_sled_omicron_zones, which is identical except it references a -- collection whereas this table references a blueprint CREATE TABLE IF NOT EXISTS omicron.public.bp_sled_omicron_zones ( diff --git a/sled-agent/src/backing_fs.rs b/sled-agent/src/backing_fs.rs index 48002a8841..f8a53724f8 100644 --- a/sled-agent/src/backing_fs.rs +++ b/sled-agent/src/backing_fs.rs @@ -48,7 +48,7 @@ struct BackingFs<'a> { // Mountpoint mountpoint: &'static str, // Optional quota, in _bytes_ - quota: Option, + quota: Option, // Optional compression mode compression: Option<&'static str>, // Linked service @@ -74,7 +74,7 @@ impl<'a> BackingFs<'a> { self } - const fn quota(mut self, quota: usize) -> Self { + const fn quota(mut self, quota: u64) -> Self { self.quota = Some(quota); self } @@ -99,7 +99,7 @@ const BACKING_FMD_DATASET: &'static str = "fmd"; const BACKING_FMD_MOUNTPOINT: &'static str = "/var/fm/fmd"; const BACKING_FMD_SUBDIRS: [&'static str; 3] = ["rsrc", "ckpt", "xprt"]; const BACKING_FMD_SERVICE: &'static str = "svc:/system/fmd:default"; -const BACKING_FMD_QUOTA: usize = 500 * (1 << 20); // 500 MiB +const BACKING_FMD_QUOTA: u64 = 500 * (1 << 20); // 500 MiB const BACKING_COMPRESSION: &'static str = "on"; diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index ec19863bef..e724bec8a5 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -28,8 +28,8 @@ use omicron_common::backoff::{ retry_notify_ext, retry_policy_internal_service_aggressive, BackoffError, }; use omicron_common::disk::{ - DatasetKind, DatasetName, DiskVariant, OmicronPhysicalDiskConfig, - OmicronPhysicalDisksConfig, + DatasetsConfig, DatasetKind, DatasetName, DiskVariant, + OmicronPhysicalDiskConfig, OmicronPhysicalDisksConfig, }; use omicron_common::ledger::{self, Ledger, Ledgerable}; use omicron_uuid_kinds::{GenericUuid, OmicronZoneUuid, SledUuid, ZpoolUuid}; @@ -110,6 +110,9 @@ pub struct SledConfig { /// 
Control plane disks configured for this sled pub disks: OmicronPhysicalDisksConfig, + /// Datasets configured for this sled + pub datasets: DatasetsConfig, + /// zones configured for this sled pub zones: Vec, } diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index 9e90487954..7a1d1f675a 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -92,12 +92,13 @@ use nexus_sled_agent_shared::inventory::{ OmicronZoneConfig, OmicronZoneType, OmicronZonesConfig, }; use nexus_types::deployment::{ - Blueprint, BlueprintPhysicalDisksConfig, BlueprintZoneConfig, - BlueprintZoneDisposition, BlueprintZonesConfig, + Blueprint, BlueprintDatasetConfig, BlueprintDatasetsConfig, BlueprintPhysicalDisksConfig, + BlueprintZoneConfig, BlueprintZoneDisposition, BlueprintZonesConfig, CockroachDbPreserveDowngrade, InvalidOmicronZoneType, }; use nexus_types::external_api::views::SledState; use omicron_common::address::get_sled_address; +use omicron_common::api::external::ByteCount; use omicron_common::api::external::Generation; use omicron_common::api::internal::shared::ExternalPortDiscovery; use omicron_common::backoff::{ @@ -1412,6 +1413,29 @@ pub(crate) fn build_initial_blueprint_from_sled_configs( ); } + let mut blueprint_datasets = BTreeMap::new(); + for (sled_id, sled_config) in sled_configs_by_id { + blueprint_datasets.insert( + *sled_id, + BlueprintDatasetsConfig { + generation: sled_config.datasets.generation, + datasets: sled_config + .datasets + .datasets + .iter() + .map(|d| BlueprintDatasetConfig { + id: d.id, + pool: d.name.pool().clone(), + kind: d.name.dataset().clone(), + compression: d.compression.clone(), + quota: d.quota.map(|q| ByteCount::try_from(q).unwrap()), + reservation: d.reservation.map(|r| ByteCount::try_from(r).unwrap()), + }) + .collect(), + }, + ); + } + let mut blueprint_zones = BTreeMap::new(); let mut sled_state = BTreeMap::new(); for (sled_id, sled_config) in sled_configs_by_id { @@ -1442,6 +1466,7 @@ pub(crate) fn build_initial_blueprint_from_sled_configs( id: Uuid::new_v4(), blueprint_zones, blueprint_disks, + blueprint_datasets, sled_state, parent_blueprint_id: None, internal_dns_version, diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs index 189f775adb..98dbe58725 100644 --- a/sled-agent/src/sim/server.rs +++ b/sled-agent/src/sim/server.rs @@ -519,11 +519,14 @@ pub async fn run_standalone_server( None => vec![], }; - let disks = server.sled_agent.omicron_physical_disks_list().await?; let mut sled_configs = BTreeMap::new(); sled_configs.insert( SledUuid::from_untyped_uuid(config.id), - SledConfig { disks, zones }, + SledConfig { + disks: server.sled_agent.omicron_physical_disks_list().await?, + datasets: server.sled_agent.datasets_list().await?, + zones + }, ); let rack_init_request = NexusTypes::RackInitializationRequest { diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index 05339c201c..c3839e7ac5 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -40,7 +40,7 @@ use omicron_common::api::internal::shared::{ ResolvedVpcRouteState, RouterId, RouterKind, RouterVersion, }; use omicron_common::disk::{ - DiskIdentity, DiskVariant, OmicronPhysicalDisksConfig, + DatasetsConfig, DiskIdentity, DiskVariant, OmicronPhysicalDisksConfig, }; use omicron_uuid_kinds::{GenericUuid, InstanceUuid, PropolisUuid, ZpoolUuid}; use oxnet::Ipv6Net; @@ -893,6 +893,12 @@ impl SledAgent { }) } + pub async fn datasets_list( + &self, + 
) -> Result { + todo!(); + } + pub async fn omicron_physical_disks_list( &self, ) -> Result { diff --git a/sled-storage/src/dataset.rs b/sled-storage/src/dataset.rs index b95877418e..8d5bdc5910 100644 --- a/sled-storage/src/dataset.rs +++ b/sled-storage/src/dataset.rs @@ -32,16 +32,16 @@ pub const M2_BACKING_DATASET: &'static str = "backing"; cfg_if! { if #[cfg(any(test, feature = "testing"))] { // Tuned for zone_bundle tests - pub const DEBUG_DATASET_QUOTA: usize = 1 << 20; + pub const DEBUG_DATASET_QUOTA: u64 = 1 << 20; } else { // TODO-correctness: This value of 100GiB is a pretty wild guess, and should be // tuned as needed. - pub const DEBUG_DATASET_QUOTA: usize = 100 * (1 << 30); + pub const DEBUG_DATASET_QUOTA: u64 = 100 * (1 << 30); } } // TODO-correctness: This value of 100GiB is a pretty wild guess, and should be // tuned as needed. -pub const DUMP_DATASET_QUOTA: usize = 100 * (1 << 30); +pub const DUMP_DATASET_QUOTA: u64 = 100 * (1 << 30); // passed to zfs create -o compression= pub const DUMP_DATASET_COMPRESSION: &'static str = "gzip-9"; @@ -96,7 +96,7 @@ struct ExpectedDataset { // Name for the dataset name: &'static str, // Optional quota, in _bytes_ - quota: Option, + quota: Option, // Identifies if the dataset should be deleted on boot wipe: bool, // Optional compression mode @@ -108,7 +108,7 @@ impl ExpectedDataset { ExpectedDataset { name, quota: None, wipe: false, compression: None } } - const fn quota(mut self, quota: usize) -> Self { + const fn quota(mut self, quota: u64) -> Self { self.quota = Some(quota); self } From d753dbd333c7c7c5a654e3a2e9201ab0cd28d410 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 5 Aug 2024 12:04:05 -0700 Subject: [PATCH 16/84] sled_ensure_datasets mostly filled out --- dev-tools/reconfigurator-cli/src/main.rs | 4 +- nexus/db-model/src/dataset_kind.rs | 15 +- nexus/db-model/src/deployment.rs | 16 +- .../db-queries/src/db/datastore/deployment.rs | 6 +- nexus/reconfigurator/execution/src/dns.rs | 5 +- .../planning/src/blueprint_builder/builder.rs | 596 +++++++++++------- .../planning/src/blueprint_builder/zones.rs | 2 +- nexus/reconfigurator/planning/src/planner.rs | 26 +- nexus/reconfigurator/preparation/src/lib.rs | 11 +- .../background/tasks/blueprint_execution.rs | 7 +- nexus/types/src/deployment.rs | 2 +- nexus/types/src/deployment/planning_input.rs | 4 +- 12 files changed, 411 insertions(+), 283 deletions(-) diff --git a/dev-tools/reconfigurator-cli/src/main.rs b/dev-tools/reconfigurator-cli/src/main.rs index 983dde412d..6d505d8b7d 100644 --- a/dev-tools/reconfigurator-cli/src/main.rs +++ b/dev-tools/reconfigurator-cli/src/main.rs @@ -754,7 +754,7 @@ fn cmd_blueprint_edit( .context("failed to add Nexus zone")?; assert_matches::assert_matches!( added, - EnsureMultiple::Changed { added: 1, removed: 0 } + EnsureMultiple::Changed { added: 1, updated: 0, removed: 0 } ); format!("added Nexus zone to sled {}", sled_id) } @@ -766,7 +766,7 @@ fn cmd_blueprint_edit( .context("failed to add CockroachDB zone")?; assert_matches::assert_matches!( added, - EnsureMultiple::Changed { added: 1, removed: 0 } + EnsureMultiple::Changed { added: 1, updated: 0, removed: 0 } ); format!("added CockroachDB zone to sled {}", sled_id) } diff --git a/nexus/db-model/src/dataset_kind.rs b/nexus/db-model/src/dataset_kind.rs index d3b45f83d9..480347ebcf 100644 --- a/nexus/db-model/src/dataset_kind.rs +++ b/nexus/db-model/src/dataset_kind.rs @@ -3,8 +3,8 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
use super::impl_enum_type; -use omicron_common::api::internal; use omicron_common::api::external::Error; +use omicron_common::api::internal; use serde::{Deserialize, Serialize}; impl_enum_type!( @@ -29,7 +29,10 @@ impl_enum_type!( ); impl DatasetKind { - pub fn try_into_api(self, zone_name: Option) -> Result { + pub fn try_into_api( + self, + zone_name: Option, + ) -> Result { use internal::shared::DatasetKind as ApiKind; let k = match (self, zone_name) { (Self::Crucible, None) => ApiKind::Crucible, @@ -41,8 +44,12 @@ impl DatasetKind { (Self::ZoneRoot, None) => ApiKind::ZoneRoot, (Self::Zone, Some(name)) => ApiKind::Zone { name }, (Self::Debug, None) => ApiKind::Debug, - (Self::Zone, None) => return Err(Error::internal_error("Zone kind needs name")), - (_, Some(_)) => return Err(Error::internal_error("Only zone kind needs name")), + (Self::Zone, None) => { + return Err(Error::internal_error("Zone kind needs name")) + } + (_, Some(_)) => { + return Err(Error::internal_error("Only zone kind needs name")) + } }; Ok(k) diff --git a/nexus/db-model/src/deployment.rs b/nexus/db-model/src/deployment.rs index 34c59ef938..ed1c6e17c6 100644 --- a/nexus/db-model/src/deployment.rs +++ b/nexus/db-model/src/deployment.rs @@ -208,11 +208,11 @@ pub struct BpSledOmicronDatasets { } impl BpSledOmicronDatasets { -// pub fn new( -// blueprint_id: Uuid, -// sled_id: Uuid, -// ) { -// } + // pub fn new( + // blueprint_id: Uuid, + // sled_id: Uuid, + // ) { + // } } /// DB representation of [BlueprintDatasetConfig] @@ -245,7 +245,10 @@ impl TryFrom for BlueprintDatasetConfig { pool: omicron_common::zpool_name::ZpoolName::new_external( dataset.pool_id.into(), ), - kind: crate::DatasetKind::try_into_api(dataset.kind, dataset.zone_name)?, + kind: crate::DatasetKind::try_into_api( + dataset.kind, + dataset.zone_name, + )?, quota: dataset.quota.map(|b| b.into()), reservation: dataset.reservation.map(|b| b.into()), compression: dataset.compression, @@ -253,7 +256,6 @@ impl TryFrom for BlueprintDatasetConfig { } } - /// See [`nexus_types::deployment::BlueprintZonesConfig`]. #[derive(Queryable, Clone, Debug, Selectable, Insertable)] #[diesel(table_name = bp_sled_omicron_zones)] diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index ffaf480ea3..2cbf516d27 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -1481,8 +1481,8 @@ mod tests { state: PhysicalDiskState::Active, }, // Datasets - vec![] - ) + vec![], + ), ) }) .collect(); @@ -1707,7 +1707,7 @@ mod tests { .clone(), ) .unwrap(), - EnsureMultiple::Changed { added: 4, removed: 0 } + EnsureMultiple::Changed { added: 4, updated: 0, removed: 0 } ); // Add zones to our new sled. diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index 8379c69cfc..c37433cfb2 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -1296,7 +1296,10 @@ mod test { let rv = builder .sled_ensure_zone_multiple_nexus(sled_id, nalready + 1) .unwrap(); - assert_eq!(rv, EnsureMultiple::Changed { added: 1, removed: 0 }); + assert_eq!( + rv, + EnsureMultiple::Changed { added: 1, updated: 0, removed: 0 } + ); let blueprint2 = builder.build(); eprintln!("blueprint2: {}", blueprint2.display()); // Figure out the id of the new zone. 
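The hunks above migrate call sites to the widened `EnsureMultiple::Changed` variant, which now carries an `updated` count alongside `added` and `removed` (the enum itself is changed in the builder.rs diff that follows). As a reading aid, here is a minimal sketch of how a caller can consume the new shape; it is not part of the patch, and `summarize` is a hypothetical helper name used only for illustration:

    // Illustrative sketch only, not part of the patch series.
    // Assumes the `EnsureMultiple` enum as defined in
    // nexus/reconfigurator/planning/src/blueprint_builder/builder.rs below.
    fn summarize(result: EnsureMultiple) -> String {
        if let EnsureMultiple::Changed { added, updated, removed } = result {
            // Report all three counts, matching the logging style used by
            // `Operation::UpdateDisks` / `Operation::UpdateDatasets`.
            format!("added {added}, updated {updated}, removed {removed}")
        } else {
            "no change needed".to_string()
        }
    }
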
diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 090d67b250..e9ae065ac2 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -41,13 +41,14 @@ use omicron_common::address::get_switch_zone_address; use omicron_common::address::CP_SERVICES_RESERVED_ADDRESSES; use omicron_common::address::NTP_PORT; use omicron_common::address::SLED_RESERVED_ADDRESSES; -use omicron_common::disk::DatasetConfig; -use omicron_common::disk::DatasetName; +use omicron_common::api::external::ByteCount; use omicron_common::api::external::Generation; use omicron_common::api::external::Vni; use omicron_common::api::internal::shared::DatasetKind; use omicron_common::api::internal::shared::NetworkInterface; use omicron_common::api::internal::shared::NetworkInterfaceKind; +use omicron_common::disk::DatasetConfig; +use omicron_common::disk::DatasetName; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::ExternalIpKind; use omicron_uuid_kinds::GenericUuid; @@ -63,7 +64,6 @@ use slog::error; use slog::info; use slog::o; use slog::Logger; -use std::borrow::Cow; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::collections::HashSet; @@ -126,7 +126,7 @@ pub enum Ensure { #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum EnsureMultiple { /// action was taken, and multiple items were added - Changed { added: usize, removed: usize }, + Changed { added: usize, updated: usize, removed: usize }, /// no action was necessary NotNeeded, @@ -139,10 +139,27 @@ pub enum EnsureMultiple { /// "comment", identifying which operations have occurred on the blueprint. #[derive(Debug, Clone, Eq, PartialEq)] pub(crate) enum Operation { - AddZone { sled_id: SledUuid, kind: ZoneKind }, - UpdateDisks { sled_id: SledUuid, added: usize, removed: usize }, - UpdateDatasets { sled_id: SledUuid, added: usize, removed: usize }, - ZoneExpunged { sled_id: SledUuid, reason: ZoneExpungeReason, count: usize }, + AddZone { + sled_id: SledUuid, + kind: ZoneKind, + }, + UpdateDisks { + sled_id: SledUuid, + added: usize, + updated: usize, + removed: usize, + }, + UpdateDatasets { + sled_id: SledUuid, + added: usize, + updated: usize, + removed: usize, + }, + ZoneExpunged { + sled_id: SledUuid, + reason: ZoneExpungeReason, + count: usize, + }, } impl fmt::Display for Operation { @@ -151,11 +168,11 @@ impl fmt::Display for Operation { Self::AddZone { sled_id, kind } => { write!(f, "sled {sled_id}: added zone: {}", kind.report_str()) } - Self::UpdateDisks { sled_id, added, removed } => { - write!(f, "sled {sled_id}: added {added} disks, removed {removed} disks") + Self::UpdateDisks { sled_id, added, updated, removed } => { + write!(f, "sled {sled_id}: added {added} disks, updated {updated}, removed {removed} disks") } - Self::UpdateDatasets { sled_id, added, removed } => { - write!(f, "sled {sled_id}: added {added} datasets, removed {removed} datasets") + Self::UpdateDatasets { sled_id, added, updated, removed } => { + write!(f, "sled {sled_id}: added {added} datasets, updated: {updated}, removed {removed} datasets") } Self::ZoneExpunged { sled_id, reason, count } => { let reason = match reason { @@ -628,222 +645,100 @@ impl<'a> BlueprintBuilder<'a> { !removals.contains(&PhysicalDiskUuid::from_untyped_uuid(config.id)) }); - Ok(EnsureMultiple::Changed { added, removed }) + Ok(EnsureMultiple::Changed { added, updated: 0, removed }) } - /// Ensures 
that a zpool has the following datasets, as recorded in the - /// blueprint: - /// - Debug - /// - Zone Root + /// Ensures that a sled in the blueprint has all the datasets it should. /// - /// If these datasets do not exist: - /// - We return them from "database_datasets", if they exist there - /// - Otherwise, we create them + /// We perform the following process to decide what datasets should exist + /// in the blueprint during the planning phase: /// - /// This function returns all new datasets that should be added to the - /// blueprint. - pub fn zpool_ensure_fundamental_datasets( - zpool_id: ZpoolUuid, - blueprint_datasets: &Vec<&BlueprintDatasetConfig>, - database_datasets: &Vec, - ) -> BTreeMap { - let mut new_datasets = BTreeMap::new(); - let mut bp_already_has_debug = false; - let mut bp_already_has_zone_root = false; - for dataset in blueprint_datasets { - match dataset.kind { - DatasetKind::Debug => bp_already_has_debug = true, - DatasetKind::ZoneRoot => bp_already_has_zone_root = true, - _ => (), - } - } - - let mut db_debug = None; - let mut db_zone_root = None; - for dataset in database_datasets { - match dataset.name.dataset() { - DatasetKind::Debug => db_debug = Some(dataset), - DatasetKind::ZoneRoot => db_zone_root = Some(dataset), - _ => (), - } - }; - - if !bp_already_has_debug { - if let Some(db_debug) = db_debug { - new_datasets.insert(db_debug.id, (db_debug.name.pool().id(), db_debug.clone())); - } else { - let id = DatasetUuid::new_v4(); - new_datasets.insert( - id, - ( - zpool_id, - DatasetConfig { - id, - name: DatasetName::new(ZpoolName::new_external( - zpool_id, - ), DatasetKind::Debug), - quota: Some(100 * (1 << 30)), - reservation: None, - compression: None, - } - ) - ); - } - } - - if !bp_already_has_zone_root { - if let Some(db_zone_root) = db_zone_root { - new_datasets.insert(db_zone_root.id, (db_zone_root.name.pool().id(), db_zone_root.clone())); - } else { - let id = DatasetUuid::new_v4(); - new_datasets.insert( - id, - ( - zpool_id, - DatasetConfig { - id, - name: DatasetName::new(ZpoolName::new_external( - zpool_id, - ), DatasetKind::ZoneRoot), - quota: None, - reservation: None, - compression: None, - } - ) - ); - } - } - new_datasets - } - + /// INPUT | OUTPUT + /// ---------------------------------------------------------------------- + /// zpools in the blueprint | blueprint datasets for debug, root filesystem + /// | (All zpools should have these datasets) + /// ---------------------------------------------------------------------- + /// zones in the blueprint | blueprint datasets for filesystems, durable data + /// | (These datasets are needed for zones) + /// ---------------------------------------------------------------------- + /// discretionary datasets | blueprint datasets for discretionary datasets + /// NOTE: These don't exist, | + /// at the moment | + /// ---------------------------------------------------------------------- + /// + /// From this process, we should be able to construct "all datasets that + /// should exist in the new blueprint". + /// + /// - If new datasets are proposed, they are added + /// - If datasets are changed, they are updated + /// - If datasets are not proposed, but they exist in the parent blueprint, + /// they are removed. pub fn sled_ensure_datasets( &mut self, sled_id: SledUuid, resources: &SledResources, ) -> Result { - let (additions, removals) = { - // All blueprint datasets, known to this blueprint or the last. - // - // Indexed by dataset ID. 
- let blueprint_datasets: BTreeMap<_, _> = self - .datasets - .current_sled_datasets(sled_id) - .map(|dataset| { - (dataset.id, dataset) - }) - .collect(); - // Blueprint datasets, indexed by zpool ID. - let mut blueprint_datasets_by_zpool = BTreeMap::<_, Vec<&BlueprintDatasetConfig>>::new(); - for dataset in blueprint_datasets.values() { - blueprint_datasets_by_zpool.entry(dataset.pool.id()) - .and_modify(|values: &mut Vec<_>| values.push(dataset)) - .or_insert_with(|| vec![dataset]); - } - // All blueprint zpools, regardless of whether or not they - // currently contain datasets or not. - let blueprint_zpools: BTreeSet<_> = self - .disks - .current_sled_disks(sled_id) - .map(|disk| disk.pool_id) - .collect(); - - // All DB datasets, indexed by zpool ID - let database_datasets_by_zpool: BTreeMap<_, &Vec<_>> = resources - .all_datasets(ZpoolFilter::InService) - .collect(); - // All DB datasets, indexed by dataset ID - let database_datasets: BTreeMap<_, _> = database_datasets_by_zpool - .clone() - .into_iter() - .flat_map(|(zpool, datasets)| { - let zpool = *zpool; - datasets.iter().map(move |dataset| { - (dataset.id, (zpool, dataset)) - }) - }) - .collect(); + let (mut additions, mut updates, removals) = { + let mut datasets_builder = BlueprintSledDatasetsBuilder::new( + sled_id, + &self.datasets, + resources, + ); - // New datasets which we plan on adding to the blueprint. - // - // During execution, datasets added to the blueprint will be added - // into the DB, if they don't already exist there. - let mut new_datasets = BTreeMap::new(); - - // Datasets that should exist on every zpool. - // - // Ensure these exist in the blueprint, but check for them in the DB - // before deciding to make new datasets. - for zpool_id in &blueprint_zpools { - let bp = blueprint_datasets_by_zpool.get(zpool_id) - .map(Cow::Borrowed) - .unwrap_or_else(|| Cow::Owned(vec![])); - let db = database_datasets_by_zpool.get(zpool_id) - .map(|v| Cow::Borrowed(*v)) - .unwrap_or_else(|| Cow::Owned(vec![])); - - let mut added_datasets = Self::zpool_ensure_fundamental_datasets(*zpool_id, &bp, &db); - new_datasets.append( - &mut added_datasets + // Ensure each zpool has a "Debug" and "Zone Root" dataset. + let bp_zpools = + datasets_builder.all_bp_zpools().collect::>(); + for zpool_id in bp_zpools { + let zpool = ZpoolName::new_external(zpool_id); + datasets_builder.ensure( + DatasetName::new(zpool.clone(), DatasetKind::Debug), + Some(ByteCount::from_gibibytes_u32(100)), + None, + None, + ); + datasets_builder.ensure( + DatasetName::new(zpool, DatasetKind::ZoneRoot), + None, + None, + None, ); } - // Datasets that should exist because our zones need them + // Ensure that datasets needed for zones exist. for (zone, _zone_state) in self.zones.current_sled_zones(sled_id) { - if !zone.disposition.matches(BlueprintZoneFilter::ShouldBeRunning) { + if !zone + .disposition + .matches(BlueprintZoneFilter::ShouldBeRunning) + { continue; } - // TODO: check if the dataset(s) already exist? 
- // Dataset for transient zone filesystem if let Some(fs_zpool) = &zone.filesystem_pool { let name = format!( "oxp_{}_{}", - zone.zone_type.kind().zone_prefix(), zone.id, - ); - let dataset_kind = DatasetKind::Zone { name }; - let dataset_name = DatasetName::new( - fs_zpool.clone(), - dataset_kind, + zone.zone_type.kind().zone_prefix(), + zone.id, ); - - let id = DatasetUuid::new_v4(); - new_datasets.insert( - id, - ( - fs_zpool.id(), - DatasetConfig { - id, - name: dataset_name, - quota: None, - reservation: None, - compression: None, - } - ) + datasets_builder.ensure( + DatasetName::new( + fs_zpool.clone(), + DatasetKind::Zone { name }, + ), + None, + None, + None, ); } // Dataset for durable dataset co-located with zone if let Some(dataset) = zone.zone_type.durable_dataset() { let zpool = &dataset.dataset.pool_name; - let dataset_name = DatasetName::new( - zpool.clone(), - dataset.kind, - ); - - let id = DatasetUuid::new_v4(); - new_datasets.insert( - id, - ( - zpool.id(), - DatasetConfig { - id, - name: dataset_name, - quota: None, - reservation: None, - compression: None, - } - ) + datasets_builder.ensure( + DatasetName::new(zpool.clone(), dataset.kind), + None, + None, + None, ); } } @@ -851,50 +746,53 @@ impl<'a> BlueprintBuilder<'a> { // TODO: Note that we also have datasets in "zone/" for propolis // zones, but these are not currently being tracked by blueprints. - // TODO: Ensure zone datasets exist too // TODO: upsert dataset records during execution // NOTE: we add dataset records for durable datasets during // the execution phase? need a different addition/removal criteria - // TODO: For each in-service disk, ensure that we add zone root + debug - // TODO: Iterate over all zones, ensure they have the zones needed - // (transient + durable) - - // Add any disks that appear in the database, but not the blueprint - let additions = database_datasets - .iter() - .filter_map(|(dataset_id, (zpool, dataset))| { - if !blueprint_datasets.contains_key(dataset_id) { - Some(BlueprintDatasetConfig { - id: *dataset_id, - pool: ZpoolName::new_external(*zpool), - kind: dataset.name.dataset().clone(), - quota: dataset.quota.map(|q| q.try_into().unwrap()), - reservation: dataset.reservation.map(|r| r.try_into().unwrap()), - compression: dataset.compression.clone(), - }) - } else { - None - } + let removals = datasets_builder.get_unused_datasets(); + + let additions = datasets_builder + .new_datasets + .into_values() + .flat_map(|datasets| datasets.into_values()) + .collect::>(); + let updates = datasets_builder + .updated_datasets + .into_values() + .flat_map(|datasets| { + datasets.into_values().map(|dataset| (dataset.id, dataset)) }) - .collect::>(); + .collect::>(); + (additions, updates, removals) + }; - // Remove any datasets that appear in the blueprint, but not the database - let removals: HashSet = blueprint_datasets - .keys() - .filter_map(|dataset_id| { - if !database_datasets.contains_key(dataset_id) { - Some(*dataset_id) - } else { - None - } - }) - .collect(); + if additions.is_empty() && updates.is_empty() && removals.is_empty() { + return Ok(EnsureMultiple::NotNeeded); + } + let added = additions.len(); + let updated = updates.len(); + let removed = removals.len(); - (additions, removals) - }; + let datasets = + &mut self.datasets.change_sled_datasets(sled_id).datasets; + + // Apply updates & removals in the same iteration + datasets.retain_mut(|config| { + if let Some(new_config) = updates.remove(&config.id) { + *config = new_config; + }; + + !removals.contains(&config.id) + }); 
+ // Add all new datasets afterwards + datasets.append(&mut additions); - todo!(); + // Ensure that regardless of our implementation, the output dataset + // order is idempotent. + datasets.sort_by(|a, b| a.id.cmp(&b.id)); + + Ok(EnsureMultiple::Changed { added, updated, removed }) } pub fn sled_ensure_zone_ntp( @@ -1145,7 +1043,11 @@ impl<'a> BlueprintBuilder<'a> { self.sled_add_zone(sled_id, zone)?; } - Ok(EnsureMultiple::Changed { added: num_nexus_to_add, removed: 0 }) + Ok(EnsureMultiple::Changed { + added: num_nexus_to_add, + updated: 0, + removed: 0, + }) } pub fn cockroachdb_preserve_downgrade( @@ -1201,7 +1103,11 @@ impl<'a> BlueprintBuilder<'a> { self.sled_add_zone(sled_id, zone)?; } - Ok(EnsureMultiple::Changed { added: num_crdb_to_add, removed: 0 }) + Ok(EnsureMultiple::Changed { + added: num_crdb_to_add, + updated: 0, + removed: 0, + }) } fn sled_add_zone( @@ -1569,7 +1475,8 @@ impl<'a> BlueprintDatasetsBuilder<'a> { sled_id: SledUuid, ) -> &mut BlueprintDatasetsConfig { self.changed_datasets.entry(sled_id).or_insert_with(|| { - if let Some(old_sled_datasets) = self.parent_datasets.get(&sled_id) { + if let Some(old_sled_datasets) = self.parent_datasets.get(&sled_id) + { BlueprintDatasetsConfig { generation: old_sled_datasets.generation.next(), datasets: old_sled_datasets.datasets.clone(), @@ -1630,6 +1537,195 @@ impl<'a> BlueprintDatasetsBuilder<'a> { } } +/// Helper for working with sets of datasets on a single sled +struct BlueprintSledDatasetsBuilder<'a> { + blueprint_datasets: + BTreeMap>, + database_datasets: + BTreeMap>, + + new_datasets: + BTreeMap>, + updated_datasets: + BTreeMap>, +} + +impl<'a> BlueprintSledDatasetsBuilder<'a> { + pub fn new( + sled_id: SledUuid, + datasets: &'a BlueprintDatasetsBuilder<'_>, + resources: &'a SledResources, + ) -> Self { + // Gather all datasets known to the blueprint + let mut blueprint_datasets = BTreeMap::new(); + for dataset in datasets.current_sled_datasets(sled_id) { + blueprint_datasets + .entry(dataset.pool.id()) + .and_modify(|values: &mut BTreeMap<_, _>| { + values.insert(dataset.kind.clone(), dataset); + }) + .or_insert_with(|| { + BTreeMap::from([(dataset.kind.clone(), dataset)]) + }); + } + + // Gather all datasets known to the database + let mut database_datasets = BTreeMap::new(); + for (zpool, datasets) in resources.all_datasets(ZpoolFilter::InService) + { + let datasets_by_kind = datasets + .into_iter() + .map(|dataset| (dataset.name.dataset().clone(), dataset)) + .collect(); + + database_datasets.insert(*zpool, datasets_by_kind); + } + + Self { + blueprint_datasets, + database_datasets, + new_datasets: BTreeMap::new(), + updated_datasets: BTreeMap::new(), + } + } + + /// Attempts to add a dataset to the builder. 
+ /// + /// - If the dataset exists in the blueprint already, use it + /// - Otherwise, if the dataset exists in the database, re-use + /// the UUID, but add it to the blueprint + /// - Otherwse, create a new dataset in both the database + /// and the blueprint + pub fn ensure( + &mut self, + dataset: DatasetName, + quota: Option, + reservation: Option, + compression: Option, + ) { + let zpool = dataset.pool(); + let zpool_id = zpool.id(); + let kind = dataset.dataset(); + + let make_config = |id: DatasetUuid| BlueprintDatasetConfig { + id, + pool: zpool.clone(), + kind: kind.clone(), + quota, + reservation, + compression, + }; + + // This dataset already exists in the blueprint + if let Some(old_config) = self.get_from_bp(zpool_id, kind) { + let new_config = make_config(old_config.id); + + // If it needs updating, add it + if *old_config != new_config { + self.updated_datasets + .entry(zpool_id) + .and_modify(|values: &mut BTreeMap<_, _>| { + values.insert( + new_config.kind.clone(), + new_config.clone(), + ); + }) + .or_insert_with(|| { + BTreeMap::from([(new_config.kind.clone(), new_config)]) + }); + } + return; + } + + // If the dataset exists in the datastore, re-use the UUID. + let id = if let Some(old_config) = self.get_from_db(zpool_id, kind) { + old_config.id + } else { + DatasetUuid::new_v4() + }; + + let new_config = make_config(id); + self.new_datasets + .entry(zpool_id) + .and_modify(|values: &mut BTreeMap<_, _>| { + values.insert(new_config.kind.clone(), new_config.clone()); + }) + .or_insert_with(|| { + BTreeMap::from([(new_config.kind.clone(), new_config)]) + }); + } + + /// Returns all datasets in the old blueprint that are not planned to be + /// part of the new blueprint. + pub fn get_unused_datasets(&self) -> BTreeSet { + let dataset_exists_in = + |group: &BTreeMap< + ZpoolUuid, + BTreeMap, + >, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid| { + let Some(datasets) = group.get(&zpool_id) else { + return false; + }; + + for (_, dataset_config) in datasets { + if dataset_config.id == dataset_id { + return true; + } + } + return false; + }; + + let mut removals = BTreeSet::new(); + + for (zpool_id, datasets) in &self.blueprint_datasets { + for (_dataset_kind, dataset_config) in datasets { + let dataset_id = dataset_config.id; + if !dataset_exists_in(&self.new_datasets, *zpool_id, dataset_id) + && !dataset_exists_in( + &self.updated_datasets, + *zpool_id, + dataset_id, + ) + { + removals.insert(dataset_id); + } + } + } + + removals + } + + pub fn all_bp_zpools(&self) -> impl Iterator + '_ { + self.blueprint_datasets.keys().map(|id| *id) + } + + fn get_from_bp( + &self, + zpool: ZpoolUuid, + kind: &DatasetKind, + ) -> Option<&'a BlueprintDatasetConfig> { + self.blueprint_datasets + .get(&zpool) + .map(|datasets| datasets.get(kind)) + .flatten() + .copied() + } + + fn get_from_db( + &self, + zpool: ZpoolUuid, + kind: &DatasetKind, + ) -> Option<&'a DatasetConfig> { + self.database_datasets + .get(&zpool) + .map(|datasets| datasets.get(kind)) + .flatten() + .copied() + } +} + #[cfg(test)] pub mod test { use super::*; @@ -2011,7 +2107,11 @@ pub mod test { builder .sled_ensure_disks(sled_id, &sled_resources) .unwrap(), - EnsureMultiple::Changed { added: 10, removed: 0 }, + EnsureMultiple::Changed { + added: 10, + updated: 0, + removed: 0 + }, ); } @@ -2184,7 +2284,10 @@ pub mod test { .sled_ensure_zone_multiple_nexus(sled_id, 1) .expect("failed to ensure nexus zone"); - assert_eq!(added, EnsureMultiple::Changed { added: 1, removed: 0 }); + assert_eq!( + added, + 
EnsureMultiple::Changed { added: 1, updated: 0, removed: 0 } + ); } { @@ -2202,7 +2305,10 @@ pub mod test { .sled_ensure_zone_multiple_nexus(sled_id, 3) .expect("failed to ensure nexus zone"); - assert_eq!(added, EnsureMultiple::Changed { added: 3, removed: 0 }); + assert_eq!( + added, + EnsureMultiple::Changed { added: 3, updated: 0, removed: 0 } + ); } { @@ -2458,7 +2564,11 @@ pub mod test { .expect("ensured multiple CRDB zones"); assert_eq!( ensure_result, - EnsureMultiple::Changed { added: num_sled_zpools, removed: 0 } + EnsureMultiple::Changed { + added: num_sled_zpools, + updated: 0, + removed: 0 + } ); let blueprint = builder.build(); diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs b/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs index c707702be4..32ab345c22 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs @@ -254,7 +254,7 @@ mod tests { state: PhysicalDiskState::Active, }, // Datasets: Leave empty - vec![] + vec![], ), )]), }, diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index e656efdb0f..02e0ca461e 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -230,7 +230,7 @@ impl<'a> Planner<'a> { { // First, we need to ensure that sleds are using their expected // disks. This is necessary before we can allocate any zones. - if let EnsureMultiple::Changed { added, removed } = + if let EnsureMultiple::Changed { added, updated, removed } = self.blueprint.sled_ensure_disks(sled_id, &sled_resources)? { info!( @@ -241,6 +241,7 @@ impl<'a> Planner<'a> { self.blueprint.record_operation(Operation::UpdateDisks { sled_id, added, + updated, removed, }); @@ -358,9 +359,12 @@ impl<'a> Planner<'a> { } fn do_plan_datasets(&mut self) -> Result<(), Error> { - for (sled_id, sled_resources) in self.input.all_sled_resources(SledFilter::InService) { - if let EnsureMultiple::Changed { added, removed } = - self.blueprint.sled_ensure_datasets(sled_id, &sled_resources)? { + for (sled_id, sled_resources) in + self.input.all_sled_resources(SledFilter::InService) + { + if let EnsureMultiple::Changed { added, updated, removed } = + self.blueprint.sled_ensure_datasets(sled_id, &sled_resources)? 
+ { info!( &self.log, "altered datasets"; @@ -369,6 +373,7 @@ impl<'a> Planner<'a> { self.blueprint.record_operation(Operation::UpdateDatasets { sled_id, added, + updated, removed, }); } @@ -549,7 +554,7 @@ impl<'a> Planner<'a> { } }; match result { - EnsureMultiple::Changed { added, removed: _ } => { + EnsureMultiple::Changed { added, updated: _, removed: _ } => { info!( self.log, "will add {added} Nexus zone(s) to sled"; "sled_id" => %sled_id, @@ -1207,20 +1212,13 @@ mod test { for _ in 0..NEW_IN_SERVICE_DISKS { sled_details.resources.zpools.insert( ZpoolUuid::from(zpool_rng.next()), - ( - new_sled_disk(PhysicalDiskPolicy::InService), - vec![], - ) - + (new_sled_disk(PhysicalDiskPolicy::InService), vec![]), ); } for _ in 0..NEW_EXPUNGED_DISKS { sled_details.resources.zpools.insert( ZpoolUuid::from(zpool_rng.next()), - ( - new_sled_disk(PhysicalDiskPolicy::Expunged), - vec![], - ) + (new_sled_disk(PhysicalDiskPolicy::Expunged), vec![]), ); } diff --git a/nexus/reconfigurator/preparation/src/lib.rs b/nexus/reconfigurator/preparation/src/lib.rs index 188ded9406..1042f9bf2c 100644 --- a/nexus/reconfigurator/preparation/src/lib.rs +++ b/nexus/reconfigurator/preparation/src/lib.rs @@ -91,7 +91,9 @@ impl PlanningInputFromDb<'_> { let mut zpools_by_sled_id = { // Gather all the datasets first, by Zpool ID - let mut datasets: Vec<_> = self.dataset_rows.iter() + let mut datasets: Vec<_> = self + .dataset_rows + .iter() .map(|dataset| { ( ZpoolUuid::from_untyped_uuid(dataset.pool_id), @@ -102,7 +104,8 @@ impl PlanningInputFromDb<'_> { datasets.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); let mut datasets_by_zpool: BTreeMap<_, Vec<_>> = BTreeMap::new(); for (zpool_id, dataset) in datasets { - datasets_by_zpool.entry(zpool_id) + datasets_by_zpool + .entry(zpool_id) .or_default() .push(DatasetConfig::try_from(dataset)?); } @@ -124,7 +127,9 @@ impl PlanningInputFromDb<'_> { state: disk.disk_state.into(), }; - let datasets = datasets_by_zpool.remove(&zpool_id).unwrap_or_else(|| vec![]); + let datasets = datasets_by_zpool + .remove(&zpool_id) + .unwrap_or_else(|| vec![]); sled_zpool_names.insert(zpool_id, (disk, datasets)); } zpools diff --git a/nexus/src/app/background/tasks/blueprint_execution.rs b/nexus/src/app/background/tasks/blueprint_execution.rs index d1769f2765..65eae9d792 100644 --- a/nexus/src/app/background/tasks/blueprint_execution.rs +++ b/nexus/src/app/background/tasks/blueprint_execution.rs @@ -128,9 +128,10 @@ mod test { use nexus_test_utils_macros::nexus_test; use nexus_types::deployment::BlueprintZoneFilter; use nexus_types::deployment::{ - blueprint_zone_type, Blueprint, BlueprintDatasetsConfig, BlueprintPhysicalDisksConfig, - BlueprintTarget, BlueprintZoneConfig, BlueprintZoneDisposition, - BlueprintZoneType, BlueprintZonesConfig, CockroachDbPreserveDowngrade, + blueprint_zone_type, Blueprint, BlueprintDatasetsConfig, + BlueprintPhysicalDisksConfig, BlueprintTarget, BlueprintZoneConfig, + BlueprintZoneDisposition, BlueprintZoneType, BlueprintZonesConfig, + CockroachDbPreserveDowngrade, }; use nexus_types::external_api::views::SledState; use omicron_common::api::external::Generation; diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 96e12dc5c3..0039a79133 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -29,8 +29,8 @@ use omicron_common::api::internal::shared::DatasetKind; use omicron_common::disk::DiskIdentity; use omicron_common::disk::OmicronPhysicalDisksConfig; use omicron_uuid_kinds::CollectionUuid; -use 
omicron_uuid_kinds::ExternalIpUuid; use omicron_uuid_kinds::DatasetUuid; +use omicron_uuid_kinds::ExternalIpUuid; use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::SledUuid; use schemars::JsonSchema; diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs index 389845f9df..7f6a2e0b6a 100644 --- a/nexus/types/src/deployment/planning_input.rs +++ b/nexus/types/src/deployment/planning_input.rs @@ -466,7 +466,9 @@ pub struct SledResources { impl SledResources { /// Returns if the zpool is provisionable (known, in-service, and active). pub fn zpool_is_provisionable(&self, zpool: &ZpoolUuid) -> bool { - let Some((disk, _datasets)) = self.zpools.get(zpool) else { return false }; + let Some((disk, _datasets)) = self.zpools.get(zpool) else { + return false; + }; disk.provisionable() } From 37b490772b8f0bb9d3059456e59717aa576d01d4 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 5 Aug 2024 13:23:38 -0700 Subject: [PATCH 17/84] deploy_datasets --- clients/sled-agent-client/src/lib.rs | 1 + .../reconfigurator/execution/src/datasets.rs | 90 +++++++++++++++++++ nexus/reconfigurator/execution/src/lib.rs | 7 ++ .../execution/src/omicron_physical_disks.rs | 4 +- .../planning/src/blueprint_builder/builder.rs | 9 +- nexus/reconfigurator/planning/src/planner.rs | 14 +-- nexus/types/src/deployment.rs | 24 +++++ sled-agent/src/rack_setup/plan/service.rs | 2 +- sled-agent/src/rack_setup/service.rs | 9 +- sled-agent/src/sim/server.rs | 2 +- sled-agent/src/sim/sled_agent.rs | 4 +- 11 files changed, 141 insertions(+), 25 deletions(-) diff --git a/clients/sled-agent-client/src/lib.rs b/clients/sled-agent-client/src/lib.rs index 073cb9cfeb..94fa130ec7 100644 --- a/clients/sled-agent-client/src/lib.rs +++ b/clients/sled-agent-client/src/lib.rs @@ -38,6 +38,7 @@ progenitor::generate_api!( replace = { Baseboard = nexus_sled_agent_shared::inventory::Baseboard, ByteCount = omicron_common::api::external::ByteCount, + DatasetsConfig = omicron_common::disk::DatasetsConfig, DiskIdentity = omicron_common::disk::DiskIdentity, DiskVariant = omicron_common::disk::DiskVariant, Generation = omicron_common::api::external::Generation, diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index 003861519e..1bd3734afd 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -4,19 +4,109 @@ //! 
Ensures dataset records required by a given blueprint +use crate::Sled; + +use anyhow::anyhow; use anyhow::Context; +use futures::stream; +use futures::StreamExt; use nexus_db_model::Dataset; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; +use nexus_types::deployment::BlueprintDatasetsConfig; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::DurableDataset; use nexus_types::identity::Asset; +use omicron_common::disk::DatasetsConfig; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::SledUuid; use slog::info; +use slog::o; use slog::warn; +use std::collections::BTreeMap; use std::collections::BTreeSet; +/// Idempotently ensures that the specified datasets are deployed to the +/// corresponding sleds +pub(crate) async fn deploy_datasets( + opctx: &OpContext, + sleds_by_id: &BTreeMap, + sled_configs: &BTreeMap, +) -> Result<(), Vec> { + let errors: Vec<_> = stream::iter(sled_configs) + .filter_map(|(sled_id, config)| async move { + let log = opctx.log.new(o!( + "sled_id" => sled_id.to_string(), + "generation" => config.generation.to_string(), + )); + + let db_sled = match sleds_by_id.get(&sled_id) { + Some(sled) => sled, + None => { + let err = anyhow!("sled not found in db list: {}", sled_id); + warn!(log, "{err:#}"); + return Some(err); + } + }; + + let client = nexus_networking::sled_client_from_address( + sled_id.into_untyped_uuid(), + db_sled.sled_agent_address, + &log, + ); + let config: DatasetsConfig = config.clone().into(); + let result = + client.datasets_put(&config).await.with_context( + || format!("Failed to put {config:#?} to sled {sled_id}"), + ); + match result { + Err(error) => { + warn!(log, "{error:#}"); + Some(error) + } + Ok(result) => { + let (errs, successes): (Vec<_>, Vec<_>) = result + .into_inner() + .status + .into_iter() + .partition(|status| status.err.is_some()); + + if !errs.is_empty() { + warn!( + log, + "Failed to deploy datasets for sled agent"; + "successfully configured datasets" => successes.len(), + "failed dataset configurations" => errs.len(), + ); + for err in &errs { + warn!(log, "{err:?}"); + } + return Some(anyhow!( + "failure deploying datasets: {:?}", + errs + )); + } + + info!( + log, + "Successfully deployed datasets for sled agent"; + "successfully configured datasets" => successes.len(), + ); + None + } + } + }) + .collect() + .await; + + if errors.is_empty() { + Ok(()) + } else { + Err(errors) + } +} + /// For each zone in `all_omicron_zones` that has an associated durable dataset, /// ensure that a corresponding dataset record exists in `datastore`. 
/// diff --git a/nexus/reconfigurator/execution/src/lib.rs b/nexus/reconfigurator/execution/src/lib.rs index e3d2019230..c2e5a84efe 100644 --- a/nexus/reconfigurator/execution/src/lib.rs +++ b/nexus/reconfigurator/execution/src/lib.rs @@ -161,6 +161,13 @@ where ) .await?; + datasets::deploy_datasets( + &opctx, + &sleds_by_id, + &blueprint.blueprint_datasets, + ) + .await?; + omicron_zones::deploy_zones( &opctx, &sleds_by_id, diff --git a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs index 73dacf1c91..7dd618ae81 100644 --- a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs +++ b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs @@ -66,7 +66,7 @@ pub(crate) async fn deploy_disks( if !errs.is_empty() { warn!( log, - "Failed to deploy storage for sled agent"; + "Failed to deploy physical disk for sled agent"; "successfully configured disks" => successes.len(), "failed disk configurations" => errs.len(), ); @@ -81,7 +81,7 @@ pub(crate) async fn deploy_disks( info!( log, - "Successfully deployed storage for sled agent"; + "Successfully deployed physical disks for sled agent"; "successfully configured disks" => successes.len(), ); None diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index e9ae065ac2..f1aaecb562 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -669,8 +669,8 @@ impl<'a> BlueprintBuilder<'a> { /// From this process, we should be able to construct "all datasets that /// should exist in the new blueprint". /// - /// - If new datasets are proposed, they are added - /// - If datasets are changed, they are updated + /// - If new datasets are proposed, they are added to the blueprint. + /// - If datasets are changed, they are updated in the blueprint. /// - If datasets are not proposed, but they exist in the parent blueprint, /// they are removed. pub fn sled_ensure_datasets( @@ -788,9 +788,8 @@ impl<'a> BlueprintBuilder<'a> { // Add all new datasets afterwards datasets.append(&mut additions); - // Ensure that regardless of our implementation, the output dataset - // order is idempotent. - datasets.sort_by(|a, b| a.id.cmp(&b.id)); + // We sort in the call to "BlueprintDatasetsBuilder::into_datasets_map", + // so we don't need to sort "datasets" now. Ok(EnsureMultiple::Changed { added, updated, removed }) } diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 02e0ca461e..7420bf4a89 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -250,15 +250,6 @@ impl<'a> Planner<'a> { // we send this request first. } - // TODO: Ensure the "baseline" datasets exist (zone root, debug) - // TODO: Ensure all durable datasets exist (with zone allocation) - // TODO: Ensure all transient datasets exist (with zone allocation) - // - // NOTE: Make sure this works even if the zone was already - // provisioned? - // - // TODO: Ensure that all these datasets get deleted eventually? - // Check for an NTP zone. Every sled should have one. If it's not // there, all we can do is provision that one zone. 
We have to wait // for that to succeed and synchronize the clock before we can @@ -368,7 +359,10 @@ impl<'a> Planner<'a> { info!( &self.log, "altered datasets"; - "sled_id" => %sled_id + "sled_id" => %sled_id, + "added" => added, + "updated" => updated, + "removed" => removed, ); self.blueprint.record_operation(Operation::UpdateDatasets { sled_id, diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 0039a79133..3e8da87c56 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -26,6 +26,9 @@ use nexus_sled_agent_shared::inventory::ZoneKind; use omicron_common::api::external::ByteCount; use omicron_common::api::external::Generation; use omicron_common::api::internal::shared::DatasetKind; +use omicron_common::disk::DatasetConfig; +use omicron_common::disk::DatasetName; +use omicron_common::disk::DatasetsConfig; use omicron_common::disk::DiskIdentity; use omicron_common::disk::OmicronPhysicalDisksConfig; use omicron_uuid_kinds::CollectionUuid; @@ -935,6 +938,15 @@ pub struct BlueprintDatasetsConfig { pub datasets: Vec, } +impl From for DatasetsConfig { + fn from(config: BlueprintDatasetsConfig) -> Self { + Self { + generation: config.generation, + datasets: config.datasets.into_iter().map(From::from).collect(), + } + } +} + /// Information about a dataset as recorded in a blueprint #[derive(Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize)] pub struct BlueprintDatasetConfig { @@ -946,6 +958,18 @@ pub struct BlueprintDatasetConfig { pub compression: Option, } +impl From for DatasetConfig { + fn from(config: BlueprintDatasetConfig) -> Self { + Self { + id: config.id, + name: DatasetName::new(config.pool, config.kind), + quota: config.quota.map(|q| q.to_bytes()), + reservation: config.reservation.map(|r| r.to_bytes()), + compression: config.compression, + } + } +} + /// Describe high-level metadata about a blueprint // These fields are a subset of [`Blueprint`], and include only the data we can // quickly fetch from the main blueprint table (e.g., when listing all diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index e724bec8a5..c67166d76e 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -28,7 +28,7 @@ use omicron_common::backoff::{ retry_notify_ext, retry_policy_internal_service_aggressive, BackoffError, }; use omicron_common::disk::{ - DatasetsConfig, DatasetKind, DatasetName, DiskVariant, + DatasetKind, DatasetName, DatasetsConfig, DiskVariant, OmicronPhysicalDiskConfig, OmicronPhysicalDisksConfig, }; use omicron_common::ledger::{self, Ledger, Ledgerable}; diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index 7a1d1f675a..2da98e736d 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -92,8 +92,9 @@ use nexus_sled_agent_shared::inventory::{ OmicronZoneConfig, OmicronZoneType, OmicronZonesConfig, }; use nexus_types::deployment::{ - Blueprint, BlueprintDatasetConfig, BlueprintDatasetsConfig, BlueprintPhysicalDisksConfig, - BlueprintZoneConfig, BlueprintZoneDisposition, BlueprintZonesConfig, + Blueprint, BlueprintDatasetConfig, BlueprintDatasetsConfig, + BlueprintPhysicalDisksConfig, BlueprintZoneConfig, + BlueprintZoneDisposition, BlueprintZonesConfig, CockroachDbPreserveDowngrade, InvalidOmicronZoneType, }; use nexus_types::external_api::views::SledState; @@ -1429,7 +1430,9 @@ pub(crate) fn build_initial_blueprint_from_sled_configs( kind: 
d.name.dataset().clone(), compression: d.compression.clone(), quota: d.quota.map(|q| ByteCount::try_from(q).unwrap()), - reservation: d.reservation.map(|r| ByteCount::try_from(r).unwrap()), + reservation: d + .reservation + .map(|r| ByteCount::try_from(r).unwrap()), }) .collect(), }, diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs index 98dbe58725..b4016b0404 100644 --- a/sled-agent/src/sim/server.rs +++ b/sled-agent/src/sim/server.rs @@ -525,7 +525,7 @@ pub async fn run_standalone_server( SledConfig { disks: server.sled_agent.omicron_physical_disks_list().await?, datasets: server.sled_agent.datasets_list().await?, - zones + zones, }, ); diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index c3839e7ac5..c766f9d109 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -893,9 +893,7 @@ impl SledAgent { }) } - pub async fn datasets_list( - &self, - ) -> Result { + pub async fn datasets_list(&self) -> Result { todo!(); } From 3b092e74e26c9ad0d0cd0c8efdfc1cc5ef27b2a1 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 5 Aug 2024 18:20:16 -0700 Subject: [PATCH 18/84] Using disposition to try to delete datasets --- nexus/db-model/src/deployment.rs | 106 ++++++++++++- nexus/db-model/src/schema.rs | 4 + nexus/db-queries/src/db/datastore/dataset.rs | 32 ++++ .../db-queries/src/db/datastore/deployment.rs | 67 +++++++++ .../reconfigurator/execution/src/datasets.rs | 141 +++++++++++------- nexus/reconfigurator/execution/src/lib.rs | 4 +- .../planning/src/blueprint_builder/builder.rs | 111 ++++++++++++-- nexus/types/src/deployment.rs | 37 +++++ schema/crdb/dbinit.sql | 17 +++ sled-agent/src/rack_setup/service.rs | 48 +++--- 10 files changed, 472 insertions(+), 95 deletions(-) diff --git a/nexus/db-model/src/deployment.rs b/nexus/db-model/src/deployment.rs index ed1c6e17c6..5764671072 100644 --- a/nexus/db-model/src/deployment.rs +++ b/nexus/db-model/src/deployment.rs @@ -21,6 +21,8 @@ use crate::{ use chrono::{DateTime, Utc}; use ipnetwork::IpNetwork; use nexus_types::deployment::BlueprintDatasetConfig; +use nexus_types::deployment::BlueprintDatasetDisposition; +use nexus_types::deployment::BlueprintDatasetsConfig; use nexus_types::deployment::BlueprintPhysicalDiskConfig; use nexus_types::deployment::BlueprintPhysicalDisksConfig; use nexus_types::deployment::BlueprintTarget; @@ -34,6 +36,7 @@ use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; use omicron_uuid_kinds::{DatasetKind, ExternalIpKind, SledKind, ZpoolKind}; +use std::net::SocketAddrV6; use uuid::Uuid; /// See [`nexus_types::deployment::Blueprint`]. @@ -199,6 +202,54 @@ impl From for BlueprintPhysicalDiskConfig { } } +impl_enum_type!( + #[derive(Clone, SqlType, Debug, QueryId)] + #[diesel(postgres_type(name = "bp_dataset_disposition", schema = "public"))] + pub struct DbBpDatasetDispositionEnum; + + /// This type is not actually public, because [`BlueprintDatasetDisposition`] + /// interacts with external logic. + /// + /// However, it must be marked `pub` to avoid errors like `crate-private + /// type `BpDatasetDispositionEnum` in public interface`. Marking this type `pub`, + /// without actually making it public, tricks rustc in a desirable way. 
+ #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq)] + #[diesel(sql_type = DbBpDatasetDispositionEnum)] + pub enum DbBpDatasetDisposition; + + // Enum values + InService => b"in_service" + Expunged => b"expunged" +); + +/// Converts a [`BlueprintDatasetDisposition`] to a version that can be inserted +/// into a database. +pub fn to_db_bp_dataset_disposition( + disposition: BlueprintDatasetDisposition, +) -> DbBpDatasetDisposition { + match disposition { + BlueprintDatasetDisposition::InService => { + DbBpDatasetDisposition::InService + } + BlueprintDatasetDisposition::Expunged => { + DbBpDatasetDisposition::Expunged + } + } +} + +impl From for BlueprintDatasetDisposition { + fn from(disposition: DbBpDatasetDisposition) -> Self { + match disposition { + DbBpDatasetDisposition::InService => { + BlueprintDatasetDisposition::InService + } + DbBpDatasetDisposition::Expunged => { + BlueprintDatasetDisposition::Expunged + } + } + } +} + #[derive(Queryable, Clone, Debug, Selectable, Insertable)] #[diesel(table_name = bp_sled_omicron_datasets)] pub struct BpSledOmicronDatasets { @@ -208,11 +259,17 @@ pub struct BpSledOmicronDatasets { } impl BpSledOmicronDatasets { - // pub fn new( - // blueprint_id: Uuid, - // sled_id: Uuid, - // ) { - // } + pub fn new( + blueprint_id: Uuid, + sled_id: SledUuid, + datasets_config: &BlueprintDatasetsConfig, + ) -> Self { + Self { + blueprint_id, + sled_id: sled_id.into(), + generation: Generation(datasets_config.generation), + } + } } /// DB representation of [BlueprintDatasetConfig] @@ -223,9 +280,13 @@ pub struct BpOmicronDataset { pub sled_id: DbTypedUuid, pub id: DbTypedUuid, + pub disposition: DbBpDatasetDisposition, + pub pool_id: DbTypedUuid, pub kind: crate::DatasetKind, zone_name: Option, + pub ip: Option, + pub port: Option, pub quota: Option, pub reservation: Option, @@ -233,14 +294,46 @@ pub struct BpOmicronDataset { } impl BpOmicronDataset { - // TODO: Needs constructor? 
+ pub fn new( + blueprint_id: Uuid, + sled_id: SledUuid, + dataset_config: &BlueprintDatasetConfig, + ) -> Self { + Self { + blueprint_id, + sled_id: sled_id.into(), + id: dataset_config.id.into(), + disposition: to_db_bp_dataset_disposition( + dataset_config.disposition, + ), + pool_id: dataset_config.pool.id().into(), + kind: dataset_config.kind.clone().into(), + zone_name: dataset_config.kind.zone_name(), + ip: dataset_config.address.map(|addr| addr.ip().into()), + port: dataset_config.address.map(|addr| addr.port().into()), + quota: dataset_config.quota.map(|q| q.into()), + reservation: dataset_config.reservation.map(|r| r.into()), + compression: dataset_config.compression.clone(), + } + } } impl TryFrom for BlueprintDatasetConfig { type Error = anyhow::Error; fn try_from(dataset: BpOmicronDataset) -> Result { + let address = match (dataset.ip, dataset.port) { + (Some(ip), Some(port)) => { + Some(SocketAddrV6::new(ip.into(), port.into(), 0, 0)) + } + (None, None) => None, + (_, _) => anyhow::bail!( + "Either both 'ip' and 'port' should be set, or neither" + ), + }; + Ok(Self { + disposition: dataset.disposition.into(), id: dataset.id.into(), pool: omicron_common::zpool_name::ZpoolName::new_external( dataset.pool_id.into(), @@ -249,6 +342,7 @@ impl TryFrom for BlueprintDatasetConfig { dataset.kind, dataset.zone_name, )?, + address, quota: dataset.quota.map(|b| b.into()), reservation: dataset.reservation.map(|b| b.into()), compression: dataset.compression, diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index b6f8ed5063..2ac2538a7b 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1596,9 +1596,13 @@ table! { sled_id -> Uuid, id -> Uuid, + disposition -> crate::DbBpDatasetDispositionEnum, + pool_id -> Uuid, kind -> crate::DatasetKindEnum, zone_name -> Nullable, + ip -> Nullable, + port -> Nullable, quota -> Nullable, reservation -> Nullable, diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index 8a814aea80..1dfbee987a 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -27,11 +27,14 @@ use diesel::upsert::excluded; use nexus_db_model::DatasetKind; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; +use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; +use omicron_uuid_kinds::DatasetUuid; +use omicron_uuid_kinds::GenericUuid; use uuid::Uuid; impl DataStore { @@ -69,6 +72,10 @@ impl DataStore { dsl::ip.eq(excluded(dsl::ip)), dsl::port.eq(excluded(dsl::port)), dsl::kind.eq(excluded(dsl::kind)), + dsl::zone_name.eq(excluded(dsl::zone_name)), + dsl::quota.eq(excluded(dsl::quota)), + dsl::reservation.eq(excluded(dsl::reservation)), + dsl::compression.eq(excluded(dsl::compression)), )), ) .insert_and_get_result_async( @@ -183,6 +190,31 @@ impl DataStore { Ok(all_datasets) } + pub async fn dataset_delete( + &self, + opctx: &OpContext, + id: DatasetUuid, + ) -> DeleteResult { + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + + use db::schema::dataset::dsl as dataset_dsl; + let now = Utc::now(); + + let conn = &*self.pool_connection_authorized(&opctx).await?; + + let id = *id.as_untyped_uuid(); + diesel::update(dataset_dsl::dataset) + 
.filter(dataset_dsl::time_deleted.is_null()) + .filter(dataset_dsl::id.eq(id)) + .set(dataset_dsl::time_deleted.eq(now)) + .execute_async(conn) + .await + .map(|_rows_modified| ()) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(()) + } + pub async fn dataset_physical_disk_in_service( &self, dataset_id: Uuid, diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 2cbf516d27..ce142e376d 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -35,6 +35,7 @@ use diesel::OptionalExtension; use diesel::QueryDsl; use diesel::RunQueryDsl; use nexus_db_model::Blueprint as DbBlueprint; +use nexus_db_model::BpOmicronDataset; use nexus_db_model::BpOmicronPhysicalDisk; use nexus_db_model::BpOmicronZone; use nexus_db_model::BpOmicronZoneNic; @@ -148,6 +149,28 @@ impl DataStore { }) }) .collect::>(); + + let sled_omicron_datasets = blueprint + .blueprint_datasets + .iter() + .map(|(sled_id, datasets_config)| { + BpSledOmicronDatasets::new( + blueprint_id, + *sled_id, + datasets_config, + ) + }) + .collect::>(); + let omicron_datasets = blueprint + .blueprint_datasets + .iter() + .flat_map(|(sled_id, datasets_config)| { + datasets_config.datasets.iter().map(move |dataset| { + BpOmicronDataset::new(blueprint_id, *sled_id, dataset) + }) + }) + .collect::>(); + let sled_omicron_zones = blueprint .blueprint_zones .iter() @@ -230,6 +253,24 @@ impl DataStore { .await?; } + // Insert all datasets for this blueprint. + + { + use db::schema::bp_sled_omicron_datasets::dsl as sled_datasets; + let _ = diesel::insert_into(sled_datasets::bp_sled_omicron_datasets) + .values(sled_omicron_datasets) + .execute_async(&conn) + .await?; + } + + { + use db::schema::bp_omicron_dataset::dsl as omicron_dataset; + let _ = diesel::insert_into(omicron_dataset::bp_omicron_dataset) + .values(omicron_datasets) + .execute_async(&conn) + .await?; + } + // Insert all the Omicron zones for this blueprint. { use db::schema::bp_sled_omicron_zones::dsl as sled_zones; @@ -707,6 +748,8 @@ impl DataStore { nsled_states, nsled_physical_disks, nphysical_disks, + nsled_datasets, + ndatasets, nsled_agent_zones, nzones, nnics, @@ -775,6 +818,26 @@ impl DataStore { .await? }; + // Remove rows associated with Omicron datasets + let nsled_datasets = { + use db::schema::bp_sled_omicron_datasets::dsl; + diesel::delete( + dsl::bp_sled_omicron_datasets + .filter(dsl::blueprint_id.eq(blueprint_id)), + ) + .execute_async(&conn) + .await? + }; + let ndatasets = { + use db::schema::bp_omicron_dataset::dsl; + diesel::delete( + dsl::bp_omicron_dataset + .filter(dsl::blueprint_id.eq(blueprint_id)), + ) + .execute_async(&conn) + .await? 
+ }; + // Remove rows associated with Omicron zones let nsled_agent_zones = { use db::schema::bp_sled_omicron_zones::dsl; @@ -811,6 +874,8 @@ impl DataStore { nsled_states, nsled_physical_disks, nphysical_disks, + nsled_datasets, + ndatasets, nsled_agent_zones, nzones, nnics, @@ -830,6 +895,8 @@ impl DataStore { "nsled_states" => nsled_states, "nsled_physical_disks" => nsled_physical_disks, "nphysical_disks" => nphysical_disks, + "nsled_datasets" => nsled_datasets, + "ndatasets" => ndatasets, "nsled_agent_zones" => nsled_agent_zones, "nzones" => nzones, "nnics" => nnics, diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index 1bd3734afd..43c9f96c4a 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -13,19 +13,19 @@ use futures::StreamExt; use nexus_db_model::Dataset; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; +use nexus_types::deployment::BlueprintDatasetConfig; +use nexus_types::deployment::BlueprintDatasetDisposition; use nexus_types::deployment::BlueprintDatasetsConfig; -use nexus_types::deployment::BlueprintZoneConfig; -use nexus_types::deployment::DurableDataset; use nexus_types::identity::Asset; +use omicron_common::disk::DatasetConfig; use omicron_common::disk::DatasetsConfig; +use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; -use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::SledUuid; use slog::info; use slog::o; use slog::warn; use std::collections::BTreeMap; -use std::collections::BTreeSet; /// Idempotently ensures that the specified datasets are deployed to the /// corresponding sleds @@ -107,15 +107,15 @@ pub(crate) async fn deploy_datasets( } } -/// For each zone in `all_omicron_zones` that has an associated durable dataset, -/// ensure that a corresponding dataset record exists in `datastore`. +/// For all datasets we expect to see in the blueprint, ensure that a corresponding +/// database record exists in `datastore`. /// -/// Does not modify any existing dataset records. Returns the number of -/// datasets inserted. +/// Updates all existing dataset records that don't match the blueprint. +/// Returns the number of datasets inserted. pub(crate) async fn ensure_dataset_records_exist( opctx: &OpContext, datastore: &DataStore, - all_omicron_zones: impl Iterator, + bp_datasets: impl Iterator, ) -> anyhow::Result { // Before attempting to insert any datasets, first query for any existing // dataset records so we can filter them out. This looks like a typical @@ -131,61 +131,92 @@ pub(crate) async fn ensure_dataset_records_exist( .await .context("failed to list all datasets")? 
.into_iter() - .map(|dataset| OmicronZoneUuid::from_untyped_uuid(dataset.id())) - .collect::>(); + .map(|dataset| (DatasetUuid::from_untyped_uuid(dataset.id()), dataset)) + .collect::>(); let mut num_inserted = 0; - let mut num_already_exist = 0; - - for zone in all_omicron_zones { - let Some(DurableDataset { dataset, kind, address }) = - zone.zone_type.durable_dataset() - else { - continue; + let mut num_updated = 0; + let mut num_unchanged = 0; + let mut num_removed = 0; + + let (wanted_datasets, unwanted_datasets): (Vec<_>, Vec<_>) = bp_datasets + .partition(|d| match d.disposition { + BlueprintDatasetDisposition::InService => true, + BlueprintDatasetDisposition::Expunged => false, + }); + + for bp_dataset in wanted_datasets { + let id = bp_dataset.id; + let kind = &bp_dataset.kind; + + // If this dataset already exists, only update it if it appears different from what exists + // in the database already. + let action = if let Some(db_dataset) = existing_datasets.remove(&id) { + let db_dataset: DatasetConfig = db_dataset.try_into()?; + + if db_dataset == bp_dataset.clone().into() { + num_unchanged += 1; + continue; + } + num_updated += 1; + "update" + } else { + "insert" }; - let id = zone.id; - - // If already present in the datastore, move on. - if existing_datasets.remove(&id) { - num_already_exist += 1; - continue; - } - - let pool_id = dataset.pool_name.id(); + let address = bp_dataset.address; let dataset = Dataset::new( id.into_untyped_uuid(), - pool_id.into_untyped_uuid(), - Some(address), + bp_dataset.pool.id().into_untyped_uuid(), + address, kind.clone().into(), kind.zone_name(), ); - let maybe_inserted = datastore - .dataset_insert_if_not_exists(dataset) - .await - .with_context(|| { - format!("failed to insert dataset record for dataset {id}") - })?; - - // If we succeeded in inserting, log it; if `maybe_dataset` is `None`, - // we must have lost the TOCTOU race described above, and another Nexus - // must have inserted this dataset before we could. - if maybe_inserted.is_some() { - info!( - opctx.log, - "inserted new dataset for Omicron zone"; - "id" => %id, - "kind" => ?kind, - ); - num_inserted += 1; - } else { - num_already_exist += 1; + datastore.dataset_upsert(dataset).await.with_context(|| { + format!("failed to upsert dataset record for dataset {id}") + })?; + + info!( + opctx.log, + "ensuring dataset record in database"; + "action" => action, + "id" => %id, + "kind" => ?kind, + ); + num_inserted += 1; + } + + // TODO: I know we don't want to actually expunge crucible zones, but unclear + // where that decision SHOULD be made? + // + // --> Actually, idk about this. We should clearly read the disposition to + // decide which datasets to delete, but I think we need some + // planner/executor coordination to punt on Crucible. + + for bp_dataset in unwanted_datasets { + if existing_datasets.remove(&bp_dataset.id).is_some() { + if matches!( + bp_dataset.kind, + omicron_common::disk::DatasetKind::Crucible + ) { + // Region and snapshot replacement cannot happen without the + // database record, even if the dataset has been expunged. + // + // This record will still be deleted, but it will happen as a + // part of the "decommissioned_disk_cleaner" background task. + continue; + } + + datastore.dataset_delete(&opctx, bp_dataset.id).await?; + num_removed += 1; } } - // We don't currently support removing datasets, so this would be - // surprising: the database contains dataset records that are no longer in - // our blueprint. We can't do anything about this, so just warn. 
+ // We support removing expunged datasets - if we read a dataset that hasn't + // been explicitly expunged, log this as an oddity. + // + // This could be possible in rare conditions where multiple Nexuses are executing + // distinct blueprints. if !existing_datasets.is_empty() { warn!( opctx.log, @@ -197,9 +228,11 @@ pub(crate) async fn ensure_dataset_records_exist( info!( opctx.log, - "ensured all Omicron zones have dataset records"; + "ensured all Omicron datasets have database records"; "num_inserted" => num_inserted, - "num_already_existed" => num_already_exist, + "num_updated" => num_updated, + "num_unchanged" => num_unchanged, + "num_removed" => num_removed, ); Ok(num_inserted) diff --git a/nexus/reconfigurator/execution/src/lib.rs b/nexus/reconfigurator/execution/src/lib.rs index c2e5a84efe..e78bd5fb4c 100644 --- a/nexus/reconfigurator/execution/src/lib.rs +++ b/nexus/reconfigurator/execution/src/lib.rs @@ -197,9 +197,7 @@ where datasets::ensure_dataset_records_exist( &opctx, datastore, - blueprint - .all_omicron_zones(BlueprintZoneFilter::ShouldBeRunning) - .map(|(_sled_id, zone)| zone), + blueprint.all_omicron_datasets(), ) .await .map_err(|err| vec![err])?; diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index f1aaecb562..132a2b9e78 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -17,6 +17,7 @@ use nexus_sled_agent_shared::inventory::ZoneKind; use nexus_types::deployment::blueprint_zone_type; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintDatasetConfig; +use nexus_types::deployment::BlueprintDatasetDisposition; use nexus_types::deployment::BlueprintDatasetsConfig; use nexus_types::deployment::BlueprintPhysicalDiskConfig; use nexus_types::deployment::BlueprintPhysicalDisksConfig; @@ -678,7 +679,7 @@ impl<'a> BlueprintBuilder<'a> { sled_id: SledUuid, resources: &SledResources, ) -> Result { - let (mut additions, mut updates, removals) = { + let (mut additions, mut updates, expunges, removals) = { let mut datasets_builder = BlueprintSledDatasetsBuilder::new( sled_id, &self.datasets, @@ -690,14 +691,17 @@ impl<'a> BlueprintBuilder<'a> { datasets_builder.all_bp_zpools().collect::>(); for zpool_id in bp_zpools { let zpool = ZpoolName::new_external(zpool_id); + let address = None; datasets_builder.ensure( DatasetName::new(zpool.clone(), DatasetKind::Debug), + address, Some(ByteCount::from_gibibytes_u32(100)), None, None, ); datasets_builder.ensure( DatasetName::new(zpool, DatasetKind::ZoneRoot), + address, None, None, None, @@ -720,11 +724,13 @@ impl<'a> BlueprintBuilder<'a> { zone.zone_type.kind().zone_prefix(), zone.id, ); + let address = None; datasets_builder.ensure( DatasetName::new( fs_zpool.clone(), DatasetKind::Zone { name }, ), + address, None, None, None, @@ -734,8 +740,15 @@ impl<'a> BlueprintBuilder<'a> { // Dataset for durable dataset co-located with zone if let Some(dataset) = zone.zone_type.durable_dataset() { let zpool = &dataset.dataset.pool_name; + let address = match zone.zone_type { + BlueprintZoneType::Crucible( + blueprint_zone_type::Crucible { address, .. 
}, + ) => Some(address), + _ => None, + }; datasets_builder.ensure( DatasetName::new(zpool.clone(), dataset.kind), + address, None, None, None, @@ -746,11 +759,8 @@ impl<'a> BlueprintBuilder<'a> { // TODO: Note that we also have datasets in "zone/" for propolis // zones, but these are not currently being tracked by blueprints. - // TODO: upsert dataset records during execution - // NOTE: we add dataset records for durable datasets during - // the execution phase? need a different addition/removal criteria - - let removals = datasets_builder.get_unused_datasets(); + let expunges = datasets_builder.get_expungeable_datasets(); + let removals = datasets_builder.get_removable_datasets(); let additions = datasets_builder .new_datasets @@ -764,29 +774,55 @@ impl<'a> BlueprintBuilder<'a> { datasets.into_values().map(|dataset| (dataset.id, dataset)) }) .collect::>(); - (additions, updates, removals) + (additions, updates, expunges, removals) }; - if additions.is_empty() && updates.is_empty() && removals.is_empty() { + if additions.is_empty() + && updates.is_empty() + && expunges.is_empty() + && removals.is_empty() + { return Ok(EnsureMultiple::NotNeeded); } let added = additions.len(); let updated = updates.len(); - let removed = removals.len(); + // This is a little overloaded, but: + // - When a dataset is expunged, for whatever reason, it is a part of + // "expunges". This leads to it getting removed from a sled. + // - When we know that we've safely destroyed all traces of the dataset, + // it becomes a part of "removals". This means we can remove it from the + // blueprint. + let removed = expunges.len() + removals.len(); let datasets = &mut self.datasets.change_sled_datasets(sled_id).datasets; - // Apply updates & removals in the same iteration - datasets.retain_mut(|config| { + // Add all new datasets + datasets.append(&mut additions); + + for config in &mut *datasets { + // Apply updates if let Some(new_config) = updates.remove(&config.id) { *config = new_config; }; - !removals.contains(&config.id) + // Mark unused datasets as expunged. + // + // This indicates that the dataset should be removed from the database. + if expunges.contains(&config.id) { + config.disposition = BlueprintDatasetDisposition::Expunged; + } + } + + // Remove all datasets that we've finished expunging. + datasets.retain(|d| { + debug_assert_eq!( + d.disposition, + BlueprintDatasetDisposition::Expunged, + "We should only be removing datasets that are already expunged" + ); + !removals.contains(&d.id) }); - // Add all new datasets afterwards - datasets.append(&mut additions); // We sort in the call to "BlueprintDatasetsBuilder::into_datasets_map", // so we don't need to sort "datasets" now. @@ -1598,6 +1634,7 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { pub fn ensure( &mut self, dataset: DatasetName, + address: Option, quota: Option, reservation: Option, compression: Option, @@ -1607,9 +1644,11 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { let kind = dataset.dataset(); let make_config = |id: DatasetUuid| BlueprintDatasetConfig { + disposition: BlueprintDatasetDisposition::InService, id, pool: zpool.clone(), kind: kind.clone(), + address, quota, reservation, compression, @@ -1656,7 +1695,7 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { /// Returns all datasets in the old blueprint that are not planned to be /// part of the new blueprint. 
- pub fn get_unused_datasets(&self) -> BTreeSet { + pub fn get_expungeable_datasets(&self) -> BTreeSet { let dataset_exists_in = |group: &BTreeMap< ZpoolUuid, @@ -1696,6 +1735,48 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { removals } + /// Returns all datasets that have been expunged in a prior blueprint, + /// and which are also deleted from the database. + /// + /// This is our sign that the work of expungement has completed. + pub fn get_removable_datasets(&self) -> BTreeSet { + let dataset_exists_in = + |group: &BTreeMap< + ZpoolUuid, + BTreeMap, + >, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid| { + let Some(datasets) = group.get(&zpool_id) else { + return false; + }; + + for (_, dataset_config) in datasets { + if dataset_config.id == dataset_id { + return true; + } + } + return false; + }; + + let mut removals = BTreeSet::new(); + for (zpool_id, datasets) in &self.blueprint_datasets { + for (_kind, config) in datasets { + if matches!( + config.disposition, + BlueprintDatasetDisposition::Expunged + ) && !dataset_exists_in( + &self.database_datasets, + *zpool_id, + config.id, + ) { + removals.insert(config.id); + } + } + } + removals + } + pub fn all_bp_zpools(&self) -> impl Iterator + '_ { self.blueprint_datasets.keys().map(|id| *id) } diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 3e8da87c56..9e72833323 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -44,6 +44,7 @@ use std::collections::BTreeMap; use std::collections::BTreeSet; use std::fmt; use std::net::Ipv6Addr; +use std::net::SocketAddrV6; use strum::EnumIter; use strum::IntoEnumIterator; use thiserror::Error; @@ -216,6 +217,13 @@ impl Blueprint { }) } + /// Iterate over the [`BlueprintDatasetsConfig`] instances in the blueprint. + pub fn all_omicron_datasets( + &self, + ) -> impl Iterator { + self.blueprint_datasets.iter().flat_map(move |(_, d)| d.datasets.iter()) + } + /// Iterate over the [`BlueprintZoneConfig`] instances in the blueprint /// that do not match the provided filter, along with the associated sled /// id. @@ -947,12 +955,41 @@ impl From for DatasetsConfig { } } +/// The desired state of an Omicron-managed dataset in a blueprint. +/// +/// Part of [`BlueprintDatasetConfig`]. +#[derive( + Debug, + Copy, + Clone, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + JsonSchema, + Deserialize, + Serialize, + EnumIter, +)] +#[serde(rename_all = "snake_case")] +pub enum BlueprintDatasetDisposition { + /// The dataset is in-service. + InService, + + /// The dataset is permanently gone. 
+ Expunged, +} + /// Information about a dataset as recorded in a blueprint #[derive(Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize)] pub struct BlueprintDatasetConfig { + pub disposition: BlueprintDatasetDisposition, + pub id: DatasetUuid, pub pool: ZpoolName, pub kind: DatasetKind, + pub address: Option, pub quota: Option, pub reservation: Option, pub compression: Option, diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 3cc9fa0014..86c60369e8 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3380,6 +3380,11 @@ CREATE TYPE IF NOT EXISTS omicron.public.bp_zone_disposition AS ENUM ( 'expunged' ); +CREATE TYPE IF NOT EXISTS omicron.public.bp_dataset_disposition AS ENUM ( + 'in_service', + 'expunged' +); + -- list of all blueprints CREATE TABLE IF NOT EXISTS omicron.public.blueprint ( id UUID PRIMARY KEY, @@ -3498,11 +3503,18 @@ CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_dataset ( sled_id UUID NOT NULL, id UUID NOT NULL, + -- Dataset disposition + disposition omicron.public.bp_dataset_disposition NOT NULL, + pool_id UUID NOT NULL, kind omicron.public.dataset_kind NOT NULL, -- Only valid if kind = zone zone_name TEXT, + -- Only valid if kind = crucible + ip INET, + port INT4 CHECK (port BETWEEN 0 AND 65535), + quota INT8, reservation INT8, compression TEXT, @@ -3512,6 +3524,11 @@ CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_dataset ( (kind = 'zone' AND zone_name IS NOT NULL) ), + CONSTRAINT ip_and_port_set_for_crucible CHECK ( + (kind != 'crucible') OR + (kind = 'crucible' AND ip IS NOT NULL and port IS NOT NULL) + ), + PRIMARY KEY (blueprint_id, id) ); diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index 2da98e736d..6faafc30b8 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -92,8 +92,8 @@ use nexus_sled_agent_shared::inventory::{ OmicronZoneConfig, OmicronZoneType, OmicronZonesConfig, }; use nexus_types::deployment::{ - Blueprint, BlueprintDatasetConfig, BlueprintDatasetsConfig, - BlueprintPhysicalDisksConfig, BlueprintZoneConfig, + Blueprint, BlueprintDatasetConfig, BlueprintDatasetDisposition, + BlueprintDatasetsConfig, BlueprintPhysicalDisksConfig, BlueprintZoneConfig, BlueprintZoneDisposition, BlueprintZonesConfig, CockroachDbPreserveDowngrade, InvalidOmicronZoneType, }; @@ -1416,25 +1416,39 @@ pub(crate) fn build_initial_blueprint_from_sled_configs( let mut blueprint_datasets = BTreeMap::new(); for (sled_id, sled_config) in sled_configs_by_id { + let mut datasets = vec![]; + for d in &sled_config.datasets.datasets { + // Only the "Crucible" dataset needs to know the address + let address = sled_config.zones.iter().find_map(|z| { + if let OmicronZoneType::Crucible { address, dataset } = + &z.zone_type + { + if &dataset.pool_name == d.name.pool() { + return Some(*address); + } + }; + None + }); + + datasets.push(BlueprintDatasetConfig { + disposition: BlueprintDatasetDisposition::InService, + id: d.id, + pool: d.name.pool().clone(), + kind: d.name.dataset().clone(), + address, + compression: d.compression.clone(), + quota: d.quota.map(|q| ByteCount::try_from(q).unwrap()), + reservation: d + .reservation + .map(|r| ByteCount::try_from(r).unwrap()), + }); + } + blueprint_datasets.insert( *sled_id, BlueprintDatasetsConfig { generation: sled_config.datasets.generation, - datasets: sled_config - .datasets - .datasets - .iter() - .map(|d| BlueprintDatasetConfig { - id: d.id, - pool: d.name.pool().clone(), - kind: 
d.name.dataset().clone(), - compression: d.compression.clone(), - quota: d.quota.map(|q| ByteCount::try_from(q).unwrap()), - reservation: d - .reservation - .map(|r| ByteCount::try_from(r).unwrap()), - }) - .collect(), + datasets, }, ); } From 1cd5a96c4373a5bdf3dc342412aac5239107095e Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 6 Aug 2024 12:40:08 -0700 Subject: [PATCH 19/84] Passing reconfigurator tests --- .../reconfigurator/execution/src/datasets.rs | 108 +++++++++--------- .../planning/src/blueprint_builder/builder.rs | 66 +++++++---- .../planner_decommissions_sleds_bp2.txt | 2 +- .../output/planner_nonprovisionable_bp2.txt | 2 +- sled-agent/src/sim/http_entrypoints.rs | 29 +++++ sled-agent/src/sim/sled_agent.rs | 10 +- sled-agent/src/sim/storage.rs | 42 +++++++ 7 files changed, 179 insertions(+), 80 deletions(-) diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index 43c9f96c4a..3a6e713600 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -243,12 +243,9 @@ mod tests { use super::*; use nexus_db_model::Zpool; use nexus_reconfigurator_planning::example::example; - use nexus_sled_agent_shared::inventory::OmicronZoneDataset; use nexus_test_utils_macros::nexus_test; - use nexus_types::deployment::blueprint_zone_type; - use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZoneFilter; - use nexus_types::deployment::BlueprintZoneType; + use omicron_common::api::internal::shared::DatasetKind; use omicron_common::zpool_name::ZpoolName; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::ZpoolUuid; @@ -292,25 +289,37 @@ mod tests { 0 ); - // Collect all the blueprint zones. - let all_omicron_zones = blueprint + // Let's allocate datasets for all the zones with durable datasets. + // + // Finding these datasets is normally the responsibility of the planner, + // but we're kinda hand-rolling it. + let all_datasets = blueprint .all_omicron_zones(BlueprintZoneFilter::All) - .map(|(_, zone)| zone) + .filter_map(|(_, zone)| { + if let Some(dataset) = zone.zone_type.durable_dataset() { + Some(BlueprintDatasetConfig { + disposition: BlueprintDatasetDisposition::InService, + id: DatasetUuid::new_v4(), + pool: dataset.dataset.pool_name.clone(), + kind: dataset.kind, + address: Some(dataset.address), + quota: None, + reservation: None, + compression: None, + }) + } else { + None + } + }) .collect::>(); // How many zones are there with durable datasets? - let nzones_with_durable_datasets = all_omicron_zones - .iter() - .filter(|z| z.zone_type.durable_dataset().is_some()) - .count(); + let nzones_with_durable_datasets = all_datasets.len(); - let ndatasets_inserted = ensure_dataset_records_exist( - opctx, - datastore, - all_omicron_zones.iter().copied(), - ) - .await - .expect("failed to ensure datasets"); + let ndatasets_inserted = + ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) + .await + .expect("failed to ensure datasets"); // We should have inserted a dataset for each zone with a durable // dataset. @@ -325,13 +334,10 @@ mod tests { ); // Ensuring the same datasets again should insert no new records. 
- let ndatasets_inserted = ensure_dataset_records_exist( - opctx, - datastore, - all_omicron_zones.iter().copied(), - ) - .await - .expect("failed to ensure datasets"); + let ndatasets_inserted = + ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) + .await + .expect("failed to ensure datasets"); assert_eq!(0, ndatasets_inserted); assert_eq!( datastore @@ -357,42 +363,36 @@ mod tests { .expect("failed to upsert zpool"); } - // Call `ensure_dataset_records_exist` again, adding new crucible and - // cockroach zones. It should insert only these new zones. + // Call `ensure_dataset_records_exist` again, adding new datasets. + // + // It should only insert these new zones. let new_zones = [ - BlueprintZoneConfig { - disposition: BlueprintZoneDisposition::InService, - id: OmicronZoneUuid::new_v4(), - underlay_address: "::1".parse().unwrap(), - filesystem_pool: Some(ZpoolName::new_external(new_zpool_id)), - zone_type: BlueprintZoneType::Crucible( - blueprint_zone_type::Crucible { - address: "[::1]:0".parse().unwrap(), - dataset: OmicronZoneDataset { - pool_name: ZpoolName::new_external(new_zpool_id), - }, - }, - ), + BlueprintDatasetConfig { + disposition: BlueprintDatasetDisposition::InService, + id: DatasetUuid::new_v4(), + pool: ZpoolName::new_external(new_zpool_id), + kind: DatasetKind::Debug, + address: None, + quota: None, + reservation: None, + compression: None, }, - BlueprintZoneConfig { - disposition: BlueprintZoneDisposition::InService, - id: OmicronZoneUuid::new_v4(), - underlay_address: "::1".parse().unwrap(), - filesystem_pool: Some(ZpoolName::new_external(new_zpool_id)), - zone_type: BlueprintZoneType::CockroachDb( - blueprint_zone_type::CockroachDb { - address: "[::1]:0".parse().unwrap(), - dataset: OmicronZoneDataset { - pool_name: ZpoolName::new_external(new_zpool_id), - }, - }, - ), + BlueprintDatasetConfig { + disposition: BlueprintDatasetDisposition::InService, + id: DatasetUuid::new_v4(), + pool: ZpoolName::new_external(new_zpool_id), + kind: DatasetKind::ZoneRoot, + address: None, + quota: None, + reservation: None, + compression: None, }, ]; + let ndatasets_inserted = ensure_dataset_records_exist( opctx, datastore, - all_omicron_zones.iter().copied().chain(&new_zones), + all_datasets.iter().chain(&new_zones), ) .await .expect("failed to ensure datasets"); diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 132a2b9e78..188241bd36 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -681,6 +681,7 @@ impl<'a> BlueprintBuilder<'a> { ) -> Result { let (mut additions, mut updates, expunges, removals) = { let mut datasets_builder = BlueprintSledDatasetsBuilder::new( + self.log.clone(), sled_id, &self.datasets, resources, @@ -816,12 +817,16 @@ impl<'a> BlueprintBuilder<'a> { // Remove all datasets that we've finished expunging. 
datasets.retain(|d| { - debug_assert_eq!( - d.disposition, - BlueprintDatasetDisposition::Expunged, - "We should only be removing datasets that are already expunged" - ); - !removals.contains(&d.id) + if removals.contains(&d.id) { + debug_assert_eq!( + d.disposition, + BlueprintDatasetDisposition::Expunged, + "Should only remove datasets that are expunged, but dataset {} is {:?}", + d.id, d.disposition, + ); + return false; + }; + true }); // We sort in the call to "BlueprintDatasetsBuilder::into_datasets_map", @@ -1574,19 +1579,27 @@ impl<'a> BlueprintDatasetsBuilder<'a> { /// Helper for working with sets of datasets on a single sled struct BlueprintSledDatasetsBuilder<'a> { + log: Logger, blueprint_datasets: BTreeMap>, database_datasets: BTreeMap>, + // Datasets which are unchanged from the prior blueprint + unchanged_datasets: + BTreeMap>, + // Datasets which are new in this blueprint new_datasets: BTreeMap>, + // Datasets which existed in the old blueprint, but which are + // changing in this one updated_datasets: BTreeMap>, } impl<'a> BlueprintSledDatasetsBuilder<'a> { pub fn new( + log: Logger, sled_id: SledUuid, datasets: &'a BlueprintDatasetsBuilder<'_>, resources: &'a SledResources, @@ -1617,8 +1630,10 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { } Self { + log, blueprint_datasets, database_datasets, + unchanged_datasets: BTreeMap::new(), new_datasets: BTreeMap::new(), updated_datasets: BTreeMap::new(), } @@ -1659,19 +1674,19 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { let new_config = make_config(old_config.id); // If it needs updating, add it - if *old_config != new_config { - self.updated_datasets - .entry(zpool_id) - .and_modify(|values: &mut BTreeMap<_, _>| { - values.insert( - new_config.kind.clone(), - new_config.clone(), - ); - }) - .or_insert_with(|| { - BTreeMap::from([(new_config.kind.clone(), new_config)]) - }); - } + let target = if *old_config != new_config { + &mut self.updated_datasets + } else { + &mut self.unchanged_datasets + }; + target + .entry(zpool_id) + .and_modify(|values: &mut BTreeMap<_, _>| { + values.insert(new_config.kind.clone(), new_config.clone()); + }) + .or_insert_with(|| { + BTreeMap::from([(new_config.kind.clone(), new_config)]) + }); return; } @@ -1726,7 +1741,13 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { *zpool_id, dataset_id, ) + && !dataset_exists_in( + &self.unchanged_datasets, + *zpool_id, + dataset_id, + ) { + info!(self.log, "dataset expungeable (not needed in blueprint)"; "id" => ?dataset_id); removals.insert(dataset_id); } } @@ -1770,6 +1791,7 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { *zpool_id, config.id, ) { + info!(self.log, "dataset removable (expunged, not in database)"; "id" => ?config.id); removals.insert(config.id); } } @@ -1788,8 +1810,7 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { ) -> Option<&'a BlueprintDatasetConfig> { self.blueprint_datasets .get(&zpool) - .map(|datasets| datasets.get(kind)) - .flatten() + .and_then(|datasets| datasets.get(kind)) .copied() } @@ -1800,8 +1821,7 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { ) -> Option<&'a DatasetConfig> { self.database_datasets .get(&zpool) - .map(|datasets| datasets.get(kind)) - .flatten() + .and_then(|datasets| datasets.get(kind)) .copied() } } diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt index 6954d4e12b..879884567d 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt +++ 
b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt @@ -104,7 +104,7 @@ WARNING: Zones exist without physical disks! METADATA: created by::::::::::: test_blueprint2 created at::::::::::: 1970-01-01T00:00:00.000Z - comment:::::::::::::: sled a1b477db-b629-48eb-911d-1ccdafca75b9: expunged 12 zones because: sled policy is expunged + comment:::::::::::::: sled a1b477db-b629-48eb-911d-1ccdafca75b9: expunged 12 zones because: sled policy is expunged, sled d67ce8f0-a691-4010-b414-420d82e80527: added 22 datasets, updated: 0, removed 0 datasets, sled fefcf4cf-f7e7-46b3-b629-058526ce440e: added 23 datasets, updated: 0, removed 0 datasets internal DNS version: 1 external DNS version: 1 diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt index 5a2ed5a28a..bbdfa88eae 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt @@ -168,7 +168,7 @@ WARNING: Zones exist without physical disks! METADATA: created by::::::::::: test_blueprint2 created at::::::::::: 1970-01-01T00:00:00.000Z - comment:::::::::::::: sled 48d95fef-bc9f-4f50-9a53-1e075836291d: expunged 12 zones because: sled policy is expunged + comment:::::::::::::: sled 48d95fef-bc9f-4f50-9a53-1e075836291d: expunged 12 zones because: sled policy is expunged, sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: added 22 datasets, updated: 0, removed 0 datasets, sled 75bc286f-2b4b-482c-9431-59272af529da: added 25 datasets, updated: 0, removed 0 datasets, sled affab35f-600a-4109-8ea0-34a067a4e0bc: added 25 datasets, updated: 0, removed 0 datasets internal DNS version: 1 external DNS version: 1 diff --git a/sled-agent/src/sim/http_entrypoints.rs b/sled-agent/src/sim/http_entrypoints.rs index 268e8a9cf1..4117e27bf9 100644 --- a/sled-agent/src/sim/http_entrypoints.rs +++ b/sled-agent/src/sim/http_entrypoints.rs @@ -27,11 +27,13 @@ use omicron_common::api::internal::nexus::UpdateArtifactId; use omicron_common::api::internal::shared::{ ResolvedVpcRouteSet, ResolvedVpcRouteState, SwitchPorts, }; +use omicron_common::disk::DatasetsConfig; use omicron_common::disk::OmicronPhysicalDisksConfig; use omicron_uuid_kinds::{GenericUuid, InstanceUuid}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use sled_agent_types::early_networking::EarlyNetworkConfig; +use sled_storage::resources::DatasetsManagementResult; use sled_storage::resources::DisksManagementResult; use std::sync::Arc; use uuid::Uuid; @@ -65,6 +67,8 @@ pub fn api() -> SledApiDescription { api.register(read_network_bootstore_config)?; api.register(write_network_bootstore_config)?; api.register(inventory)?; + api.register(datasets_get)?; + api.register(datasets_put)?; api.register(omicron_physical_disks_get)?; api.register(omicron_physical_disks_put)?; api.register(omicron_zones_get)?; @@ -456,6 +460,31 @@ async fn inventory( )) } +#[endpoint { + method = PUT, + path = "/datasets", +}] +async fn datasets_put( + rqctx: RequestContext>, + body: TypedBody, +) -> Result, HttpError> { + let sa = rqctx.context(); + let body_args = body.into_inner(); + let result = sa.datasets_ensure(body_args).await?; + Ok(HttpResponseOk(result)) +} + +#[endpoint { + method = GET, + path = "/datasets", +}] +async fn datasets_get( + rqctx: RequestContext>, +) -> Result, HttpError> { + let sa = rqctx.context(); + Ok(HttpResponseOk(sa.datasets_list().await?)) +} + 
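// A rough sketch of the intended round-trip for these simulated endpoints
// (illustrative only; the assertions here are not tests in this patch): the
// simulated sled-agent just records the last `DatasetsConfig` it was given
// and echoes it back, so
//
//     let result = sa.datasets_ensure(config.clone()).await?;
//     assert!(!result.has_error());
//     assert_eq!(sa.datasets_list().await?, config);
//
// should hold for any config accepted by the generation check in
// `Storage::datasets_ensure` (sim/storage.rs, below).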
#[endpoint { method = PUT, path = "/omicron-physical-disks", diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index c766f9d109..3a1c24f504 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -51,6 +51,7 @@ use propolis_mock_server::Context as PropolisContext; use sled_agent_types::early_networking::{ EarlyNetworkConfig, EarlyNetworkConfigBody, }; +use sled_storage::resources::DatasetsManagementResult; use sled_storage::resources::DisksManagementResult; use slog::Logger; use std::collections::{HashMap, HashSet, VecDeque}; @@ -893,8 +894,15 @@ impl SledAgent { }) } + pub async fn datasets_ensure( + &self, + config: DatasetsConfig, + ) -> Result { + self.storage.lock().await.datasets_ensure(config).await + } + pub async fn datasets_list(&self) -> Result { - todo!(); + self.storage.lock().await.datasets_list().await } pub async fn omicron_physical_disks_list( diff --git a/sled-agent/src/sim/storage.rs b/sled-agent/src/sim/storage.rs index 948ac96bcd..13b8d3261f 100644 --- a/sled-agent/src/sim/storage.rs +++ b/sled-agent/src/sim/storage.rs @@ -18,6 +18,7 @@ use crucible_agent_client::types::{ use dropshot::HandlerTaskMode; use dropshot::HttpError; use futures::lock::Mutex; +use omicron_common::disk::DatasetsConfig; use omicron_common::disk::DiskIdentity; use omicron_common::disk::DiskVariant; use omicron_common::disk::OmicronPhysicalDisksConfig; @@ -26,6 +27,8 @@ use omicron_uuid_kinds::InstanceUuid; use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::ZpoolUuid; use propolis_client::types::VolumeConstructionRequest; +use sled_storage::resources::DatasetManagementStatus; +use sled_storage::resources::DatasetsManagementResult; use sled_storage::resources::DiskManagementStatus; use sled_storage::resources::DisksManagementResult; use slog::Logger; @@ -555,6 +558,7 @@ pub struct Storage { sled_id: Uuid, log: Logger, config: Option, + dataset_config: Option, physical_disks: HashMap, next_disk_slot: i64, zpools: HashMap, @@ -568,6 +572,7 @@ impl Storage { sled_id, log, config: None, + dataset_config: None, physical_disks: HashMap::new(), next_disk_slot: 0, zpools: HashMap::new(), @@ -581,6 +586,43 @@ impl Storage { &self.physical_disks } + pub async fn datasets_list(&self) -> Result { + let Some(config) = self.dataset_config.as_ref() else { + return Err(HttpError::for_not_found( + None, + "No control plane datasets".into(), + )); + }; + Ok(config.clone()) + } + + pub async fn datasets_ensure( + &mut self, + config: DatasetsConfig, + ) -> Result { + if let Some(stored_config) = self.dataset_config.as_ref() { + if stored_config.generation < config.generation { + return Err(HttpError::for_client_error( + None, + http::StatusCode::BAD_REQUEST, + "Generation number too old".to_string(), + )); + } + } + self.dataset_config.replace(config.clone()); + + Ok(DatasetsManagementResult { + status: config + .datasets + .into_iter() + .map(|config| DatasetManagementStatus { + dataset_name: config.name.clone(), + err: None, + }) + .collect(), + }) + } + pub async fn omicron_physical_disks_list( &mut self, ) -> Result { From 940c24000f0b5417ee47f1a47fe596da0514f2cc Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 7 Aug 2024 11:19:35 -0700 Subject: [PATCH 20/84] Fix BP loading, add datasets to representative blueprint --- .../db-queries/src/db/datastore/deployment.rs | 55 +++++++++++++++++++ nexus/reconfigurator/planning/src/example.rs | 1 + .../planner_decommissions_sleds_bp2.txt | 2 +- .../output/planner_nonprovisionable_bp2.txt | 2 
+- 4 files changed, 58 insertions(+), 2 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index ce142e376d..251cb76e4b 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -700,6 +700,55 @@ impl DataStore { } } + // Load all the datasets for each sled + { + use db::schema::bp_omicron_dataset::dsl; + + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + // `paginated` implicitly orders by our `id`, which is also + // handy for testing: the datasets are always consistently ordered + let batch = paginated( + dsl::bp_omicron_dataset, + dsl::id, + &p.current_pagparams(), + ) + .filter(dsl::blueprint_id.eq(blueprint_id)) + .select(BpOmicronDataset::as_select()) + .load_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })?; + + paginator = p.found_batch(&batch, &|d| d.id); + + for d in batch { + let sled_datasets = blueprint_datasets + .get_mut(&d.sled_id.into()) + .ok_or_else(|| { + // This error means that we found a row in + // bp_omicron_dataset with no associated record in + // bp_sled_omicron_datasets. This should be + // impossible and reflects either a bug or database + // corruption. + Error::internal_error(&format!( + "dataset {}: unknown sled: {}", + d.id, d.sled_id + )) + })?; + + let dataset_id = d.id; + sled_datasets.datasets.push(d.try_into().map_err(|e| { + Error::internal_error(&format!( + "Cannot parse dataset {}: {e}", + dataset_id + )) + })?); + } + } + } + // Sort all disks to match what blueprint builders do. for (_, disks_config) in blueprint_disks.iter_mut() { disks_config.disks.sort_unstable_by_key(|d| d.id); @@ -1517,6 +1566,12 @@ mod tests { for (table_name, result) in [ query_count!(blueprint, id), + query_count!(bp_sled_state, blueprint_id), + query_count!(bp_sled_omicron_datasets, blueprint_id), + query_count!(bp_sled_omicron_physical_disks, blueprint_id), + query_count!(bp_sled_omicron_zones, blueprint_id), + query_count!(bp_omicron_dataset, blueprint_id), + query_count!(bp_omicron_physical_disk, blueprint_id), query_count!(bp_omicron_zone, blueprint_id), query_count!(bp_omicron_zone_nic, blueprint_id), ] { diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs index e52fe3fc4b..27ed87f652 100644 --- a/nexus/reconfigurator/planning/src/example.rs +++ b/nexus/reconfigurator/planning/src/example.rs @@ -85,6 +85,7 @@ impl ExampleSystem { .sled_ensure_zone_crucible(sled_id, *pool_name) .unwrap(); } + builder.sled_ensure_datasets(sled_id, &sled_resources).unwrap(); } let blueprint = builder.build(); diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt index 879884567d..fda6f3cbb5 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt @@ -104,7 +104,7 @@ WARNING: Zones exist without physical disks! 
METADATA: created by::::::::::: test_blueprint2 created at::::::::::: 1970-01-01T00:00:00.000Z - comment:::::::::::::: sled a1b477db-b629-48eb-911d-1ccdafca75b9: expunged 12 zones because: sled policy is expunged, sled d67ce8f0-a691-4010-b414-420d82e80527: added 22 datasets, updated: 0, removed 0 datasets, sled fefcf4cf-f7e7-46b3-b629-058526ce440e: added 23 datasets, updated: 0, removed 0 datasets + comment:::::::::::::: sled a1b477db-b629-48eb-911d-1ccdafca75b9: expunged 12 zones because: sled policy is expunged, sled d67ce8f0-a691-4010-b414-420d82e80527: added 20 datasets, updated: 0, removed 0 datasets, sled fefcf4cf-f7e7-46b3-b629-058526ce440e: added 21 datasets, updated: 0, removed 0 datasets internal DNS version: 1 external DNS version: 1 diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt index bbdfa88eae..8aa86d16c3 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt @@ -168,7 +168,7 @@ WARNING: Zones exist without physical disks! METADATA: created by::::::::::: test_blueprint2 created at::::::::::: 1970-01-01T00:00:00.000Z - comment:::::::::::::: sled 48d95fef-bc9f-4f50-9a53-1e075836291d: expunged 12 zones because: sled policy is expunged, sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: added 22 datasets, updated: 0, removed 0 datasets, sled 75bc286f-2b4b-482c-9431-59272af529da: added 25 datasets, updated: 0, removed 0 datasets, sled affab35f-600a-4109-8ea0-34a067a4e0bc: added 25 datasets, updated: 0, removed 0 datasets + comment:::::::::::::: sled 48d95fef-bc9f-4f50-9a53-1e075836291d: expunged 12 zones because: sled policy is expunged, sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: added 20 datasets, updated: 0, removed 0 datasets, sled 75bc286f-2b4b-482c-9431-59272af529da: added 23 datasets, updated: 0, removed 0 datasets, sled affab35f-600a-4109-8ea0-34a067a4e0bc: added 23 datasets, updated: 0, removed 0 datasets internal DNS version: 1 external DNS version: 1 From c5621256ccc7a8995842d057e608c8b3b66effe5 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 7 Aug 2024 13:06:11 -0700 Subject: [PATCH 21/84] Optional properties still get set --- illumos-utils/src/zfs.rs | 67 +++++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 32 deletions(-) diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index 21de2a50da..0178a43e84 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -376,6 +376,10 @@ impl Zfs { Ok(()) } + /// Applies the following properties to the filesystem. + /// + /// If any of the options are not supplied, a default "none" or "off" + /// value is supplied. 
fn apply_properties( name: &str, mountpoint: &Mountpoint, @@ -383,40 +387,39 @@ impl Zfs { reservation: Option, compression: Option, ) -> Result<(), EnsureFilesystemError> { - if let Some(quota) = quota { - if let Err(err) = - Self::set_value(name, "quota", &format!("{quota}")) - { - return Err(EnsureFilesystemError { - name: name.to_string(), - mountpoint: mountpoint.clone(), - // Take the execution error from the SetValueError - err: err.err.into(), - }); - } + let quota = quota + .map(|q| q.to_string()) + .unwrap_or_else(|| String::from("none")); + let reservation = reservation + .map(|r| r.to_string()) + .unwrap_or_else(|| String::from("none")); + let compression = compression.unwrap_or_else(|| String::from("off")); + + if let Err(err) = Self::set_value(name, "quota", &format!("{quota}")) { + return Err(EnsureFilesystemError { + name: name.to_string(), + mountpoint: mountpoint.clone(), + // Take the execution error from the SetValueError + err: err.err.into(), + }); } - if let Some(reservation) = reservation { - if let Err(err) = - Self::set_value(name, "reservation", &format!("{reservation}")) - { - return Err(EnsureFilesystemError { - name: name.to_string(), - mountpoint: mountpoint.clone(), - // Take the execution error from the SetValueError - err: err.err.into(), - }); - } + if let Err(err) = + Self::set_value(name, "reservation", &format!("{reservation}")) + { + return Err(EnsureFilesystemError { + name: name.to_string(), + mountpoint: mountpoint.clone(), + // Take the execution error from the SetValueError + err: err.err.into(), + }); } - if let Some(compression) = compression { - if let Err(err) = Self::set_value(name, "compression", &compression) - { - return Err(EnsureFilesystemError { - name: name.to_string(), - mountpoint: mountpoint.clone(), - // Take the execution error from the SetValueError - err: err.err.into(), - }); - } + if let Err(err) = Self::set_value(name, "compression", &compression) { + return Err(EnsureFilesystemError { + name: name.to_string(), + mountpoint: mountpoint.clone(), + // Take the execution error from the SetValueError + err: err.err.into(), + }); } Ok(()) } From c7a5adfd7484b4578c8695fbc58d20b0d0dba4f1 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 7 Aug 2024 13:09:45 -0700 Subject: [PATCH 22/84] clippy --- illumos-utils/src/zfs.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index 0178a43e84..9b3c288d66 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -395,7 +395,7 @@ impl Zfs { .unwrap_or_else(|| String::from("none")); let compression = compression.unwrap_or_else(|| String::from("off")); - if let Err(err) = Self::set_value(name, "quota", &format!("{quota}")) { + if let Err(err) = Self::set_value(name, "quota", "a) { return Err(EnsureFilesystemError { name: name.to_string(), mountpoint: mountpoint.clone(), @@ -404,7 +404,7 @@ impl Zfs { }); } if let Err(err) = - Self::set_value(name, "reservation", &format!("{reservation}")) + Self::set_value(name, "reservation", &reservation) { return Err(EnsureFilesystemError { name: name.to_string(), From 181507cdb8fd6ad4dc70bef26283071622dda9a4 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 7 Aug 2024 13:11:16 -0700 Subject: [PATCH 23/84] fmt --- illumos-utils/src/zfs.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index 9b3c288d66..c9fd7fa315 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -403,9 
+403,7 @@ impl Zfs { err: err.err.into(), }); } - if let Err(err) = - Self::set_value(name, "reservation", &reservation) - { + if let Err(err) = Self::set_value(name, "reservation", &reservation) { return Err(EnsureFilesystemError { name: name.to_string(), mountpoint: mountpoint.clone(), From 8e1df07079619c10445395582a3e7946698c7a44 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 7 Aug 2024 13:35:52 -0700 Subject: [PATCH 24/84] str not string --- common/src/api/internal/shared.rs | 4 ++-- nexus/reconfigurator/execution/src/datasets.rs | 2 +- nexus/src/app/rack.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index 673378c1d3..d30789ddf9 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -765,9 +765,9 @@ impl DatasetKind { /// Returns the zone name, if this is dataset for a zone filesystem. /// /// Otherwise, returns "None". - pub fn zone_name(&self) -> Option { + pub fn zone_name(&self) -> Option<&str> { if let DatasetKind::Zone { name } = self { - Some(name.clone()) + Some(name) } else { None } diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index 003861519e..d20de82cef 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -68,7 +68,7 @@ pub(crate) async fn ensure_dataset_records_exist( pool_id.into_untyped_uuid(), Some(address), kind.clone().into(), - kind.zone_name(), + kind.zone_name().map(String::from), ); let maybe_inserted = datastore .dataset_insert_if_not_exists(dataset) diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index f4162f55ab..432c44bea4 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -147,7 +147,7 @@ impl super::Nexus { dataset.zpool_id, Some(dataset.request.address), dataset.request.kind.clone().into(), - dataset.request.kind.zone_name(), + dataset.request.kind.zone_name().map(String::from), ) }) .collect(); From 93134c25232d244cd3c54dcbd6d847640f9525ab Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 7 Aug 2024 14:05:56 -0700 Subject: [PATCH 25/84] cockroachdb, not cockroach_db --- common/src/api/internal/shared.rs | 5 +---- openapi/sled-agent.json | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index d30789ddf9..79f5eb8c3b 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -719,10 +719,7 @@ pub struct ResolvedVpcRouteSet { pub enum DatasetKind { // Durable datasets for zones - // This renaming exists for backwards compatibility -- this enum variant - // was serialized to "all-zones-request" as "cockroach_db" and should - // stay that way, unless we perform an explicit schema change. 
- #[serde(rename = "cockroach_db")] + #[serde(rename = "cockroachdb")] Cockroach, Crucible, Clickhouse, diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index d42ffd8fae..173a1c8058 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -2211,7 +2211,7 @@ "type": { "type": "string", "enum": [ - "cockroach_db" + "cockroachdb" ] } }, From d88d5413c74b1e5b7c8199cbb907e29df4f30d49 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 7 Aug 2024 14:34:17 -0700 Subject: [PATCH 26/84] generation numbers --- common/src/api/internal/shared.rs | 1 - sled-storage/src/manager.rs | 11 +++++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index 79f5eb8c3b..874d1a5866 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -718,7 +718,6 @@ pub struct ResolvedVpcRouteSet { #[serde(tag = "type", rename_all = "snake_case")] pub enum DatasetKind { // Durable datasets for zones - #[serde(rename = "cockroachdb")] Cockroach, Crucible, diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index 7f61097b6d..3b32b36c09 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -697,7 +697,9 @@ impl StorageManager { if config.generation < ledger_data.generation { warn!( log, - "Request looks out-of-date compared to prior request" + "Request looks out-of-date compared to prior request"; + "requested_generation" => ?config.generation, + "ledger_generation" => ?ledger_data.generation, ); return Err(Error::DatasetConfigurationOutdated { requested: config.generation, @@ -720,7 +722,12 @@ impl StorageManager { }); } } else { - info!(log, "Request looks newer than prior requests"); + info!( + log, + "Request looks newer than prior requests"; + "requested_generation" => ?config.generation, + "ledger_generation" => ?ledger_data.generation, + ); } ledger } From 95ebbb87174c84c57eb38bdd8bfc698a7c23475b Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 7 Aug 2024 15:35:52 -0700 Subject: [PATCH 27/84] review feedback --- illumos-utils/src/zfs.rs | 22 ++++++++++++++++++++-- openapi/sled-agent.json | 2 ++ sled-agent/src/http_entrypoints.rs | 4 +++- sled-agent/src/sled_agent.rs | 4 ++-- sled-storage/src/manager.rs | 12 +++++++----- 5 files changed, 34 insertions(+), 10 deletions(-) diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index c9fd7fa315..5df1b73c07 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -260,9 +260,27 @@ impl Zfs { Ok(()) } - /// Creates a new ZFS filesystem named `name`, unless one already exists. + /// Creates a new ZFS filesystem unless one already exists. /// - /// Applies an optional quota, provided _in bytes_. + /// - `name`: the full path to the zfs dataset + /// - `mountpoint`: The expected mountpoint of this filesystem. + /// If the filesystem already exists, and is not mounted here, and error is + /// returned. + /// - `zoned`: identifies whether or not this filesystem should be + /// used in a zone. Only used when creating a new filesystem - ignored + /// if the filesystem already exists. + /// - `do_format`: if "false", prevents a new filesystem from being created, + /// and returns an error if it is not found. + /// - `encryption_details`: Ensures a filesystem as an encryption root. + /// For new filesystems, this supplies the key, and all datasets within this + /// root are implicitly encrypted. 
For existing filesystems, ensures that + /// they are mounted (and that keys are loaded), but does not verify the + /// input details. + /// - `size_details`: If supplied, sets size-related information. These + /// values are set on both new filesystem creation as well as when loading + /// existing filesystems. + /// - `additional_options`: Additional ZFS options, which are only set when + /// creating new filesystems. #[allow(clippy::too_many_arguments)] pub fn ensure_filesystem( name: &str, diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index 173a1c8058..c9e5c709e3 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -179,6 +179,7 @@ }, "/datasets": { "get": { + "summary": "Lists the datasets that this sled is configured to use", "operationId": "datasets_get", "responses": { "200": { @@ -200,6 +201,7 @@ } }, "put": { + "summary": "Configures datasets to be used on this sled", "operationId": "datasets_put", "requestBody": { "content": { diff --git a/sled-agent/src/http_entrypoints.rs b/sled-agent/src/http_entrypoints.rs index c36de9a787..97671b42e6 100644 --- a/sled-agent/src/http_entrypoints.rs +++ b/sled-agent/src/http_entrypoints.rs @@ -352,6 +352,7 @@ async fn omicron_zones_get( Ok(HttpResponseOk(sa.omicron_zones_list().await?)) } +/// Configures datasets to be used on this sled #[endpoint { method = PUT, path = "/datasets", @@ -366,6 +367,7 @@ async fn datasets_put( Ok(HttpResponseOk(result)) } +/// Lists the datasets that this sled is configured to use #[endpoint { method = GET, path = "/datasets", @@ -374,7 +376,7 @@ async fn datasets_get( rqctx: RequestContext, ) -> Result, HttpError> { let sa = rqctx.context(); - Ok(HttpResponseOk(sa.datasets_list().await?)) + Ok(HttpResponseOk(sa.datasets_config_list().await?)) } #[endpoint { diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index 5368539445..29cf4c6de2 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -810,8 +810,8 @@ impl SledAgent { self.inner.zone_bundler.cleanup().await.map_err(Error::from) } - pub async fn datasets_list(&self) -> Result { - Ok(self.storage().datasets_list().await?) + pub async fn datasets_config_list(&self) -> Result { + Ok(self.storage().datasets_config_list().await?) } pub async fn datasets_ensure( diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index 3b32b36c09..c086d656d1 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -273,7 +273,7 @@ impl StorageHandle { /// Reads the last value written to storage by /// [Self::datasets_ensure]. - pub async fn datasets_list(&self) -> Result { + pub async fn datasets_config_list(&self) -> Result { let (tx, rx) = oneshot::channel(); self.tx .send(StorageRequest::DatasetsList { tx: tx.into() }) @@ -479,7 +479,7 @@ impl StorageManager { let _ = tx.0.send(self.datasets_ensure(config).await); } StorageRequest::DatasetsList { tx } => { - let _ = tx.0.send(self.datasets_list().await); + let _ = tx.0.send(self.datasets_config_list().await); } StorageRequest::OmicronPhysicalDisksEnsure { config, tx } => { let _ = @@ -790,8 +790,9 @@ impl StorageManager { status } - async fn datasets_list(&mut self) -> Result { - let log = self.log.new(o!("request" => "datasets_list")); + // Lists datasets that this sled is configured to use. 
+ async fn datasets_config_list(&mut self) -> Result { + let log = self.log.new(o!("request" => "datasets_config_list")); let ledger_paths = self.all_omicron_dataset_ledgers().await; let maybe_ledger = @@ -1637,7 +1638,8 @@ mod tests { assert!(!status.has_error()); // List datasets, expect to see what we just created - let observed_config = harness.handle().datasets_list().await.unwrap(); + let observed_config = + harness.handle().datasets_config_list().await.unwrap(); assert_eq!(config, observed_config); // Calling "datasets_ensure" with the same input should succeed. From 994757e017e145f483da68f2702d2e4b4359301f Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 7 Aug 2024 16:50:34 -0700 Subject: [PATCH 28/84] Serialize via string, not tag --- common/src/api/internal/shared.rs | 96 +++++++++++++++++++++++++------ common/src/disk.rs | 5 +- openapi/nexus-internal.json | 2 +- openapi/sled-agent.json | 4 +- schema/omicron-datasets.json | 6 +- sled-storage/src/error.rs | 3 + sled-storage/src/manager.rs | 33 +++++++---- 7 files changed, 113 insertions(+), 36 deletions(-) diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index 874d1a5866..403e0855a3 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -10,13 +10,14 @@ use crate::{ }; use oxnet::{IpNet, Ipv4Net, Ipv6Net}; use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use std::{ collections::{HashMap, HashSet}, fmt, net::{IpAddr, Ipv4Addr, Ipv6Addr}, str::FromStr, }; +use strum::EnumCount; use uuid::Uuid; /// The type of network interface @@ -704,16 +705,7 @@ pub struct ResolvedVpcRouteSet { /// Describes the purpose of the dataset. #[derive( - Debug, - Serialize, - Deserialize, - JsonSchema, - Clone, - PartialEq, - Eq, - Ord, - PartialOrd, - Hash, + Debug, JsonSchema, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, EnumCount, )] #[serde(tag = "type", rename_all = "snake_case")] pub enum DatasetKind { @@ -736,6 +728,25 @@ pub enum DatasetKind { Debug, } +impl Serialize for DatasetKind { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} + +impl<'de> Deserialize<'de> for DatasetKind { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + s.parse().map_err(de::Error::custom) + } +} + impl DatasetKind { pub fn dataset_should_be_encrypted(&self) -> bool { match self { @@ -758,7 +769,7 @@ impl DatasetKind { } } - /// Returns the zone name, if this is dataset for a zone filesystem. + /// Returns the zone name, if this is a dataset for a zone filesystem. /// /// Otherwise, returns "None". 
pub fn zone_name(&self) -> Option<&str> { @@ -808,16 +819,22 @@ impl FromStr for DatasetKind { fn from_str(s: &str) -> Result { use DatasetKind::*; let kind = match s { + "cockroachdb" => Cockroach, "crucible" => Crucible, - "cockroachdb" | "cockroach_db" => Cockroach, "clickhouse" => Clickhouse, "clickhouse_keeper" => ClickhouseKeeper, "external_dns" => ExternalDns, "internal_dns" => InternalDns, - _ => { - return Err(DatasetKindParseError::UnknownDataset( - s.to_string(), - )) + "zone" => ZoneRoot, + "debug" => Debug, + other => { + if let Some(name) = other.strip_prefix("zone/") { + Zone { name: name.to_string() } + } else { + return Err(DatasetKindParseError::UnknownDataset( + s.to_string(), + )); + } } }; Ok(kind) @@ -846,6 +863,7 @@ pub struct SledIdentifiers { #[cfg(test)] mod tests { + use super::*; use crate::api::internal::shared::AllowedSourceIps; use oxnet::{IpNet, Ipv4Net, Ipv6Net}; use std::net::{Ipv4Addr, Ipv6Addr}; @@ -890,4 +908,48 @@ mod tests { serde_json::from_str(r#"{"allow":"any"}"#).unwrap(), ); } + + #[test] + fn test_dataset_kind_serialization() { + let kinds = [ + DatasetKind::Crucible, + DatasetKind::Cockroach, + DatasetKind::Clickhouse, + DatasetKind::ClickhouseKeeper, + DatasetKind::ExternalDns, + DatasetKind::InternalDns, + DatasetKind::ZoneRoot, + DatasetKind::Zone { name: String::from("myzone") }, + DatasetKind::Debug, + ]; + + assert_eq!(kinds.len(), DatasetKind::COUNT); + + for kind in &kinds { + // To string, from string + let as_str = kind.to_string(); + let from_str = + DatasetKind::from_str(&as_str).unwrap_or_else(|_| { + panic!("Failed to convert {kind} to and from string") + }); + assert_eq!( + *kind, from_str, + "{kind} failed to convert to/from a string" + ); + + // Serialize, deserialize + let ser = serde_json::to_string(&kind) + .unwrap_or_else(|_| panic!("Failed to serialize {kind}")); + let de: DatasetKind = serde_json::from_str(&ser) + .unwrap_or_else(|_| panic!("Failed to deserialize {kind}")); + assert_eq!(*kind, de, "{kind} failed serialization"); + + // Test that serialization is equivalent to stringifying. + assert_eq!( + format!("\"{as_str}\""), + ser, + "{kind} does not match stringification/serialization" + ); + } + } } diff --git a/common/src/disk.rs b/common/src/disk.rs index b9a259574e..86ee48c0f6 100644 --- a/common/src/disk.rs +++ b/common/src/disk.rs @@ -8,6 +8,7 @@ use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::ZpoolUuid; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; use uuid::Uuid; use crate::{ @@ -180,12 +181,12 @@ pub struct DatasetsConfig { /// for a sled before any requests have been made. 
pub generation: Generation, - pub datasets: Vec, + pub datasets: BTreeMap, } impl Default for DatasetsConfig { fn default() -> Self { - Self { generation: Generation::new(), datasets: vec![] } + Self { generation: Generation::new(), datasets: BTreeMap::new() } } } diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 2bd7172bf3..cfbe028f9c 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -2599,7 +2599,7 @@ "type": { "type": "string", "enum": [ - "cockroach_db" + "cockroachdb" ] } }, diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index c9e5c709e3..82f3c6c3ae 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -2374,8 +2374,8 @@ "type": "object", "properties": { "datasets": { - "type": "array", - "items": { + "type": "object", + "additionalProperties": { "$ref": "#/components/schemas/DatasetConfig" } }, diff --git a/schema/omicron-datasets.json b/schema/omicron-datasets.json index 1821eddac7..b675432172 100644 --- a/schema/omicron-datasets.json +++ b/schema/omicron-datasets.json @@ -8,8 +8,8 @@ ], "properties": { "datasets": { - "type": "array", - "items": { + "type": "object", + "additionalProperties": { "$ref": "#/definitions/DatasetConfig" } }, @@ -86,7 +86,7 @@ "type": { "type": "string", "enum": [ - "cockroach_db" + "cockroachdb" ] } } diff --git a/sled-storage/src/error.rs b/sled-storage/src/error.rs index 96fca65f57..988f7f363a 100644 --- a/sled-storage/src/error.rs +++ b/sled-storage/src/error.rs @@ -84,6 +84,9 @@ pub enum Error { current: Generation, }, + #[error("Invalid configuration (UUID mismatch in arguments)")] + ConfigUuidMismatch, + #[error("Dataset configuration out-of-date (asked for {requested}, but latest is {current})")] DatasetConfigurationOutdated { requested: Generation, current: Generation }, diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index c086d656d1..bc0cac2014 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -674,12 +674,19 @@ impl StorageManager { async fn datasets_ensure( &mut self, - mut config: DatasetsConfig, + config: DatasetsConfig, ) -> Result { let log = self.log.new(o!("request" => "datasets_ensure")); - // Ensure that the datasets arrive in a consistent order - config.datasets.sort_by(|a, b| a.id.partial_cmp(&b.id).unwrap()); + // As a small input-check, confirm that the UUID of the map of inputs + // matches the DatasetConfig. + // + // The dataset configs are sorted by UUID so they always appear in the + // same order, but this check prevents adding an entry of: + // - (UUID: X, Config(UUID: Y)), for X != Y + if !config.datasets.iter().all(|(id, config)| *id == config.id) { + return Err(Error::ConfigUuidMismatch); + } // We rely on the schema being stable across reboots -- observe // "test_datasets_schema" below for that property guarantee. 
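A minimal sketch of what this new input-check rejects, in the style of the
storage-manager tests further below (illustrative only; `config` stands in for
a valid `DatasetsConfig` like the one those tests construct):

    // Key the map by a UUID other than the embedded config's own `id`:
    let mut bad = config.clone();
    let entry = bad.datasets.values().next().unwrap().clone();
    bad.datasets.insert(DatasetUuid::new_v4(), entry);
    let err = harness.handle().datasets_ensure(bad).await.unwrap_err();
    assert!(matches!(err, Error::ConfigUuidMismatch));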
@@ -764,7 +771,7 @@ impl StorageManager { config: &DatasetsConfig, ) -> DatasetsManagementResult { let mut status = vec![]; - for dataset in &config.datasets { + for dataset in config.datasets.values() { status.push(self.dataset_ensure_internal(log, dataset).await); } DatasetsManagementResult { status } @@ -1122,6 +1129,7 @@ mod tests { use omicron_common::ledger; use omicron_test_utils::dev::test_setup_log; use sled_hardware::DiskFirmware; + use std::collections::BTreeMap; use std::sync::atomic::Ordering; use uuid::Uuid; @@ -1621,13 +1629,16 @@ mod tests { let id = DatasetUuid::new_v4(); let zpool_name = ZpoolName::new_external(config.disks[0].pool_id); let name = DatasetName::new(zpool_name.clone(), DatasetKind::Crucible); - let datasets = vec![DatasetConfig { + let datasets = BTreeMap::from([( id, - name, - compression: None, - quota: None, - reservation: None, - }]; + DatasetConfig { + id, + name, + compression: None, + quota: None, + reservation: None, + }, + )]); // "Generation = 1" is reserved as "no requests seen yet", so we jump // past it. let generation = Generation::new().next(); @@ -1659,7 +1670,7 @@ mod tests { // However, calling it with a different input and the same generation // number should fail. config.generation = current_config_generation; - config.datasets[0].reservation = Some(1024); + config.datasets.values_mut().next().unwrap().reservation = Some(1024); let err = harness.handle().datasets_ensure(config.clone()).await.unwrap_err(); assert!(matches!(err, Error::DatasetConfigurationChanged { .. })); From 7156ba06870c10ea01df546f10d9f7b40f07a5a1 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 7 Aug 2024 17:18:40 -0700 Subject: [PATCH 29/84] safer API for Dataset::new --- nexus/db-model/src/dataset.rs | 13 ++++++++----- nexus/db-model/src/dataset_kind.rs | 8 ++++++-- nexus/db-queries/src/db/datastore/dataset.rs | 13 +++++-------- nexus/db-queries/src/db/datastore/mod.rs | 13 +++++-------- nexus/reconfigurator/execution/src/datasets.rs | 3 +-- .../execution/src/omicron_physical_disks.rs | 3 +-- .../background/tasks/decommissioned_disk_cleaner.rs | 3 +-- nexus/src/app/rack.rs | 3 +-- nexus/src/app/sagas/region_replacement_start.rs | 6 +----- nexus/src/app/sled.rs | 4 ++-- 10 files changed, 31 insertions(+), 38 deletions(-) diff --git a/nexus/db-model/src/dataset.rs b/nexus/db-model/src/dataset.rs index d525b80241..f896f11c5b 100644 --- a/nexus/db-model/src/dataset.rs +++ b/nexus/db-model/src/dataset.rs @@ -8,6 +8,7 @@ use crate::ipv6; use crate::schema::{dataset, region}; use chrono::{DateTime, Utc}; use db_macros::Asset; +use omicron_common::api::internal::shared::DatasetKind as ApiDatasetKind; use serde::{Deserialize, Serialize}; use std::net::{Ipv6Addr, SocketAddrV6}; use uuid::Uuid; @@ -49,13 +50,15 @@ impl Dataset { id: Uuid, pool_id: Uuid, addr: Option, - kind: DatasetKind, - zone_name: Option, + api_kind: ApiDatasetKind, ) -> Self { - let size_used = match kind { - DatasetKind::Crucible => Some(0), - _ => None, + let kind = DatasetKind::from(&api_kind); + let (size_used, zone_name) = match api_kind { + ApiDatasetKind::Crucible => (Some(0), None), + ApiDatasetKind::Zone { name } => (None, Some(name)), + _ => (None, None), }; + Self { identity: DatasetIdentity::new(id), time_deleted: None, diff --git a/nexus/db-model/src/dataset_kind.rs b/nexus/db-model/src/dataset_kind.rs index 2e71b96a41..856b340a11 100644 --- a/nexus/db-model/src/dataset_kind.rs +++ b/nexus/db-model/src/dataset_kind.rs @@ -27,8 +27,8 @@ impl_enum_type!( Debug => b"debug" ); -impl From for 
DatasetKind { - fn from(k: internal::shared::DatasetKind) -> Self { +impl From<&internal::shared::DatasetKind> for DatasetKind { + fn from(k: &internal::shared::DatasetKind) -> Self { match k { internal::shared::DatasetKind::Crucible => DatasetKind::Crucible, internal::shared::DatasetKind::Cockroach => DatasetKind::Cockroach, @@ -45,6 +45,10 @@ impl From for DatasetKind { DatasetKind::InternalDns } internal::shared::DatasetKind::ZoneRoot => DatasetKind::ZoneRoot, + // Enums in the database do not have associated data, so this drops + // the "name" of the zone and only considers the type. + // + // The zone name, if it exists, is stored in a separate column. internal::shared::DatasetKind::Zone { .. } => DatasetKind::Zone, internal::shared::DatasetKind::Debug => DatasetKind::Debug, } diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index 8a814aea80..0fe1c7912e 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -241,6 +241,7 @@ mod test { use nexus_db_model::SledSystemHardware; use nexus_db_model::SledUpdate; use nexus_test_utils::db::test_setup_database; + use omicron_common::api::internal::shared::DatasetKind as ApiDatasetKind; use omicron_test_utils::dev; #[tokio::test] @@ -291,8 +292,7 @@ mod test { Uuid::new_v4(), zpool_id, Some("[::1]:0".parse().unwrap()), - DatasetKind::Crucible, - None, + ApiDatasetKind::Crucible, )) .await .expect("failed to insert dataset") @@ -325,8 +325,7 @@ mod test { dataset1.id(), zpool_id, Some("[::1]:12345".parse().unwrap()), - DatasetKind::Cockroach, - None, + ApiDatasetKind::Cockroach, )) .await .expect("failed to do-nothing insert dataset"); @@ -342,8 +341,7 @@ mod test { Uuid::new_v4(), zpool_id, Some("[::1]:0".parse().unwrap()), - DatasetKind::Cockroach, - None, + ApiDatasetKind::Cockroach, )) .await .expect("failed to upsert dataset"); @@ -375,8 +373,7 @@ mod test { dataset1.id(), zpool_id, Some("[::1]:12345".parse().unwrap()), - DatasetKind::Cockroach, - None, + ApiDatasetKind::Cockroach, )) .await .expect("failed to do-nothing insert dataset"); diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 10c812de89..881f0d4aa5 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -392,10 +392,10 @@ mod test { use crate::db::identity::Asset; use crate::db::lookup::LookupPath; use crate::db::model::{ - BlockSize, ConsoleSession, Dataset, DatasetKind, ExternalIp, - PhysicalDisk, PhysicalDiskKind, PhysicalDiskPolicy, PhysicalDiskState, - Project, Rack, Region, SiloUser, SledBaseboard, SledSystemHardware, - SledUpdate, SshKey, Zpool, + BlockSize, ConsoleSession, Dataset, ExternalIp, PhysicalDisk, + PhysicalDiskKind, PhysicalDiskPolicy, PhysicalDiskState, Project, Rack, + Region, SiloUser, SledBaseboard, SledSystemHardware, SledUpdate, + SshKey, Zpool, }; use crate::db::queries::vpc_subnet::InsertVpcSubnetQuery; use chrono::{Duration, Utc}; @@ -411,6 +411,7 @@ mod test { use omicron_common::api::external::{ ByteCount, Error, IdentityMetadataCreateParams, LookupType, Name, }; + use omicron_common::api::internal::shared::DatasetKind; use omicron_test_utils::dev; use omicron_uuid_kinds::CollectionUuid; use omicron_uuid_kinds::GenericUuid; @@ -908,7 +909,6 @@ mod test { zpool.pool_id, bogus_addr, DatasetKind::Crucible, - None, ); let datastore = datastore.clone(); @@ -1281,7 +1281,6 @@ mod test { zpool_id, bogus_addr, DatasetKind::Crucible, - None, ); let 
datastore = datastore.clone(); async move { @@ -1382,7 +1381,6 @@ mod test { zpool_id, bogus_addr, DatasetKind::Crucible, - None, ); let datastore = datastore.clone(); async move { @@ -1458,7 +1456,6 @@ mod test { zpool_id, bogus_addr, DatasetKind::Crucible, - None, ); datastore.dataset_upsert(dataset).await.unwrap(); physical_disk_ids.push(physical_disk_id); diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index d20de82cef..2f84378a13 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -67,8 +67,7 @@ pub(crate) async fn ensure_dataset_records_exist( id.into_untyped_uuid(), pool_id.into_untyped_uuid(), Some(address), - kind.clone().into(), - kind.zone_name().map(String::from), + kind.clone(), ); let maybe_inserted = datastore .dataset_insert_if_not_exists(dataset) diff --git a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs index 9dcaa098d5..380448acf2 100644 --- a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs +++ b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs @@ -124,7 +124,6 @@ mod test { use httptest::responders::status_code; use httptest::Expectation; use nexus_db_model::Dataset; - use nexus_db_model::DatasetKind; use nexus_db_model::PhysicalDisk; use nexus_db_model::PhysicalDiskKind; use nexus_db_model::PhysicalDiskPolicy; @@ -142,6 +141,7 @@ mod test { use nexus_types::identity::Asset; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Generation; + use omicron_common::api::internal::shared::DatasetKind; use omicron_common::disk::DiskIdentity; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::PhysicalDiskUuid; @@ -435,7 +435,6 @@ mod test { 0, )), DatasetKind::Crucible, - None, )) .await .unwrap(); diff --git a/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs b/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs index 0e90ed84ab..6e49ddc7f0 100644 --- a/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs +++ b/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs @@ -179,13 +179,13 @@ mod tests { use diesel::ExpressionMethods; use diesel::QueryDsl; use nexus_db_model::Dataset; - use nexus_db_model::DatasetKind; use nexus_db_model::PhysicalDisk; use nexus_db_model::PhysicalDiskKind; use nexus_db_model::PhysicalDiskPolicy; use nexus_db_model::Region; use nexus_test_utils::SLED_AGENT_UUID; use nexus_test_utils_macros::nexus_test; + use omicron_common::api::internal::shared::DatasetKind; use omicron_uuid_kinds::{ DatasetUuid, PhysicalDiskUuid, RegionUuid, SledUuid, }; @@ -246,7 +246,6 @@ mod tests { 0, )), DatasetKind::Crucible, - None, )) .await .unwrap(); diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 432c44bea4..b289e871eb 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -146,8 +146,7 @@ impl super::Nexus { dataset.dataset_id, dataset.zpool_id, Some(dataset.request.address), - dataset.request.kind.clone().into(), - dataset.request.kind.zone_name().map(String::from), + dataset.request.kind, ) }) .collect(); diff --git a/nexus/src/app/sagas/region_replacement_start.rs b/nexus/src/app/sagas/region_replacement_start.rs index 60c35781f1..c2b886938a 100644 --- a/nexus/src/app/sagas/region_replacement_start.rs +++ b/nexus/src/app/sagas/region_replacement_start.rs @@ -776,7 +776,6 @@ pub(crate) mod test { }; use chrono::Utc; use 
nexus_db_model::Dataset; - use nexus_db_model::DatasetKind; use nexus_db_model::Region; use nexus_db_model::RegionReplacement; use nexus_db_model::RegionReplacementState; @@ -787,6 +786,7 @@ pub(crate) mod test { use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils_macros::nexus_test; use nexus_types::identity::Asset; + use omicron_common::api::internal::shared::DatasetKind; use sled_agent_client::types::VolumeConstructionRequest; use uuid::Uuid; @@ -903,28 +903,24 @@ pub(crate) mod test { Uuid::new_v4(), Some("[fd00:1122:3344:101::1]:12345".parse().unwrap()), DatasetKind::Crucible, - None, ), Dataset::new( Uuid::new_v4(), Uuid::new_v4(), Some("[fd00:1122:3344:102::1]:12345".parse().unwrap()), DatasetKind::Crucible, - None, ), Dataset::new( Uuid::new_v4(), Uuid::new_v4(), Some("[fd00:1122:3344:103::1]:12345".parse().unwrap()), DatasetKind::Crucible, - None, ), Dataset::new( Uuid::new_v4(), Uuid::new_v4(), Some("[fd00:1122:3344:104::1]:12345".parse().unwrap()), DatasetKind::Crucible, - None, ), ]; diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs index a0e1cc3526..9c21ca73a1 100644 --- a/nexus/src/app/sled.rs +++ b/nexus/src/app/sled.rs @@ -12,7 +12,6 @@ use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; use nexus_db_queries::db::lookup; -use nexus_db_queries::db::model::DatasetKind; use nexus_sled_agent_shared::inventory::SledRole; use nexus_types::deployment::DiskFilter; use nexus_types::deployment::SledFilter; @@ -23,6 +22,7 @@ use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; +use omicron_common::api::internal::shared::DatasetKind; use omicron_uuid_kinds::{GenericUuid, SledUuid}; use sled_agent_client::Client as SledAgentClient; use std::net::SocketAddrV6; @@ -308,7 +308,7 @@ impl super::Nexus { ); let kind = DatasetKind::Crucible; let dataset = - db::model::Dataset::new(id, zpool_id, Some(address), kind, None); + db::model::Dataset::new(id, zpool_id, Some(address), kind); self.db_datastore.dataset_upsert(dataset).await?; Ok(()) } From de22ac05763e18616c3acc4ecf1d2fb6c20c26bf Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 8 Aug 2024 13:48:04 -0700 Subject: [PATCH 30/84] fmt --- sled-agent/src/zone_bundle.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sled-agent/src/zone_bundle.rs b/sled-agent/src/zone_bundle.rs index 685b4247d9..7436266c93 100644 --- a/sled-agent/src/zone_bundle.rs +++ b/sled-agent/src/zone_bundle.rs @@ -2423,8 +2423,7 @@ mod illumos_tests { // If this needs to change, go modify the "add_vdevs" call in // "setup_storage". 
assert!( - TEST_QUOTA - < StorageManagerTestHarness::DEFAULT_VDEV_SIZE, + TEST_QUOTA < StorageManagerTestHarness::DEFAULT_VDEV_SIZE, "Quota larger than underlying device (quota: {}, device size: {})", TEST_QUOTA, StorageManagerTestHarness::DEFAULT_VDEV_SIZE, From 018f1f7c630fb8a221ad6fd8c4208ef692bbe5cf Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 8 Aug 2024 15:23:27 -0700 Subject: [PATCH 31/84] Schemas, expectorate --- .../tests/output/cmd-stdout | 20 +++++------ nexus/db-model/src/schema_versions.rs | 3 +- .../output/region_allocate_distinct_sleds.sql | 11 +++++- .../output/region_allocate_random_sleds.sql | 11 +++++- ..._allocate_with_snapshot_distinct_sleds.sql | 11 +++++- ...on_allocate_with_snapshot_random_sleds.sql | 11 +++++- schema/crdb/blueprint-dataset/up01.sql | 4 +++ schema/crdb/blueprint-dataset/up02.sql | 4 +++ schema/crdb/blueprint-dataset/up03.sql | 9 +++++ schema/crdb/blueprint-dataset/up04.sql | 35 +++++++++++++++++++ schema/crdb/dbinit.sql | 2 +- 11 files changed, 105 insertions(+), 16 deletions(-) create mode 100644 schema/crdb/blueprint-dataset/up01.sql create mode 100644 schema/crdb/blueprint-dataset/up02.sql create mode 100644 schema/crdb/blueprint-dataset/up03.sql create mode 100644 schema/crdb/blueprint-dataset/up04.sql diff --git a/dev-tools/reconfigurator-cli/tests/output/cmd-stdout b/dev-tools/reconfigurator-cli/tests/output/cmd-stdout index a2d6d3d17b..3c6a337357 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmd-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmd-stdout @@ -24,25 +24,25 @@ sled ..................... subnet fd00:1122:3344:101::/64 zpools (10): ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) ..................... 
(zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) > sled-add ..................... diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index 921123e41f..c07a9a6e37 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -17,7 +17,7 @@ use std::collections::BTreeMap; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(87, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(88, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -29,6 +29,7 @@ static KNOWN_VERSIONS: Lazy> = Lazy::new(|| { // | leaving the first copy as an example for the next person. 
// v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(88, "blueprint-dataset"), KnownVersion::new(87, "inv-dataset"), KnownVersion::new(86, "dataset-kinds-zone-and-debug"), KnownVersion::new(85, "add-migrations-by-time-created-index"), diff --git a/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql b/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql index 4e7dde244b..9ee71b403f 100644 --- a/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql +++ b/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql @@ -271,7 +271,10 @@ WITH dataset.port, dataset.kind, dataset.size_used, - dataset.zone_name + dataset.zone_name, + dataset.quota, + dataset.reservation, + dataset.compression ) ( SELECT @@ -286,6 +289,9 @@ WITH dataset.kind, dataset.size_used, dataset.zone_name, + dataset.quota, + dataset.reservation, + dataset.compression, old_regions.id, old_regions.time_created, old_regions.time_modified, @@ -313,6 +319,9 @@ UNION updated_datasets.kind, updated_datasets.size_used, updated_datasets.zone_name, + updated_datasets.quota, + updated_datasets.reservation, + updated_datasets.compression, inserted_regions.id, inserted_regions.time_created, inserted_regions.time_modified, diff --git a/nexus/db-queries/tests/output/region_allocate_random_sleds.sql b/nexus/db-queries/tests/output/region_allocate_random_sleds.sql index b2c164a6d9..369410c68c 100644 --- a/nexus/db-queries/tests/output/region_allocate_random_sleds.sql +++ b/nexus/db-queries/tests/output/region_allocate_random_sleds.sql @@ -269,7 +269,10 @@ WITH dataset.port, dataset.kind, dataset.size_used, - dataset.zone_name + dataset.zone_name, + dataset.quota, + dataset.reservation, + dataset.compression ) ( SELECT @@ -284,6 +287,9 @@ WITH dataset.kind, dataset.size_used, dataset.zone_name, + dataset.quota, + dataset.reservation, + dataset.compression, old_regions.id, old_regions.time_created, old_regions.time_modified, @@ -311,6 +317,9 @@ UNION updated_datasets.kind, updated_datasets.size_used, updated_datasets.zone_name, + updated_datasets.quota, + updated_datasets.reservation, + updated_datasets.compression, inserted_regions.id, inserted_regions.time_created, inserted_regions.time_modified, diff --git a/nexus/db-queries/tests/output/region_allocate_with_snapshot_distinct_sleds.sql b/nexus/db-queries/tests/output/region_allocate_with_snapshot_distinct_sleds.sql index 97ee23f82e..9251139c4e 100644 --- a/nexus/db-queries/tests/output/region_allocate_with_snapshot_distinct_sleds.sql +++ b/nexus/db-queries/tests/output/region_allocate_with_snapshot_distinct_sleds.sql @@ -282,7 +282,10 @@ WITH dataset.port, dataset.kind, dataset.size_used, - dataset.zone_name + dataset.zone_name, + dataset.quota, + dataset.reservation, + dataset.compression ) ( SELECT @@ -297,6 +300,9 @@ WITH dataset.kind, dataset.size_used, dataset.zone_name, + dataset.quota, + dataset.reservation, + dataset.compression, old_regions.id, old_regions.time_created, old_regions.time_modified, @@ -324,6 +330,9 @@ UNION updated_datasets.kind, updated_datasets.size_used, updated_datasets.zone_name, + updated_datasets.quota, + updated_datasets.reservation, + updated_datasets.compression, inserted_regions.id, inserted_regions.time_created, inserted_regions.time_modified, diff --git a/nexus/db-queries/tests/output/region_allocate_with_snapshot_random_sleds.sql b/nexus/db-queries/tests/output/region_allocate_with_snapshot_random_sleds.sql index a1cc103594..c8aa8adf2e 100644 --- 
a/nexus/db-queries/tests/output/region_allocate_with_snapshot_random_sleds.sql +++ b/nexus/db-queries/tests/output/region_allocate_with_snapshot_random_sleds.sql @@ -280,7 +280,10 @@ WITH dataset.port, dataset.kind, dataset.size_used, - dataset.zone_name + dataset.zone_name, + dataset.quota, + dataset.reservation, + dataset.compression ) ( SELECT @@ -295,6 +298,9 @@ WITH dataset.kind, dataset.size_used, dataset.zone_name, + dataset.quota, + dataset.reservation, + dataset.compression, old_regions.id, old_regions.time_created, old_regions.time_modified, @@ -322,6 +328,9 @@ UNION updated_datasets.kind, updated_datasets.size_used, updated_datasets.zone_name, + updated_datasets.quota, + updated_datasets.reservation, + updated_datasets.compression, inserted_regions.id, inserted_regions.time_created, inserted_regions.time_modified, diff --git a/schema/crdb/blueprint-dataset/up01.sql b/schema/crdb/blueprint-dataset/up01.sql new file mode 100644 index 0000000000..cfdde5bacd --- /dev/null +++ b/schema/crdb/blueprint-dataset/up01.sql @@ -0,0 +1,4 @@ +ALTER TABLE omicron.public.dataset + ADD COLUMN IF NOT EXISTS quota INT8, + ADD COLUMN IF NOT EXISTS reservation INT8, + ADD COLUMN IF NOT EXISTS compression TEXT diff --git a/schema/crdb/blueprint-dataset/up02.sql b/schema/crdb/blueprint-dataset/up02.sql new file mode 100644 index 0000000000..a1a0dc7cb7 --- /dev/null +++ b/schema/crdb/blueprint-dataset/up02.sql @@ -0,0 +1,4 @@ +CREATE TYPE IF NOT EXISTS omicron.public.bp_dataset_disposition AS ENUM ( + 'in_service', + 'expunged' +) diff --git a/schema/crdb/blueprint-dataset/up03.sql b/schema/crdb/blueprint-dataset/up03.sql new file mode 100644 index 0000000000..2ce95db275 --- /dev/null +++ b/schema/crdb/blueprint-dataset/up03.sql @@ -0,0 +1,9 @@ +-- description of a collection of omicron datasets stored in a blueprint +CREATE TABLE IF NOT EXISTS omicron.public.bp_sled_omicron_datasets ( + -- foreign key into the `blueprint` table + blueprint_id UUID NOT NULL, + sled_id UUID NOT NULL, + generation INT8 NOT NULL, + + PRIMARY KEY (blueprint_id, sled_id) +) diff --git a/schema/crdb/blueprint-dataset/up04.sql b/schema/crdb/blueprint-dataset/up04.sql new file mode 100644 index 0000000000..121984740c --- /dev/null +++ b/schema/crdb/blueprint-dataset/up04.sql @@ -0,0 +1,35 @@ +-- description of an omicron dataset specified in a blueprint. 
+CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_dataset ( + -- foreign key into the `blueprint` table + blueprint_id UUID NOT NULL, + sled_id UUID NOT NULL, + id UUID NOT NULL, + + -- Dataset disposition + disposition omicron.public.bp_dataset_disposition NOT NULL, + + pool_id UUID NOT NULL, + kind omicron.public.dataset_kind NOT NULL, + -- Only valid if kind = zone + zone_name TEXT, + + -- Only valid if kind = crucible + ip INET, + port INT4 CHECK (port BETWEEN 0 AND 65535), + + quota INT8, + reservation INT8, + compression TEXT, + + CONSTRAINT zone_name_for_zone_kind CHECK ( + (kind != 'zone') OR + (kind = 'zone' AND zone_name IS NOT NULL) + ), + + CONSTRAINT ip_and_port_set_for_crucible CHECK ( + (kind != 'crucible') OR + (kind = 'crucible' AND ip IS NOT NULL and port IS NOT NULL) + ), + + PRIMARY KEY (blueprint_id, id) +) diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 7bd738f867..a232ededc0 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -4247,7 +4247,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '87.0.0', NULL) + (TRUE, NOW(), NOW(), '88.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; From aa6025421da9bfc1f6872ed45c50d46d1bf9816d Mon Sep 17 00:00:00 2001 From: Rain Date: Thu, 8 Aug 2024 15:24:13 -0700 Subject: [PATCH 32/84] update schema JSONs, add replace --- clients/sled-agent-client/src/lib.rs | 1 + openapi/nexus-internal.json | 135 +-------------------------- openapi/sled-agent.json | 135 +-------------------------- 3 files changed, 5 insertions(+), 266 deletions(-) diff --git a/clients/sled-agent-client/src/lib.rs b/clients/sled-agent-client/src/lib.rs index 4e7a4a72db..d65174ed3b 100644 --- a/clients/sled-agent-client/src/lib.rs +++ b/clients/sled-agent-client/src/lib.rs @@ -38,6 +38,7 @@ progenitor::generate_api!( replace = { Baseboard = nexus_sled_agent_shared::inventory::Baseboard, ByteCount = omicron_common::api::external::ByteCount, + DatasetKind = omicron_common::api::internal::shared::DatasetKind, DiskIdentity = omicron_common::disk::DiskIdentity, DiskVariant = omicron_common::disk::DiskVariant, Generation = omicron_common::api::external::Generation, diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index cfbe028f9c..7cb401d4bc 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -2591,139 +2591,8 @@ ] }, "DatasetKind": { - "description": "Describes the purpose of the dataset.", - "oneOf": [ - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "cockroachdb" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "crucible" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse_keeper" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "external_dns" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "internal_dns" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "zone_root" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": 
"object", - "properties": { - "name": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "zone" - ] - } - }, - "required": [ - "name", - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "debug" - ] - } - }, - "required": [ - "type" - ] - } - ] + "description": "The kind of dataset. See the `DatasetKind` enum in omicron-common for possible values.", + "type": "string" }, "DatasetPutRequest": { "description": "Describes a dataset within a pool.", diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index 82f3c6c3ae..f2c5d127c1 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -2205,139 +2205,8 @@ ] }, "DatasetKind": { - "description": "Describes the purpose of the dataset.", - "oneOf": [ - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "cockroachdb" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "crucible" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse_keeper" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "external_dns" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "internal_dns" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "zone_root" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "zone" - ] - } - }, - "required": [ - "name", - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "debug" - ] - } - }, - "required": [ - "type" - ] - } - ] + "description": "The kind of dataset. 
See the `DatasetKind` enum in omicron-common for possible values.", + "type": "string" }, "DatasetManagementStatus": { "description": "Identifies how a single dataset management operation may have succeeded or failed.", From 58835271968746db4cc64e09062347ca216406fc Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 8 Aug 2024 17:06:56 -0700 Subject: [PATCH 33/84] reconfigurator execution tests --- nexus/db-model/src/dataset.rs | 27 ++ .../reconfigurator/execution/src/datasets.rs | 374 +++++++++++++++--- 2 files changed, 345 insertions(+), 56 deletions(-) diff --git a/nexus/db-model/src/dataset.rs b/nexus/db-model/src/dataset.rs index a757dbc7b8..aa4810014e 100644 --- a/nexus/db-model/src/dataset.rs +++ b/nexus/db-model/src/dataset.rs @@ -8,6 +8,7 @@ use crate::ipv6; use crate::schema::{dataset, region}; use chrono::{DateTime, Utc}; use db_macros::Asset; +use nexus_types::deployment::BlueprintDatasetConfig; use omicron_common::api::external::Error; use omicron_common::api::internal::shared::DatasetKind as ApiDatasetKind; use omicron_uuid_kinds::DatasetUuid; @@ -92,6 +93,32 @@ impl Dataset { } } +impl From for Dataset { + fn from(bp: BlueprintDatasetConfig) -> Self { + let kind = DatasetKind::from(&bp.kind); + let (size_used, zone_name) = match bp.kind { + ApiDatasetKind::Crucible => (Some(0), None), + ApiDatasetKind::Zone { name } => (None, Some(name)), + _ => (None, None), + }; + let addr = bp.address; + Self { + identity: DatasetIdentity::new(bp.id.into_untyped_uuid()), + time_deleted: None, + rcgen: Generation::new(), + pool_id: bp.pool.id().into_untyped_uuid(), + kind, + ip: addr.map(|addr| addr.ip().into()), + port: addr.map(|addr| addr.port().into()), + size_used, + zone_name, + quota: bp.quota.map(ByteCount::from), + reservation: bp.reservation.map(ByteCount::from), + compression: bp.compression, + } + } +} + impl TryFrom for omicron_common::disk::DatasetConfig { type Error = Error; diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index fbfcc16af0..b4e8a26968 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -107,6 +107,13 @@ pub(crate) async fn deploy_datasets( } } +#[allow(dead_code)] +pub(crate) struct EnsureDatasetsResult { + pub(crate) inserted: usize, + pub(crate) updated: usize, + pub(crate) removed: usize, +} + /// For all datasets we expect to see in the blueprint, ensure that a corresponding /// database record exists in `datastore`. /// @@ -116,14 +123,14 @@ pub(crate) async fn ensure_dataset_records_exist( opctx: &OpContext, datastore: &DataStore, bp_datasets: impl Iterator, -) -> anyhow::Result { +) -> anyhow::Result { // Before attempting to insert any datasets, first query for any existing // dataset records so we can filter them out. This looks like a typical // TOCTOU issue, but it is purely a performance optimization. We expect // almost all executions of this function to do nothing: new datasets are // created very rarely relative to how frequently blueprint realization // happens. We could remove this check and filter and instead run the below - // "insert if not exists" query on every zone, and the behavior would still + // "insert if not exists" query on every dataset, and the behavior would still // be correct. However, that would issue far more queries than necessary in // the very common case of "we don't need to do anything at all". 
let mut existing_datasets = datastore @@ -152,25 +159,21 @@ pub(crate) async fn ensure_dataset_records_exist( // If this dataset already exists, only update it if it appears different from what exists // in the database already. let action = if let Some(db_dataset) = existing_datasets.remove(&id) { - let db_dataset: DatasetConfig = db_dataset.try_into()?; + let db_config: DatasetConfig = db_dataset.try_into()?; + let bp_config: DatasetConfig = bp_dataset.clone().into(); - if db_dataset == bp_dataset.clone().into() { + if db_config == bp_config { num_unchanged += 1; continue; } num_updated += 1; "update" } else { + num_inserted += 1; "insert" }; - let address = bp_dataset.address; - let dataset = Dataset::new( - id.into_untyped_uuid(), - bp_dataset.pool.id().into_untyped_uuid(), - address, - kind.clone(), - ); + let dataset = Dataset::from(bp_dataset.clone()); datastore.dataset_upsert(dataset).await.with_context(|| { format!("failed to upsert dataset record for dataset {id}") })?; @@ -182,16 +185,8 @@ pub(crate) async fn ensure_dataset_records_exist( "id" => %id, "kind" => ?kind, ); - num_inserted += 1; } - // TODO: I know we don't want to actually expunge crucible zones, but unclear - // where that decision SHOULD be made? - // - // --> Actually, idk about this. We should clearly read the disposition to - // decide which datasets to delete, but I think we need some - // planner/executor coordination to punt on Crucible. - for bp_dataset in unwanted_datasets { if existing_datasets.remove(&bp_dataset.id).is_some() { if matches!( @@ -214,7 +209,7 @@ pub(crate) async fn ensure_dataset_records_exist( // We support removing expunged datasets - if we read a dataset that hasn't // been explicitly expunged, log this as an oddity. // - // This could be possible in rare conditions where multiple Nexuses are executing + // This could be possible in conditions where multiple Nexuses are executing // distinct blueprints. 
if !existing_datasets.is_empty() { warn!( @@ -234,7 +229,11 @@ pub(crate) async fn ensure_dataset_records_exist( "num_removed" => num_removed, ); - Ok(num_inserted) + Ok(EnsureDatasetsResult { + inserted: num_inserted, + updated: num_updated, + removed: num_removed, + }) } #[cfg(test)] @@ -243,7 +242,9 @@ mod tests { use nexus_db_model::Zpool; use nexus_reconfigurator_planning::example::example; use nexus_test_utils_macros::nexus_test; + use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintZoneFilter; + use omicron_common::api::external::ByteCount; use omicron_common::api::internal::shared::DatasetKind; use omicron_common::zpool_name::ZpoolName; use omicron_uuid_kinds::GenericUuid; @@ -253,11 +254,33 @@ mod tests { type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; + fn get_all_datasets_from_zones( + blueprint: &Blueprint, + ) -> Vec { + blueprint + .all_omicron_zones(BlueprintZoneFilter::All) + .filter_map(|(_, zone)| { + if let Some(dataset) = zone.zone_type.durable_dataset() { + Some(BlueprintDatasetConfig { + disposition: BlueprintDatasetDisposition::InService, + id: DatasetUuid::new_v4(), + pool: dataset.dataset.pool_name.clone(), + kind: dataset.kind, + address: Some(dataset.address), + quota: None, + reservation: None, + compression: None, + }) + } else { + None + } + }) + .collect::>() + } + #[nexus_test] - async fn test_ensure_dataset_records_exist( - cptestctx: &ControlPlaneTestContext, - ) { - const TEST_NAME: &str = "test_ensure_dataset_records_exist"; + async fn test_dataset_record_create(cptestctx: &ControlPlaneTestContext) { + const TEST_NAME: &str = "test_dataset_record_create"; // Set up. let nexus = &cptestctx.server.server_context().nexus; @@ -292,37 +315,22 @@ mod tests { // // Finding these datasets is normally the responsibility of the planner, // but we're kinda hand-rolling it. - let all_datasets = blueprint - .all_omicron_zones(BlueprintZoneFilter::All) - .filter_map(|(_, zone)| { - if let Some(dataset) = zone.zone_type.durable_dataset() { - Some(BlueprintDatasetConfig { - disposition: BlueprintDatasetDisposition::InService, - id: DatasetUuid::new_v4(), - pool: dataset.dataset.pool_name.clone(), - kind: dataset.kind, - address: Some(dataset.address), - quota: None, - reservation: None, - compression: None, - }) - } else { - None - } - }) - .collect::>(); + let all_datasets = get_all_datasets_from_zones(&blueprint); // How many zones are there with durable datasets? let nzones_with_durable_datasets = all_datasets.len(); + assert!(nzones_with_durable_datasets > 0); - let ndatasets_inserted = + let EnsureDatasetsResult { inserted, updated, removed } = ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) .await .expect("failed to ensure datasets"); // We should have inserted a dataset for each zone with a durable // dataset. - assert_eq!(nzones_with_durable_datasets, ndatasets_inserted); + assert_eq!(inserted, nzones_with_durable_datasets); + assert_eq!(updated, 0); + assert_eq!(removed, 0); assert_eq!( datastore .dataset_list_all_batched(opctx, None) @@ -333,11 +341,13 @@ mod tests { ); // Ensuring the same datasets again should insert no new records. 
- let ndatasets_inserted = + let EnsureDatasetsResult { inserted, updated, removed } = ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) .await .expect("failed to ensure datasets"); - assert_eq!(0, ndatasets_inserted); + assert_eq!(inserted, 0); + assert_eq!(updated, 0); + assert_eq!(removed, 0); assert_eq!( datastore .dataset_list_all_batched(opctx, None) @@ -388,14 +398,17 @@ mod tests { }, ]; - let ndatasets_inserted = ensure_dataset_records_exist( - opctx, - datastore, - all_datasets.iter().chain(&new_zones), - ) - .await - .expect("failed to ensure datasets"); - assert_eq!(ndatasets_inserted, 2); + let EnsureDatasetsResult { inserted, updated, removed } = + ensure_dataset_records_exist( + opctx, + datastore, + all_datasets.iter().chain(&new_zones), + ) + .await + .expect("failed to ensure datasets"); + assert_eq!(inserted, 2); + assert_eq!(updated, 0); + assert_eq!(removed, 0); assert_eq!( datastore .dataset_list_all_batched(opctx, None) @@ -405,4 +418,253 @@ mod tests { nzones_with_durable_datasets + 2, ); } + + #[nexus_test] + async fn test_dataset_records_update(cptestctx: &ControlPlaneTestContext) { + const TEST_NAME: &str = "test_dataset_records_update"; + + // Set up. + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + let opctx = &opctx; + + // Use the standard example system. + let (_, _, blueprint) = example(&opctx.log, TEST_NAME, 5); + + // Record the sleds and zpools. + crate::tests::insert_sled_records(datastore, &blueprint).await; + crate::tests::create_disks_for_zones_using_datasets( + datastore, opctx, &blueprint, + ) + .await; + + let mut all_datasets = get_all_datasets_from_zones(&blueprint); + let EnsureDatasetsResult { inserted, updated, removed } = + ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) + .await + .expect("failed to ensure datasets"); + assert_eq!(inserted, all_datasets.len()); + assert_eq!(updated, 0); + assert_eq!(removed, 0); + + // These values don't *really* matter, we just want to make sure we can + // change them and see the update. 
+ let first_dataset = &mut all_datasets[0]; + assert_eq!(first_dataset.quota, None); + assert_eq!(first_dataset.reservation, None); + assert_eq!(first_dataset.compression, None); + + first_dataset.quota = Some(ByteCount::from_kibibytes_u32(1)); + first_dataset.reservation = Some(ByteCount::from_kibibytes_u32(2)); + first_dataset.compression = Some(String::from("pied_piper")); + let _ = first_dataset; + + // Update the datastore + let EnsureDatasetsResult { inserted, updated, removed } = + ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) + .await + .expect("failed to ensure datasets"); + assert_eq!(inserted, 0); + assert_eq!(updated, 1); + assert_eq!(removed, 0); + + // Observe that the update stuck + let observed_datasets = + datastore.dataset_list_all_batched(opctx, None).await.unwrap(); + let first_dataset = &mut all_datasets[0]; + let observed_dataset = observed_datasets + .into_iter() + .find(|dataset| { + dataset.id() == first_dataset.id.into_untyped_uuid() + }) + .expect("Couldn't find dataset we tried to update?"); + let observed_dataset: DatasetConfig = + observed_dataset.try_into().unwrap(); + assert_eq!( + observed_dataset.quota, + first_dataset.quota.map(|q| q.to_bytes()) + ); + assert_eq!( + observed_dataset.reservation, + first_dataset.reservation.map(|r| r.to_bytes()) + ); + assert_eq!(observed_dataset.compression, first_dataset.compression); + } + + #[nexus_test] + async fn test_dataset_records_delete(cptestctx: &ControlPlaneTestContext) { + const TEST_NAME: &str = "test_dataset_records_delete"; + + // Set up. + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + let opctx = &opctx; + + // Use the standard example system. + let (_, _, blueprint) = example(&opctx.log, TEST_NAME, 5); + + // Record the sleds and zpools. + crate::tests::insert_sled_records(datastore, &blueprint).await; + crate::tests::create_disks_for_zones_using_datasets( + datastore, opctx, &blueprint, + ) + .await; + + let mut all_datasets = get_all_datasets_from_zones(&blueprint); + + // Ensure that a non-crucible dataset exists + all_datasets.push(BlueprintDatasetConfig { + disposition: BlueprintDatasetDisposition::InService, + id: DatasetUuid::new_v4(), + pool: all_datasets[0].pool.clone(), + kind: DatasetKind::Debug, + address: None, + quota: None, + reservation: None, + compression: None, + }); + let EnsureDatasetsResult { inserted, updated, removed } = + ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) + .await + .expect("failed to ensure datasets"); + assert_eq!(inserted, all_datasets.len()); + assert_eq!(updated, 0); + assert_eq!(removed, 0); + + // Expunge two datasets -- one for Crucible, and one for any other + // service. 
+ + let crucible_dataset = all_datasets + .iter_mut() + .find(|dataset| matches!(dataset.kind, DatasetKind::Crucible)) + .expect("No crucible dataset found"); + assert_eq!( + crucible_dataset.disposition, + BlueprintDatasetDisposition::InService + ); + crucible_dataset.disposition = BlueprintDatasetDisposition::Expunged; + let crucible_dataset_id = crucible_dataset.id; + let _ = crucible_dataset; + + let non_crucible_dataset = all_datasets + .iter_mut() + .find(|dataset| !matches!(dataset.kind, DatasetKind::Crucible)) + .expect("No non-crucible dataset found"); + assert_eq!( + non_crucible_dataset.disposition, + BlueprintDatasetDisposition::InService + ); + non_crucible_dataset.disposition = + BlueprintDatasetDisposition::Expunged; + let non_crucible_dataset_id = non_crucible_dataset.id; + let _ = non_crucible_dataset; + + // Observe that we only remove one dataset. + // + // This is a property of "special-case handling" of the Crucible + // dataset, where we punt the deletion to a background task. + + let EnsureDatasetsResult { inserted, updated, removed } = + ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) + .await + .expect("failed to ensure datasets"); + assert_eq!(inserted, 0); + assert_eq!(updated, 0); + assert_eq!(removed, 1); + + // Make sure the Crucible dataset still exists, even if the other + // dataset got deleted. + + let observed_datasets = + datastore.dataset_list_all_batched(opctx, None).await.unwrap(); + assert!(observed_datasets + .iter() + .any(|d| d.id() == crucible_dataset_id.into_untyped_uuid())); + assert!(!observed_datasets + .iter() + .any(|d| d.id() == non_crucible_dataset_id.into_untyped_uuid())); + } + + #[nexus_test] + async fn test_dataset_record_blueprint_removal_without_expunging( + cptestctx: &ControlPlaneTestContext, + ) { + const TEST_NAME: &str = + "test_dataset_record_blueprint_removal_without_expunging"; + + // Set up. + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + let opctx = &opctx; + + // Use the standard example system. + let (_, _, blueprint) = example(&opctx.log, TEST_NAME, 5); + + // Record the sleds and zpools. + crate::tests::insert_sled_records(datastore, &blueprint).await; + crate::tests::create_disks_for_zones_using_datasets( + datastore, opctx, &blueprint, + ) + .await; + + let mut all_datasets = get_all_datasets_from_zones(&blueprint); + + // Ensure that a deletable dataset exists + let dataset_id = DatasetUuid::new_v4(); + all_datasets.push(BlueprintDatasetConfig { + disposition: BlueprintDatasetDisposition::InService, + id: dataset_id, + pool: all_datasets[0].pool.clone(), + kind: DatasetKind::Debug, + address: None, + quota: None, + reservation: None, + compression: None, + }); + + let EnsureDatasetsResult { inserted, updated, removed } = + ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) + .await + .expect("failed to ensure datasets"); + assert_eq!(inserted, all_datasets.len()); + assert_eq!(updated, 0); + assert_eq!(removed, 0); + + // Rather than expunging a dataset, which is the normal way to "delete" + // a dataset, we'll just remove it from the "blueprint". + // + // This situation mimics a scenario where we are an "old Nexus, + // executing and old blueprint" - more datasets might be created + // concurrently with our execution, and we should leave them alone. 
+ assert_eq!(dataset_id, all_datasets.pop().unwrap().id); + + // Observe that no datasets are removed. + let EnsureDatasetsResult { inserted, updated, removed } = + ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) + .await + .expect("failed to ensure datasets"); + assert_eq!(inserted, 0); + assert_eq!(updated, 0); + assert_eq!(removed, 0); + + // Make sure the dataset still exists, even if it isn't tracked by our + // "blueprint". + let observed_datasets = + datastore.dataset_list_all_batched(opctx, None).await.unwrap(); + assert!(observed_datasets + .iter() + .any(|d| d.id() == dataset_id.into_untyped_uuid())); + } } From 1a5684c854bc15170ad09f419d9f58496e1dbcca Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 9 Aug 2024 18:39:58 -0700 Subject: [PATCH 34/84] Datasets in input, test for sled_ensure_datasets, more bp verification --- dev-tools/reconfigurator-cli/src/main.rs | 14 +- .../db-queries/src/db/datastore/deployment.rs | 7 +- nexus/reconfigurator/execution/src/dns.rs | 7 +- .../planning/src/blueprint_builder/builder.rs | 335 ++++++++++++++++-- .../planning/src/blueprint_builder/zones.rs | 7 + nexus/reconfigurator/planning/src/example.rs | 21 ++ nexus/reconfigurator/planning/src/planner.rs | 24 +- .../planner_decommissions_sleds_bp2.txt | 2 +- .../output/planner_nonprovisionable_bp2.txt | 2 +- 9 files changed, 376 insertions(+), 43 deletions(-) diff --git a/dev-tools/reconfigurator-cli/src/main.rs b/dev-tools/reconfigurator-cli/src/main.rs index 6d505d8b7d..f29fb05bb2 100644 --- a/dev-tools/reconfigurator-cli/src/main.rs +++ b/dev-tools/reconfigurator-cli/src/main.rs @@ -754,7 +754,12 @@ fn cmd_blueprint_edit( .context("failed to add Nexus zone")?; assert_matches::assert_matches!( added, - EnsureMultiple::Changed { added: 1, updated: 0, removed: 0 } + EnsureMultiple::Changed { + added: 1, + updated: 0, + expunged: 0, + removed: 0 + } ); format!("added Nexus zone to sled {}", sled_id) } @@ -766,7 +771,12 @@ fn cmd_blueprint_edit( .context("failed to add CockroachDB zone")?; assert_matches::assert_matches!( added, - EnsureMultiple::Changed { added: 1, updated: 0, removed: 0 } + EnsureMultiple::Changed { + added: 1, + updated: 0, + expunged: 0, + removed: 0 + } ); format!("added CockroachDB zone to sled {}", sled_id) } diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index ab63913707..7b2c327b94 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -1828,7 +1828,12 @@ mod tests { .clone(), ) .unwrap(), - EnsureMultiple::Changed { added: 4, updated: 0, removed: 0 } + EnsureMultiple::Changed { + added: 4, + updated: 0, + expunged: 0, + removed: 0 + } ); // Add zones to our new sled. 
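// A minimal consumer-side sketch of the widened result type; the variant
// fields follow the `EnsureMultiple` definition in the builder diff below, and
// the helper name here is illustrative only.
fn summarize(result: EnsureMultiple) -> String {
    match result {
        EnsureMultiple::Changed { added, updated, expunged, removed } => format!(
            "added {added}, updated {updated}, expunged {expunged}, removed {removed}"
        ),
        EnsureMultiple::NotNeeded => String::from("no changes needed"),
    }
}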
diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index c37433cfb2..412be3ea9f 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -1298,7 +1298,12 @@ mod test { .unwrap(); assert_eq!( rv, - EnsureMultiple::Changed { added: 1, updated: 0, removed: 0 } + EnsureMultiple::Changed { + added: 1, + updated: 0, + expunged: 0, + removed: 0 + } ); let blueprint2 = builder.build(); eprintln!("blueprint2: {}", blueprint2.display()); diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 44cee92a28..cdc671e71e 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -126,8 +126,17 @@ pub enum Ensure { /// actions taken or no action was necessary #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum EnsureMultiple { - /// action was taken, and multiple items were added - Changed { added: usize, updated: usize, removed: usize }, + /// action was taken within the operation + Changed { + /// An item was added to the blueprint + added: usize, + /// An item was updated within the blueprint + updated: usize, + /// An item was expunged in the blueprint + expunged: usize, + /// An item was removed from the blueprint + removed: usize, + }, /// no action was necessary NotNeeded, @@ -154,6 +163,7 @@ pub(crate) enum Operation { sled_id: SledUuid, added: usize, updated: usize, + expunged: usize, removed: usize, }, ZoneExpunged { @@ -172,8 +182,14 @@ impl fmt::Display for Operation { Self::UpdateDisks { sled_id, added, updated, removed } => { write!(f, "sled {sled_id}: added {added} disks, updated {updated}, removed {removed} disks") } - Self::UpdateDatasets { sled_id, added, updated, removed } => { - write!(f, "sled {sled_id}: added {added} datasets, updated: {updated}, removed {removed} datasets") + Self::UpdateDatasets { + sled_id, + added, + updated, + expunged, + removed, + } => { + write!(f, "sled {sled_id}: added {added} datasets, updated: {updated}, expunged {expunged}, removed {removed} datasets") } Self::ZoneExpunged { sled_id, reason, count } => { let reason = match reason { @@ -196,6 +212,10 @@ impl fmt::Display for Operation { } } +fn zone_name(zone: &BlueprintZoneConfig) -> String { + format!("oxz_{}_{}", zone.zone_type.kind().zone_prefix(), zone.id,) +} + /// Helper for assembling a blueprint /// /// There are two basic ways to assemble a new blueprint: @@ -646,7 +666,7 @@ impl<'a> BlueprintBuilder<'a> { !removals.contains(&PhysicalDiskUuid::from_untyped_uuid(config.id)) }); - Ok(EnsureMultiple::Changed { added, updated: 0, removed }) + Ok(EnsureMultiple::Changed { added, updated: 0, expunged: 0, removed }) } /// Ensures that a sled in the blueprint has all the datasets it should. @@ -688,8 +708,11 @@ impl<'a> BlueprintBuilder<'a> { ); // Ensure each zpool has a "Debug" and "Zone Root" dataset. 
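// In addition to the per-zpool "Debug" and "ZoneRoot" datasets requested
// below, each running zone gets a transient filesystem dataset whose kind is
// derived from the `zone_name` helper above; as a rough sketch for a Nexus
// zone (the exact prefix comes from `zone_type.kind().zone_prefix()`):
//
//     let kind = DatasetKind::Zone { name: zone_name(&zone) };
//     // e.g. roughly DatasetKind::Zone { name: "oxz_nexus_<zone-id>".into() }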
- let bp_zpools = - datasets_builder.all_bp_zpools().collect::>(); + let bp_zpools = self + .disks + .current_sled_disks(sled_id) + .map(|disk_config| disk_config.pool_id) + .collect::>(); for zpool_id in bp_zpools { let zpool = ZpoolName::new_external(zpool_id); let address = None; @@ -720,11 +743,7 @@ impl<'a> BlueprintBuilder<'a> { // Dataset for transient zone filesystem if let Some(fs_zpool) = &zone.filesystem_pool { - let name = format!( - "oxp_{}_{}", - zone.zone_type.kind().zone_prefix(), - zone.id, - ); + let name = zone_name(&zone); let address = None; datasets_builder.ensure( DatasetName::new( @@ -787,13 +806,13 @@ impl<'a> BlueprintBuilder<'a> { } let added = additions.len(); let updated = updates.len(); - // This is a little overloaded, but: // - When a dataset is expunged, for whatever reason, it is a part of // "expunges". This leads to it getting removed from a sled. // - When we know that we've safely destroyed all traces of the dataset, // it becomes a part of "removals". This means we can remove it from the // blueprint. - let removed = expunges.len() + removals.len(); + let expunged = expunges.len(); + let removed = removals.len(); let datasets = &mut self.datasets.change_sled_datasets(sled_id).datasets; @@ -831,8 +850,7 @@ impl<'a> BlueprintBuilder<'a> { // We sort in the call to "BlueprintDatasetsBuilder::into_datasets_map", // so we don't need to sort "datasets" now. - - Ok(EnsureMultiple::Changed { added, updated, removed }) + Ok(EnsureMultiple::Changed { added, updated, expunged, removed }) } pub fn sled_ensure_zone_ntp( @@ -1086,6 +1104,7 @@ impl<'a> BlueprintBuilder<'a> { Ok(EnsureMultiple::Changed { added: num_nexus_to_add, updated: 0, + expunged: 0, removed: 0, }) } @@ -1146,6 +1165,7 @@ impl<'a> BlueprintBuilder<'a> { Ok(EnsureMultiple::Changed { added: num_crdb_to_add, updated: 0, + expunged: 0, removed: 0, }) } @@ -1577,6 +1597,7 @@ impl<'a> BlueprintDatasetsBuilder<'a> { } /// Helper for working with sets of datasets on a single sled +#[derive(Debug)] struct BlueprintSledDatasetsBuilder<'a> { log: Logger, blueprint_datasets: @@ -1584,6 +1605,8 @@ struct BlueprintSledDatasetsBuilder<'a> { database_datasets: BTreeMap>, + // TODO: Could combine these maps? 
+ // Datasets which are unchanged from the prior blueprint unchanged_datasets: BTreeMap>, @@ -1729,10 +1752,17 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { return false; }; - let mut removals = BTreeSet::new(); + let mut expunges = BTreeSet::new(); for (zpool_id, datasets) in &self.blueprint_datasets { for (_dataset_kind, dataset_config) in datasets { + match dataset_config.disposition { + // Already expunged; ignore + BlueprintDatasetDisposition::Expunged => continue, + // Potentially expungeable + BlueprintDatasetDisposition::InService => (), + }; + let dataset_id = dataset_config.id; if !dataset_exists_in(&self.new_datasets, *zpool_id, dataset_id) && !dataset_exists_in( @@ -1747,12 +1777,12 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { ) { info!(self.log, "dataset expungeable (not needed in blueprint)"; "id" => ?dataset_id); - removals.insert(dataset_id); + expunges.insert(dataset_id); } } } - removals + expunges } /// Returns all datasets that have been expunged in a prior blueprint, @@ -1782,14 +1812,13 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { let mut removals = BTreeSet::new(); for (zpool_id, datasets) in &self.blueprint_datasets { for (_kind, config) in datasets { - if matches!( - config.disposition, - BlueprintDatasetDisposition::Expunged - ) && !dataset_exists_in( - &self.database_datasets, - *zpool_id, - config.id, - ) { + if config.disposition == BlueprintDatasetDisposition::Expunged + && !dataset_exists_in( + &self.database_datasets, + *zpool_id, + config.id, + ) + { info!(self.log, "dataset removable (expunged, not in database)"; "id" => ?config.id); removals.insert(config.id); } @@ -1798,10 +1827,6 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { removals } - pub fn all_bp_zpools(&self) -> impl Iterator + '_ { - self.blueprint_datasets.keys().map(|id| *id) - } - fn get_from_bp( &self, zpool: ZpoolUuid, @@ -1843,6 +1868,33 @@ pub mod test { pub const DEFAULT_N_SLEDS: usize = 3; + fn datasets_for_sled( + blueprint: &Blueprint, + sled_id: SledUuid, + ) -> &BTreeMap { + &blueprint + .blueprint_datasets + .get(&sled_id) + .unwrap_or_else(|| { + panic!("Cannot find datasets on missing sled: {sled_id}") + }) + .datasets + } + + fn find_dataset<'a>( + datasets: &'a BTreeMap, + zpool: &ZpoolName, + kind: DatasetKind, + ) -> &'a BlueprintDatasetConfig { + datasets.values().find(|dataset| { + &dataset.pool == zpool && + dataset.kind == kind + }).unwrap_or_else(|| { + let kinds = datasets.values().map(|d| (&d.id, &d.pool, &d.kind)).collect::>(); + panic!("Cannot find dataset of type {kind}\nFound the following: {kinds:#?}") + }) + } + /// Checks various conditions that should be true for all blueprints pub fn verify_blueprint(blueprint: &Blueprint) { // There should be no duplicate underlay IPs. @@ -1892,6 +1944,59 @@ pub mod test { } } } + + // All commissioned disks should have debug and zone root datasets. + for (sled_id, disk_config) in &blueprint.blueprint_disks { + for disk in &disk_config.disks { + let zpool = ZpoolName::new_external(disk.pool_id); + let datasets = datasets_for_sled(&blueprint, *sled_id); + + let dataset = + find_dataset(&datasets, &zpool, DatasetKind::Debug); + assert_eq!( + dataset.disposition, + BlueprintDatasetDisposition::InService + ); + let dataset = + find_dataset(&datasets, &zpool, DatasetKind::ZoneRoot); + assert_eq!( + dataset.disposition, + BlueprintDatasetDisposition::InService + ); + } + } + // All zones should have dataset records. 
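        // (Sketch of what this check covers, using the zone_name() helper added
        //  above: a running Crucible zone with id `id` is expected to have a
        //  transient dataset of kind DatasetKind::Zone { name: zone_name(&zone) },
        //  i.e. a name like "oxz_crucible_<id>", on its filesystem pool, and a
        //  dataset for its durable data when the zone type defines one.)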
+ for (sled_id, zone_config) in + blueprint.all_omicron_zones(BlueprintZoneFilter::ShouldBeRunning) + { + match blueprint.sled_state.get(&sled_id) { + // Decommissioned sleds don't keep dataset state around + None | Some(SledState::Decommissioned) => continue, + Some(SledState::Active) => (), + } + + let datasets = datasets_for_sled(&blueprint, sled_id); + + let zpool = zone_config.filesystem_pool.as_ref().unwrap(); + let kind = DatasetKind::Zone { name: zone_name(&zone_config) }; + let dataset = find_dataset(&datasets, &zpool, kind); + assert_eq!( + dataset.disposition, + BlueprintDatasetDisposition::InService + ); + + if let Some(durable_dataset) = + zone_config.zone_type.durable_dataset() + { + let zpool = &durable_dataset.dataset.pool_name; + let dataset = + find_dataset(&datasets, &zpool, durable_dataset.kind); + assert_eq!( + dataset.disposition, + BlueprintDatasetDisposition::InService + ); + } + } } #[test] @@ -2001,6 +2106,7 @@ pub mod test { for pool_id in new_sled_resources.zpools.keys() { builder.sled_ensure_zone_crucible(new_sled_id, *pool_id).unwrap(); } + builder.sled_ensure_datasets(new_sled_id, new_sled_resources).unwrap(); let blueprint3 = builder.build(); verify_blueprint(&blueprint3); @@ -2209,6 +2315,7 @@ pub mod test { EnsureMultiple::Changed { added: 10, updated: 0, + expunged: 0, removed: 0 }, ); @@ -2246,6 +2353,156 @@ pub mod test { logctx.cleanup_successful(); } + #[test] + fn test_datasets_for_zpools_and_zones() { + static TEST_NAME: &str = "test_datasets_for_zpools_and_zones"; + let logctx = test_setup_log(TEST_NAME); + let (_, input, blueprint) = + example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + + // Creating the "example" blueprint should already invoke + // `sled_ensure_datasets`. + // + // Verify that it has created the datasets we expect to exist. + verify_blueprint(&blueprint); + + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint, + &input, + "test", + ) + .expect("failed to create builder"); + + // Before we make any modifications, there should be no work to do. + // + // If we haven't changed inputs, the output should be the same! + for (sled_id, resources) in + input.all_sled_resources(SledFilter::InService) + { + let r = builder.sled_ensure_datasets(sled_id, resources).unwrap(); + assert_eq!(r, EnsureMultiple::NotNeeded); + } + + // Expunge a zone from the blueprint, observe that the dataset is + // removed. + let sled_id = input + .all_sled_ids(SledFilter::Commissioned) + .next() + .expect("at least one sled present"); + let sled_resources = input.sled_resources(&sled_id).unwrap(); + let crucible_zone_id = builder + .zones + .current_sled_zones(sled_id) + .find_map(|(zone_config, _)| { + if zone_config.disposition + == BlueprintZoneDisposition::InService + && zone_config.zone_type.is_crucible() + { + return Some(zone_config.id); + } + None + }) + .expect("at least one crucible must be present"); + let change = builder.zones.change_sled_zones(sled_id); + println!("Expunging crucible zone: {crucible_zone_id}"); + change.expunge_zones(BTreeSet::from([crucible_zone_id])).unwrap(); + + // In the case of Crucible, we have a durable dataset and a transient + // zone filesystem, so we expect two datasets to be expunged. + let r = builder.sled_ensure_datasets(sled_id, sled_resources).unwrap(); + assert_eq!( + r, + EnsureMultiple::Changed { + added: 0, + updated: 0, + expunged: 2, + removed: 0 + } + ); + // Once the datasets are expunged, no further changes will be proposed. 
+ let r = builder.sled_ensure_datasets(sled_id, sled_resources).unwrap(); + assert_eq!(r, EnsureMultiple::NotNeeded); + + let blueprint = builder.build(); + verify_blueprint(&blueprint); + + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint, + &input, + "test", + ) + .expect("failed to create builder"); + + // While the datasets still exist in the input (effectively, the db) we + // cannot remove them. + let r = builder.sled_ensure_datasets(sled_id, sled_resources).unwrap(); + assert_eq!(r, EnsureMultiple::NotNeeded); + + let blueprint = builder.build(); + verify_blueprint(&blueprint); + + // Find the datasets we've expunged in the blueprint + let expunged_datasets = blueprint + .blueprint_datasets + .get(&sled_id) + .unwrap() + .datasets + .values() + .filter_map(|dataset_config| { + if dataset_config.disposition + == BlueprintDatasetDisposition::Expunged + { + Some(dataset_config.id) + } else { + None + } + }) + .collect::>(); + // We saw two datasets being expunged earlier -- validate that + assert_eq!(expunged_datasets.len(), 2); + + // Remove these two datasets from the input. + let mut input_builder = input.into_builder(); + let zpools = &mut input_builder + .sleds_mut() + .get_mut(&sled_id) + .unwrap() + .resources + .zpools; + for (_, (_, datasets)) in zpools { + datasets.retain(|dataset| !expunged_datasets.contains(&dataset.id)); + } + let input = input_builder.build(); + + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint, + &input, + "test", + ) + .expect("failed to create builder"); + + // Now, we should see the datasets "removed" from the blueprint, since + // we no longer need to keep around records of their expungement. + let sled_resources = input.sled_resources(&sled_id).unwrap(); + let r = builder.sled_ensure_datasets(sled_id, sled_resources).unwrap(); + assert_eq!( + r, + EnsureMultiple::Changed { + added: 0, + updated: 0, + expunged: 0, + removed: 2 + } + ); + let r = builder.sled_ensure_datasets(sled_id, sled_resources).unwrap(); + assert_eq!(r, EnsureMultiple::NotNeeded); + + logctx.cleanup_successful(); + } + #[test] fn test_add_nexus_with_no_existing_nexus_zones() { static TEST_NAME: &str = @@ -2385,7 +2642,12 @@ pub mod test { assert_eq!( added, - EnsureMultiple::Changed { added: 1, updated: 0, removed: 0 } + EnsureMultiple::Changed { + added: 1, + updated: 0, + expunged: 0, + removed: 0 + } ); } @@ -2406,7 +2668,12 @@ pub mod test { assert_eq!( added, - EnsureMultiple::Changed { added: 3, updated: 0, removed: 0 } + EnsureMultiple::Changed { + added: 3, + updated: 0, + expunged: 0, + removed: 0 + } ); } @@ -2666,9 +2933,11 @@ pub mod test { EnsureMultiple::Changed { added: num_sled_zpools, updated: 0, + expunged: 0, removed: 0 } ); + builder.sled_ensure_datasets(target_sled_id, sled_resources).unwrap(); let blueprint = builder.build(); verify_blueprint(&blueprint); diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs b/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs index 32ab345c22..abd142befe 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs @@ -389,6 +389,13 @@ mod tests { } ); + // Ensure all datasets are created for the zones we've provisioned + for (sled_id, resources) in + input2.all_sled_resources(SledFilter::Commissioned) + { + builder.sled_ensure_datasets(sled_id, resources).unwrap(); + } + // Now build the blueprint and ensure that all the changes we described // above are 
present. let blueprint = builder.build(); diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs index 27ed87f652..9cb7cd38cb 100644 --- a/nexus/reconfigurator/planning/src/example.rs +++ b/nexus/reconfigurator/planning/src/example.rs @@ -131,6 +131,27 @@ impl ExampleSystem { .unwrap(); } + // Ensure that our "input" contains the datasets we would have + // provisioned. + // + // This mimics them existing within the database. + let input_sleds = input_builder.sleds_mut(); + for (sled_id, bp_datasets_config) in &blueprint.blueprint_datasets { + let sled = input_sleds.get_mut(sled_id).unwrap(); + for (_, bp_dataset) in &bp_datasets_config.datasets { + let (_, datasets) = sled + .resources + .zpools + .get_mut(&bp_dataset.pool.id()) + .unwrap(); + let bp_config: omicron_common::disk::DatasetConfig = + bp_dataset.clone().into(); + if !datasets.contains(&bp_config) { + datasets.push(bp_config); + } + } + } + ExampleSystem { system, input: input_builder.build(), diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 7420bf4a89..9f8723a45b 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -230,8 +230,12 @@ impl<'a> Planner<'a> { { // First, we need to ensure that sleds are using their expected // disks. This is necessary before we can allocate any zones. - if let EnsureMultiple::Changed { added, updated, removed } = - self.blueprint.sled_ensure_disks(sled_id, &sled_resources)? + if let EnsureMultiple::Changed { + added, + updated, + expunged: _, + removed, + } = self.blueprint.sled_ensure_disks(sled_id, &sled_resources)? { info!( &self.log, @@ -353,7 +357,12 @@ impl<'a> Planner<'a> { for (sled_id, sled_resources) in self.input.all_sled_resources(SledFilter::InService) { - if let EnsureMultiple::Changed { added, updated, removed } = + if let EnsureMultiple::Changed { + added, + updated, + expunged, + removed, + } = self.blueprint.sled_ensure_datasets(sled_id, &sled_resources)? { info!( @@ -362,12 +371,14 @@ impl<'a> Planner<'a> { "sled_id" => %sled_id, "added" => added, "updated" => updated, + "expunged" => expunged, "removed" => removed, ); self.blueprint.record_operation(Operation::UpdateDatasets { sled_id, added, updated, + expunged, removed, }); } @@ -548,7 +559,12 @@ impl<'a> Planner<'a> { } }; match result { - EnsureMultiple::Changed { added, updated: _, removed: _ } => { + EnsureMultiple::Changed { + added, + updated: _, + expunged: _, + removed: _, + } => { info!( self.log, "will add {added} Nexus zone(s) to sled"; "sled_id" => %sled_id, diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt index fda6f3cbb5..d4600586d4 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt @@ -104,7 +104,7 @@ WARNING: Zones exist without physical disks! 
METADATA: created by::::::::::: test_blueprint2 created at::::::::::: 1970-01-01T00:00:00.000Z - comment:::::::::::::: sled a1b477db-b629-48eb-911d-1ccdafca75b9: expunged 12 zones because: sled policy is expunged, sled d67ce8f0-a691-4010-b414-420d82e80527: added 20 datasets, updated: 0, removed 0 datasets, sled fefcf4cf-f7e7-46b3-b629-058526ce440e: added 21 datasets, updated: 0, removed 0 datasets + comment:::::::::::::: sled a1b477db-b629-48eb-911d-1ccdafca75b9: expunged 12 zones because: sled policy is expunged, sled fefcf4cf-f7e7-46b3-b629-058526ce440e: added 1 datasets, updated: 0, expunged 0, removed 0 datasets internal DNS version: 1 external DNS version: 1 diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt index 8aa86d16c3..a207788048 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt @@ -168,7 +168,7 @@ WARNING: Zones exist without physical disks! METADATA: created by::::::::::: test_blueprint2 created at::::::::::: 1970-01-01T00:00:00.000Z - comment:::::::::::::: sled 48d95fef-bc9f-4f50-9a53-1e075836291d: expunged 12 zones because: sled policy is expunged, sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: added 20 datasets, updated: 0, removed 0 datasets, sled 75bc286f-2b4b-482c-9431-59272af529da: added 23 datasets, updated: 0, removed 0 datasets, sled affab35f-600a-4109-8ea0-34a067a4e0bc: added 23 datasets, updated: 0, removed 0 datasets + comment:::::::::::::: sled 48d95fef-bc9f-4f50-9a53-1e075836291d: expunged 12 zones because: sled policy is expunged, sled 75bc286f-2b4b-482c-9431-59272af529da: added 3 datasets, updated: 0, expunged 0, removed 0 datasets, sled affab35f-600a-4109-8ea0-34a067a4e0bc: added 3 datasets, updated: 0, expunged 0, removed 0 datasets internal DNS version: 1 external DNS version: 1 From 3e14cc4602935b07515b7355cec51caf7e0ec8fc Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 9 Aug 2024 18:54:03 -0700 Subject: [PATCH 35/84] Fix schema --- schema/omicron-datasets.json | 135 +---------------------------------- 1 file changed, 2 insertions(+), 133 deletions(-) diff --git a/schema/omicron-datasets.json b/schema/omicron-datasets.json index b675432172..8b4bf59ae9 100644 --- a/schema/omicron-datasets.json +++ b/schema/omicron-datasets.json @@ -75,139 +75,8 @@ } }, "DatasetKind": { - "description": "Describes the purpose of the dataset.", - "oneOf": [ - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "cockroachdb" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "crucible" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse_keeper" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "external_dns" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "internal_dns" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - 
"enum": [ - "zone_root" - ] - } - } - }, - { - "type": "object", - "required": [ - "name", - "type" - ], - "properties": { - "name": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "zone" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "debug" - ] - } - } - } - ] + "description": "The kind of dataset. See the `DatasetKind` enum in omicron-common for possible values.", + "type": "string" }, "DatasetName": { "type": "object", From f39bce3d83219425d844bd4f05ae4294051bab56 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 9 Aug 2024 19:08:49 -0700 Subject: [PATCH 36/84] more merging --- .../planning/src/blueprint_builder/builder.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 52392623a7..cfb420c13c 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -2563,12 +2563,9 @@ pub mod test { let sled_resources = input.sled_resources(&sled_id).unwrap(); let crucible_zone_id = builder .zones - .current_sled_zones(sled_id) + .current_sled_zones(sled_id, BlueprintZoneFilter::ShouldBeRunning) .find_map(|(zone_config, _)| { - if zone_config.disposition - == BlueprintZoneDisposition::InService - && zone_config.zone_type.is_crucible() - { + if zone_config.zone_type.is_crucible() { return Some(zone_config.id); } None From 1f671c1928a4fd078403c04361900ee4aacd0e63 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 9 Aug 2024 21:21:48 -0700 Subject: [PATCH 37/84] schema --- schema/rss-service-plan-v3.json | 135 +------------------------------- 1 file changed, 2 insertions(+), 133 deletions(-) diff --git a/schema/rss-service-plan-v3.json b/schema/rss-service-plan-v3.json index 85fc2581dc..04cf473ccb 100644 --- a/schema/rss-service-plan-v3.json +++ b/schema/rss-service-plan-v3.json @@ -70,139 +70,8 @@ } }, "DatasetKind": { - "description": "Describes the purpose of the dataset.", - "oneOf": [ - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "cockroachdb" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "crucible" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse_keeper" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "external_dns" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "internal_dns" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "zone_root" - ] - } - } - }, - { - "type": "object", - "required": [ - "name", - "type" - ], - "properties": { - "name": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "zone" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "debug" - ] - } - } - } - ] + 
"description": "The kind of dataset. See the `DatasetKind` enum in omicron-common for possible values.", + "type": "string" }, "DatasetName": { "type": "object", From 2c5685f4a9a01f72ace39d71cd7e901af48d68d7 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 26 Aug 2024 08:46:30 -0700 Subject: [PATCH 38/84] Fix clickhouseserver datasetkind serialization --- common/src/api/internal/shared.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index a81e21e05b..4826292863 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -979,6 +979,7 @@ impl FromStr for DatasetKind { "crucible" => Crucible, "clickhouse" => Clickhouse, "clickhouse_keeper" => ClickhouseKeeper, + "clickhouse_server" => ClickhouseServer, "external_dns" => ExternalDns, "internal_dns" => InternalDns, "zone" => ZoneRoot, @@ -1068,10 +1069,11 @@ mod tests { #[test] fn test_dataset_kind_serialization() { let kinds = [ - DatasetKind::Crucible, DatasetKind::Cockroach, + DatasetKind::Crucible, DatasetKind::Clickhouse, DatasetKind::ClickhouseKeeper, + DatasetKind::ClickhouseServer, DatasetKind::ExternalDns, DatasetKind::InternalDns, DatasetKind::ZoneRoot, From 077908d02873848f025b88173eb09b5c4e7050f0 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 26 Aug 2024 08:46:30 -0700 Subject: [PATCH 39/84] Fix clickhouseserver datasetkind serialization --- common/src/api/internal/shared.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index a81e21e05b..4826292863 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -979,6 +979,7 @@ impl FromStr for DatasetKind { "crucible" => Crucible, "clickhouse" => Clickhouse, "clickhouse_keeper" => ClickhouseKeeper, + "clickhouse_server" => ClickhouseServer, "external_dns" => ExternalDns, "internal_dns" => InternalDns, "zone" => ZoneRoot, @@ -1068,10 +1069,11 @@ mod tests { #[test] fn test_dataset_kind_serialization() { let kinds = [ - DatasetKind::Crucible, DatasetKind::Cockroach, + DatasetKind::Crucible, DatasetKind::Clickhouse, DatasetKind::ClickhouseKeeper, + DatasetKind::ClickhouseServer, DatasetKind::ExternalDns, DatasetKind::InternalDns, DatasetKind::ZoneRoot, From a83450e7061537d6ac0495eb35f1b2749ce9e2a9 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 26 Aug 2024 16:42:50 -0700 Subject: [PATCH 40/84] Add docs, use ByteCount more, better simulated inventory --- Cargo.lock | 1 + common/src/api/external/mod.rs | 13 +++- common/src/disk.rs | 6 +- dev-tools/omdb/src/bin/omdb/sled_agent.rs | 6 +- illumos-utils/src/zfs.rs | 8 +-- sled-agent/src/backing_fs.rs | 15 ++-- sled-agent/src/sim/sled_agent.rs | 31 +++++++- sled-storage/Cargo.toml | 1 + sled-storage/src/dataset.rs | 87 +++++++++++++---------- 9 files changed, 112 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d254480ac4..cd9efeb4cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9656,6 +9656,7 @@ dependencies = [ "omicron-test-utils", "omicron-uuid-kinds", "omicron-workspace-hack", + "once_cell", "rand", "schemars", "serde", diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 07e4fd0b83..fbd5f3450b 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -593,7 +593,18 @@ impl JsonSchema for RoleName { // // TODO: custom JsonSchema impl to describe i64::MAX limit; this is blocked by // 
https://github.com/oxidecomputer/typify/issues/589 -#[derive(Copy, Clone, Debug, Serialize, JsonSchema, PartialEq, Eq)] +#[derive( + Copy, + Clone, + Debug, + Serialize, + JsonSchema, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, +)] pub struct ByteCount(u64); impl<'de> Deserialize<'de> for ByteCount { diff --git a/common/src/disk.rs b/common/src/disk.rs index a2016a9442..b60957fc95 100644 --- a/common/src/disk.rs +++ b/common/src/disk.rs @@ -14,7 +14,7 @@ use std::fmt; use uuid::Uuid; use crate::{ - api::external::Generation, + api::external::{ByteCount, Generation}, ledger::Ledgerable, zpool_name::{ZpoolKind, ZpoolName}, }; @@ -159,10 +159,10 @@ pub struct DatasetConfig { pub compression: Option, /// The upper bound on the amount of storage used by this dataset - pub quota: Option, + pub quota: Option, /// The lower bound on the amount of storage usable by this dataset - pub reservation: Option, + pub reservation: Option, } #[derive( diff --git a/dev-tools/omdb/src/bin/omdb/sled_agent.rs b/dev-tools/omdb/src/bin/omdb/sled_agent.rs index b97fb35e8c..44adbf4763 100644 --- a/dev-tools/omdb/src/bin/omdb/sled_agent.rs +++ b/dev-tools/omdb/src/bin/omdb/sled_agent.rs @@ -61,7 +61,11 @@ enum ZpoolCommands { #[derive(Debug, Subcommand)] enum DatasetCommands { - /// Print list of all datasets managed by the sled agent + /// Print list of all datasets the sled agent is configured to manage + /// + /// Note that the set of actual datasets on the sled may be distinct, + /// use the `omdb db inventory collections show` command to see the latest + /// set of datasets collected from sleds. List, } diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index 5968569ebe..18e16f4ec7 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -206,8 +206,8 @@ pub struct EncryptionDetails { #[derive(Debug, Default)] pub struct SizeDetails { - pub quota: Option, - pub reservation: Option, + pub quota: Option, + pub reservation: Option, pub compression: Option, } @@ -507,8 +507,8 @@ impl Zfs { fn apply_properties( name: &str, mountpoint: &Mountpoint, - quota: Option, - reservation: Option, + quota: Option, + reservation: Option, compression: Option, ) -> Result<(), EnsureFilesystemError> { let quota = quota diff --git a/sled-agent/src/backing_fs.rs b/sled-agent/src/backing_fs.rs index 48002a8841..a207668a91 100644 --- a/sled-agent/src/backing_fs.rs +++ b/sled-agent/src/backing_fs.rs @@ -25,6 +25,8 @@ use camino::Utf8PathBuf; use illumos_utils::zfs::{ EnsureFilesystemError, GetValueError, Mountpoint, SizeDetails, Zfs, }; +use omicron_common::api::external::ByteCount; +use once_cell::sync::Lazy; use std::io; #[derive(Debug, thiserror::Error)] @@ -48,7 +50,7 @@ struct BackingFs<'a> { // Mountpoint mountpoint: &'static str, // Optional quota, in _bytes_ - quota: Option, + quota: Option, // Optional compression mode compression: Option<&'static str>, // Linked service @@ -74,7 +76,7 @@ impl<'a> BackingFs<'a> { self } - const fn quota(mut self, quota: usize) -> Self { + const fn quota(mut self, quota: ByteCount) -> Self { self.quota = Some(quota); self } @@ -99,18 +101,19 @@ const BACKING_FMD_DATASET: &'static str = "fmd"; const BACKING_FMD_MOUNTPOINT: &'static str = "/var/fm/fmd"; const BACKING_FMD_SUBDIRS: [&'static str; 3] = ["rsrc", "ckpt", "xprt"]; const BACKING_FMD_SERVICE: &'static str = "svc:/system/fmd:default"; -const BACKING_FMD_QUOTA: usize = 500 * (1 << 20); // 500 MiB +const BACKING_FMD_QUOTA: u64 = 500 * (1 << 20); // 500 MiB const BACKING_COMPRESSION: &'static str = "on"; 
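A minimal sketch of the conversion pattern this patch introduces (names taken
from the surrounding diff; raw byte constants stay plain integers and are
validated into ByteCount at runtime rather than at compile time):

    // Illustration only, mirroring the Lazy initializer below.
    let quota: ByteCount = ByteCount::try_from(BACKING_FMD_QUOTA).unwrap(); // 500 MiB
    assert_eq!(quota.to_bytes(), 500 * (1 << 20));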
const BACKINGFS_COUNT: usize = 1; -static BACKINGFS: [BackingFs; BACKINGFS_COUNT] = +static BACKINGFS: Lazy<[BackingFs; BACKINGFS_COUNT]> = Lazy::new(|| { [BackingFs::new(BACKING_FMD_DATASET) .mountpoint(BACKING_FMD_MOUNTPOINT) .subdirs(&BACKING_FMD_SUBDIRS) - .quota(BACKING_FMD_QUOTA) + .quota(ByteCount::try_from(BACKING_FMD_QUOTA).unwrap()) .compression(BACKING_COMPRESSION) - .service(BACKING_FMD_SERVICE)]; + .service(BACKING_FMD_SERVICE)] +}); /// Ensure that the backing filesystems are mounted. /// If the underlying dataset for a backing fs does not exist on the specified diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index bc1de2e6b5..786ec51bc1 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -18,7 +18,8 @@ use anyhow::Context; use dropshot::{HttpError, HttpServer}; use futures::lock::Mutex; use nexus_sled_agent_shared::inventory::{ - Inventory, InventoryDisk, InventoryZpool, OmicronZonesConfig, SledRole, + Inventory, InventoryDataset, InventoryDisk, InventoryZpool, + OmicronZonesConfig, SledRole, }; use omicron_common::api::external::{ ByteCount, DiskState, Error, Generation, ResourceType, @@ -902,8 +903,32 @@ impl SledAgent { }) }) .collect::, anyhow::Error>>()?, - // TODO: Make this more real? - datasets: vec![], + // NOTE: We report the "configured" datasets as the "real" datasets + // unconditionally here. No real datasets exist, so we're free + // to lie here, but this information should be taken with a + // particularly careful grain-of-salt -- it's supposed to + // represent the "real" datasets the sled agent can observe. + datasets: storage + .datasets_config_list() + .await + .map(|config| { + config + .datasets + .into_iter() + .map(|(id, config)| InventoryDataset { + id: Some(id), + name: config.name.full_name(), + available: ByteCount::from_kibibytes_u32(0), + used: ByteCount::from_kibibytes_u32(0), + quota: config.quota, + reservation: config.reservation, + compression: config + .compression + .unwrap_or_else(|| String::new()), + }) + .collect::>() + }) + .unwrap_or_else(|_| vec![]), }) } diff --git a/sled-storage/Cargo.toml b/sled-storage/Cargo.toml index 2439c52aa7..27555ce96d 100644 --- a/sled-storage/Cargo.toml +++ b/sled-storage/Cargo.toml @@ -20,6 +20,7 @@ illumos-utils.workspace = true key-manager.workspace = true omicron-common.workspace = true omicron-uuid-kinds.workspace = true +once_cell.workspace = true rand.workspace = true schemars = { workspace = true, features = [ "chrono", "uuid1" ] } serde.workspace = true diff --git a/sled-storage/src/dataset.rs b/sled-storage/src/dataset.rs index b95877418e..f487460a6c 100644 --- a/sled-storage/src/dataset.rs +++ b/sled-storage/src/dataset.rs @@ -14,8 +14,10 @@ use illumos_utils::zfs::{ }; use illumos_utils::zpool::ZpoolName; use key_manager::StorageKeyRequester; +use omicron_common::api::external::ByteCount; use omicron_common::api::internal::shared::DatasetKind; use omicron_common::disk::{DatasetName, DiskIdentity, DiskVariant}; +use once_cell::sync::Lazy; use rand::distributions::{Alphanumeric, DistString}; use slog::{debug, info, Logger}; use std::process::Stdio; @@ -32,16 +34,16 @@ pub const M2_BACKING_DATASET: &'static str = "backing"; cfg_if! { if #[cfg(any(test, feature = "testing"))] { // Tuned for zone_bundle tests - pub const DEBUG_DATASET_QUOTA: usize = 1 << 20; + pub const DEBUG_DATASET_QUOTA: u64 = 1 << 20; } else { // TODO-correctness: This value of 100GiB is a pretty wild guess, and should be // tuned as needed. 
- pub const DEBUG_DATASET_QUOTA: usize = 100 * (1 << 30); + pub const DEBUG_DATASET_QUOTA: u64 = 100 * (1 << 30); } } // TODO-correctness: This value of 100GiB is a pretty wild guess, and should be // tuned as needed. -pub const DUMP_DATASET_QUOTA: usize = 100 * (1 << 30); +pub const DUMP_DATASET_QUOTA: u64 = 100 * (1 << 30); // passed to zfs create -o compression= pub const DUMP_DATASET_COMPRESSION: &'static str = "gzip-9"; @@ -54,41 +56,50 @@ pub const U2_DEBUG_DATASET: &'static str = "crypt/debug"; pub const CRYPT_DATASET: &'static str = "crypt"; const U2_EXPECTED_DATASET_COUNT: usize = 2; -static U2_EXPECTED_DATASETS: [ExpectedDataset; U2_EXPECTED_DATASET_COUNT] = [ - // Stores filesystems for zones - ExpectedDataset::new(ZONE_DATASET).wipe(), - // For storing full kernel RAM dumps - ExpectedDataset::new(DUMP_DATASET) - .quota(DUMP_DATASET_QUOTA) - .compression(DUMP_DATASET_COMPRESSION), -]; +static U2_EXPECTED_DATASETS: Lazy< + [ExpectedDataset; U2_EXPECTED_DATASET_COUNT], +> = Lazy::new(|| { + [ + // Stores filesystems for zones + ExpectedDataset::new(ZONE_DATASET).wipe(), + // For storing full kernel RAM dumps + ExpectedDataset::new(DUMP_DATASET) + .quota(ByteCount::try_from(DUMP_DATASET_QUOTA).unwrap()) + .compression(DUMP_DATASET_COMPRESSION), + ] +}); const M2_EXPECTED_DATASET_COUNT: usize = 6; -static M2_EXPECTED_DATASETS: [ExpectedDataset; M2_EXPECTED_DATASET_COUNT] = [ - // Stores software images. - // - // Should be duplicated to both M.2s. - ExpectedDataset::new(INSTALL_DATASET), - // Stores crash dumps. - ExpectedDataset::new(CRASH_DATASET), - // Backing store for OS data that should be persisted across reboots. - // Its children are selectively overlay mounted onto parts of the ramdisk - // root. - ExpectedDataset::new(M2_BACKING_DATASET), - // Stores cluter configuration information. - // - // Should be duplicated to both M.2s. - ExpectedDataset::new(CLUSTER_DATASET), - // Stores configuration data, including: - // - What services should be launched on this sled - // - Information about how to initialize the Sled Agent - // - (For scrimlets) RSS setup information - // - // Should be duplicated to both M.2s. - ExpectedDataset::new(CONFIG_DATASET), - // Store debugging data, such as service bundles. - ExpectedDataset::new(M2_DEBUG_DATASET).quota(DEBUG_DATASET_QUOTA), -]; +static M2_EXPECTED_DATASETS: Lazy< + [ExpectedDataset; M2_EXPECTED_DATASET_COUNT], +> = Lazy::new(|| { + [ + // Stores software images. + // + // Should be duplicated to both M.2s. + ExpectedDataset::new(INSTALL_DATASET), + // Stores crash dumps. + ExpectedDataset::new(CRASH_DATASET), + // Backing store for OS data that should be persisted across reboots. + // Its children are selectively overlay mounted onto parts of the ramdisk + // root. + ExpectedDataset::new(M2_BACKING_DATASET), + // Stores cluter configuration information. + // + // Should be duplicated to both M.2s. + ExpectedDataset::new(CLUSTER_DATASET), + // Stores configuration data, including: + // - What services should be launched on this sled + // - Information about how to initialize the Sled Agent + // - (For scrimlets) RSS setup information + // + // Should be duplicated to both M.2s. + ExpectedDataset::new(CONFIG_DATASET), + // Store debugging data, such as service bundles. + ExpectedDataset::new(M2_DEBUG_DATASET) + .quota(ByteCount::try_from(DEBUG_DATASET_QUOTA).unwrap()), + ] +}); // Helper type for describing expected datasets and their optional quota. 
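// (The expected-dataset tables above are wrapped in once_cell::sync::Lazy,
// presumably because ByteCount::try_from(...).unwrap() cannot be evaluated in
// a const/static initializer; the raw u64 byte values stay const and are
// converted on first access. The quota() builder on ExpectedDataset below
// likewise drops its const qualifier and takes a ByteCount.)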
#[derive(Clone, Copy, Debug)] @@ -96,7 +107,7 @@ struct ExpectedDataset { // Name for the dataset name: &'static str, // Optional quota, in _bytes_ - quota: Option, + quota: Option, // Identifies if the dataset should be deleted on boot wipe: bool, // Optional compression mode @@ -108,7 +119,7 @@ impl ExpectedDataset { ExpectedDataset { name, quota: None, wipe: false, compression: None } } - const fn quota(mut self, quota: usize) -> Self { + fn quota(mut self, quota: ByteCount) -> Self { self.quota = Some(quota); self } From e8504a58f3d89437837f2756bb843d5dc3690f02 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 27 Aug 2024 09:49:52 -0700 Subject: [PATCH 41/84] schema and openapi --- openapi/sled-agent.json | 16 ++++++++++------ schema/omicron-datasets.json | 34 ++++++++++++++++++++++------------ 2 files changed, 32 insertions(+), 18 deletions(-) diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index fdc87296b9..d7c4315a63 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -2143,16 +2143,20 @@ "quota": { "nullable": true, "description": "The upper bound on the amount of storage used by this dataset", - "type": "integer", - "format": "uint", - "minimum": 0 + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] }, "reservation": { "nullable": true, "description": "The lower bound on the amount of storage usable by this dataset", - "type": "integer", - "format": "uint", - "minimum": 0 + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] } }, "required": [ diff --git a/schema/omicron-datasets.json b/schema/omicron-datasets.json index 8b4bf59ae9..b6cd7da508 100644 --- a/schema/omicron-datasets.json +++ b/schema/omicron-datasets.json @@ -23,6 +23,12 @@ } }, "definitions": { + "ByteCount": { + "description": "Byte count to express memory or storage capacity.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, "DatasetConfig": { "description": "Configuration information necessary to request a single dataset", "type": "object", @@ -56,21 +62,25 @@ }, "quota": { "description": "The upper bound on the amount of storage used by this dataset", - "type": [ - "integer", - "null" - ], - "format": "uint", - "minimum": 0.0 + "anyOf": [ + { + "$ref": "#/definitions/ByteCount" + }, + { + "type": "null" + } + ] }, "reservation": { "description": "The lower bound on the amount of storage usable by this dataset", - "type": [ - "integer", - "null" - ], - "format": "uint", - "minimum": 0.0 + "anyOf": [ + { + "$ref": "#/definitions/ByteCount" + }, + { + "type": "null" + } + ] } } }, From 28e6497a75dd8152e0b9a2bc24e848f7793baf77 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 27 Aug 2024 16:31:20 -0700 Subject: [PATCH 42/84] Fix helios tests --- sled-agent/src/zone_bundle.rs | 13 ++++++++----- sled-storage/src/manager.rs | 2 +- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/sled-agent/src/zone_bundle.rs b/sled-agent/src/zone_bundle.rs index 46cee1c415..4f5c7f4101 100644 --- a/sled-agent/src/zone_bundle.rs +++ b/sled-agent/src/zone_bundle.rs @@ -1698,6 +1698,8 @@ mod illumos_tests { use chrono::TimeZone; use chrono::Timelike; use chrono::Utc; + use omicron_common::api::external::ByteCount; + use once_cell::sync::Lazy; use rand::RngCore; use sled_storage::manager_test_harness::StorageManagerTestHarness; use slog::Drain; @@ -1884,7 +1886,9 @@ mod illumos_tests { // i.e., the "ashift" value. 
An empty dataset is unlikely to contain more // than one megabyte of overhead, so use that as a conservative test size to // avoid issues. - const TEST_QUOTA: usize = sled_storage::dataset::DEBUG_DATASET_QUOTA; + static TEST_QUOTA: Lazy = Lazy::new(|| { + sled_storage::dataset::DEBUG_DATASET_QUOTA.try_into().unwrap() + }); async fn run_test_with_zfs_dataset(test: T) where @@ -1932,18 +1936,17 @@ mod illumos_tests { // If this needs to change, go modify the "add_vdevs" call in // "setup_storage". assert!( - TEST_QUOTA + *TEST_QUOTA < StorageManagerTestHarness::DEFAULT_VDEV_SIZE .try_into() .unwrap(), - "Quota larger than underlying device (quota: {}, device size: {})", + "Quota larger than underlying device (quota: {:?}, device size: {})", TEST_QUOTA, StorageManagerTestHarness::DEFAULT_VDEV_SIZE, ); anyhow::ensure!( - bundle_utilization.dataset_quota - == u64::try_from(TEST_QUOTA).unwrap(), + bundle_utilization.dataset_quota == TEST_QUOTA.to_bytes(), "computed incorrect dataset quota" ); diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index 747ac77823..e6f74afc8e 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -1668,7 +1668,7 @@ mod tests { // However, calling it with a different input and the same generation // number should fail. config.generation = current_config_generation; - config.datasets.values_mut().next().unwrap().reservation = Some(1024); + config.datasets.values_mut().next().unwrap().reservation = Some(1024.into()); let err = harness.handle().datasets_ensure(config.clone()).await.unwrap_err(); assert!(matches!(err, Error::DatasetConfigurationChanged { .. })); From b9d3dcc43d493f4de29f6e60483d87e3e89d2124 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 27 Aug 2024 16:38:34 -0700 Subject: [PATCH 43/84] fmt --- sled-storage/src/manager.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index e6f74afc8e..02789bfe76 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -1668,7 +1668,8 @@ mod tests { // However, calling it with a different input and the same generation // number should fail. config.generation = current_config_generation; - config.datasets.values_mut().next().unwrap().reservation = Some(1024.into()); + config.datasets.values_mut().next().unwrap().reservation = + Some(1024.into()); let err = harness.handle().datasets_ensure(config.clone()).await.unwrap_err(); assert!(matches!(err, Error::DatasetConfigurationChanged { .. 
})); From c9f170eb365902fb5eeebac40ef9bfaefa2a5651 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 27 Aug 2024 17:51:48 -0700 Subject: [PATCH 44/84] Make CompressionAlgorithm strongly typed --- common/src/disk.rs | 89 +++++++++++++++++++++++++++++++++++- illumos-utils/src/zfs.rs | 7 +-- sled-agent/src/backing_fs.rs | 13 +++--- sled-storage/src/dataset.rs | 22 ++++++--- 4 files changed, 113 insertions(+), 18 deletions(-) diff --git a/common/src/disk.rs b/common/src/disk.rs index a2016a9442..ed0bf8666e 100644 --- a/common/src/disk.rs +++ b/common/src/disk.rs @@ -135,6 +135,91 @@ impl DatasetName { } } +#[derive( + Copy, + Clone, + Debug, + Deserialize, + Serialize, + JsonSchema, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, +)] +pub struct GzipLevel(u8); + +// Fastest compression level +const GZIP_LEVEL_MIN: u8 = 1; + +// Best compression ratio +const GZIP_LEVEL_MAX: u8 = 9; + +impl GzipLevel { + pub const fn new() -> Self { + assert!(N >= GZIP_LEVEL_MIN, "Compression level too small"); + assert!(N <= GZIP_LEVEL_MAX, "Compression level too large"); + Self(N) + } +} + +#[derive( + Copy, + Clone, + Debug, + Default, + Deserialize, + Serialize, + JsonSchema, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, +)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum CompressionAlgorithm { + // Selects a default compression algorithm. This is dependent on both the + // zpool and OS version. + On, + + // Disables compression. + #[default] + Off, + + // Selects the default Gzip compression level. + // + // According to the ZFS docs, this is "gzip-6", but that's a default value, + // which may change with OS updates. + Gzip, + + GzipN { + level: GzipLevel, + }, + Lz4, + Lzjb, + Zle, +} + +impl fmt::Display for CompressionAlgorithm { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use CompressionAlgorithm::*; + let s = match self { + On => "on", + Off => "off", + Gzip => "gzip", + GzipN { level } => { + return write!(f, "gzip-{}", level.0); + } + Lz4 => "lz4", + Lzjb => "lzjb", + Zle => "zle", + }; + write!(f, "{}", s) + } +} + /// Configuration information necessary to request a single dataset #[derive( Clone, @@ -155,8 +240,8 @@ pub struct DatasetConfig { /// The dataset's name pub name: DatasetName, - /// The compression mode to be supplied, if any - pub compression: Option, + /// The compression mode to be used by the dataset + pub compression: CompressionAlgorithm, /// The upper bound on the amount of storage used by this dataset pub quota: Option, diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index 5df1b73c07..5d512677f8 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -6,6 +6,7 @@ use crate::{execute, PFEXEC}; use camino::{Utf8Path, Utf8PathBuf}; +use omicron_common::disk::CompressionAlgorithm; use omicron_common::disk::DiskIdentity; use std::fmt; @@ -204,7 +205,7 @@ pub struct EncryptionDetails { pub struct SizeDetails { pub quota: Option, pub reservation: Option, - pub compression: Option, + pub compression: CompressionAlgorithm, } #[cfg_attr(any(test, feature = "testing"), mockall::automock, allow(dead_code))] @@ -403,7 +404,7 @@ impl Zfs { mountpoint: &Mountpoint, quota: Option, reservation: Option, - compression: Option, + compression: CompressionAlgorithm, ) -> Result<(), EnsureFilesystemError> { let quota = quota .map(|q| q.to_string()) @@ -411,7 +412,7 @@ impl Zfs { let reservation = reservation .map(|r| r.to_string()) .unwrap_or_else(|| String::from("none")); - let compression = compression.unwrap_or_else(|| 
String::from("off")); + let compression = compression.to_string(); if let Err(err) = Self::set_value(name, "quota", "a) { return Err(EnsureFilesystemError { diff --git a/sled-agent/src/backing_fs.rs b/sled-agent/src/backing_fs.rs index 48002a8841..a0f7826db3 100644 --- a/sled-agent/src/backing_fs.rs +++ b/sled-agent/src/backing_fs.rs @@ -25,6 +25,7 @@ use camino::Utf8PathBuf; use illumos_utils::zfs::{ EnsureFilesystemError, GetValueError, Mountpoint, SizeDetails, Zfs, }; +use omicron_common::disk::CompressionAlgorithm; use std::io; #[derive(Debug, thiserror::Error)] @@ -50,7 +51,7 @@ struct BackingFs<'a> { // Optional quota, in _bytes_ quota: Option, // Optional compression mode - compression: Option<&'static str>, + compression: CompressionAlgorithm, // Linked service service: Option<&'static str>, // Subdirectories to ensure @@ -63,7 +64,7 @@ impl<'a> BackingFs<'a> { name, mountpoint: "legacy", quota: None, - compression: None, + compression: CompressionAlgorithm::Off, service: None, subdirs: None, } @@ -79,8 +80,8 @@ impl<'a> BackingFs<'a> { self } - const fn compression(mut self, compression: &'static str) -> Self { - self.compression = Some(compression); + const fn compression(mut self, compression: CompressionAlgorithm) -> Self { + self.compression = compression; self } @@ -101,7 +102,7 @@ const BACKING_FMD_SUBDIRS: [&'static str; 3] = ["rsrc", "ckpt", "xprt"]; const BACKING_FMD_SERVICE: &'static str = "svc:/system/fmd:default"; const BACKING_FMD_QUOTA: usize = 500 * (1 << 20); // 500 MiB -const BACKING_COMPRESSION: &'static str = "on"; +const BACKING_COMPRESSION: CompressionAlgorithm = CompressionAlgorithm::On; const BACKINGFS_COUNT: usize = 1; static BACKINGFS: [BackingFs; BACKINGFS_COUNT] = @@ -138,7 +139,7 @@ pub(crate) fn ensure_backing_fs( let size_details = Some(SizeDetails { quota: bfs.quota, reservation: None, - compression: bfs.compression.map(|s| s.to_string()), + compression: bfs.compression, }); Zfs::ensure_filesystem( diff --git a/sled-storage/src/dataset.rs b/sled-storage/src/dataset.rs index b95877418e..e2b024db11 100644 --- a/sled-storage/src/dataset.rs +++ b/sled-storage/src/dataset.rs @@ -15,7 +15,9 @@ use illumos_utils::zfs::{ use illumos_utils::zpool::ZpoolName; use key_manager::StorageKeyRequester; use omicron_common::api::internal::shared::DatasetKind; -use omicron_common::disk::{DatasetName, DiskIdentity, DiskVariant}; +use omicron_common::disk::{ + CompressionAlgorithm, DatasetName, DiskIdentity, DiskVariant, GzipLevel, +}; use rand::distributions::{Alphanumeric, DistString}; use slog::{debug, info, Logger}; use std::process::Stdio; @@ -43,7 +45,8 @@ cfg_if! { // tuned as needed. 
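As a quick sketch of how the strongly typed values map back onto the strings
`zfs` expects (following the Display impl added in this patch):

    // Illustration only; GzipLevel::new::<9>() is the compile-time-checked level 9.
    assert_eq!(CompressionAlgorithm::On.to_string(), "on");
    assert_eq!(
        CompressionAlgorithm::GzipN { level: GzipLevel::new::<9>() }.to_string(),
        "gzip-9",
    );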
pub const DUMP_DATASET_QUOTA: usize = 100 * (1 << 30); // passed to zfs create -o compression= -pub const DUMP_DATASET_COMPRESSION: &'static str = "gzip-9"; +pub const DUMP_DATASET_COMPRESSION: CompressionAlgorithm = + CompressionAlgorithm::GzipN { level: GzipLevel::new::<9>() }; // U.2 datasets live under the encrypted dataset and inherit encryption pub const ZONE_DATASET: &'static str = "crypt/zone"; @@ -100,12 +103,17 @@ struct ExpectedDataset { // Identifies if the dataset should be deleted on boot wipe: bool, // Optional compression mode - compression: Option<&'static str>, + compression: CompressionAlgorithm, } impl ExpectedDataset { const fn new(name: &'static str) -> Self { - ExpectedDataset { name, quota: None, wipe: false, compression: None } + ExpectedDataset { + name, + quota: None, + wipe: false, + compression: CompressionAlgorithm::Off, + } } const fn quota(mut self, quota: usize) -> Self { @@ -118,8 +126,8 @@ impl ExpectedDataset { self } - const fn compression(mut self, compression: &'static str) -> Self { - self.compression = Some(compression); + const fn compression(mut self, compression: CompressionAlgorithm) -> Self { + self.compression = compression; self } } @@ -291,7 +299,7 @@ pub(crate) async fn ensure_zpool_has_datasets( let size_details = Some(SizeDetails { quota: dataset.quota, reservation: None, - compression: dataset.compression.map(|s| s.to_string()), + compression: dataset.compression, }); Zfs::ensure_filesystem( name, From f5bc35ad1fb8aa0ce48f6713e1640aeea04e3705 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 27 Aug 2024 17:53:38 -0700 Subject: [PATCH 45/84] Fixing helios-only tests, clippy --- sled-storage/src/manager.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index 747ac77823..88e1bbaa34 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -1028,7 +1028,7 @@ impl StorageManager { let size_details = Some(illumos_utils::zfs::SizeDetails { quota: config.quota, reservation: config.reservation, - compression: config.compression.clone(), + compression: config.compression, }); Zfs::ensure_filesystem( fs_name, @@ -1122,6 +1122,7 @@ mod tests { use super::*; use camino_tempfile::tempdir_in; use omicron_common::api::external::Generation; + use omicron_common::disk::CompressionAlgorithm; use omicron_common::disk::DatasetKind; use omicron_common::disk::DiskManagementError; use omicron_common::ledger; @@ -1632,7 +1633,7 @@ mod tests { DatasetConfig { id, name, - compression: None, + compression: CompressionAlgorithm::Off, quota: None, reservation: None, }, From 474ec133d0cc105366632249e5bc93cad4fe517f Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 28 Aug 2024 10:11:22 -0700 Subject: [PATCH 46/84] schemas --- openapi/sled-agent.json | 121 ++++++++++++++++++++++++++++++++++- schema/omicron-datasets.json | 121 +++++++++++++++++++++++++++++++++-- 2 files changed, 235 insertions(+), 7 deletions(-) diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index c314504745..bb8e4e0b87 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -2061,6 +2061,112 @@ } ] }, + "CompressionAlgorithm": { + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "on" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "off" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": 
{ + "type": "string", + "enum": [ + "gzip" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "level": { + "$ref": "#/components/schemas/GzipLevel" + }, + "type": { + "type": "string", + "enum": [ + "gzip_n" + ] + } + }, + "required": [ + "level", + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "lz4" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "lzjb" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "zle" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "CrucibleOpts": { "description": "CrucibleOpts\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"id\", \"lossy\", \"read_only\", \"target\" ], \"properties\": { \"cert_pem\": { \"type\": [ \"string\", \"null\" ] }, \"control\": { \"type\": [ \"string\", \"null\" ] }, \"flush_timeout\": { \"type\": [ \"number\", \"null\" ], \"format\": \"float\" }, \"id\": { \"type\": \"string\", \"format\": \"uuid\" }, \"key\": { \"type\": [ \"string\", \"null\" ] }, \"key_pem\": { \"type\": [ \"string\", \"null\" ] }, \"lossy\": { \"type\": \"boolean\" }, \"read_only\": { \"type\": \"boolean\" }, \"root_cert_pem\": { \"type\": [ \"string\", \"null\" ] }, \"target\": { \"type\": \"array\", \"items\": { \"type\": \"string\" } } } } ```
", "type": "object", @@ -2119,9 +2225,12 @@ "type": "object", "properties": { "compression": { - "nullable": true, - "description": "The compression mode to be supplied, if any", - "type": "string" + "description": "The compression mode to be used by the dataset", + "allOf": [ + { + "$ref": "#/components/schemas/CompressionAlgorithm" + } + ] }, "id": { "description": "The UUID of the dataset being requested", @@ -2155,6 +2264,7 @@ } }, "required": [ + "compression", "id", "name" ] @@ -2874,6 +2984,11 @@ "format": "uint64", "minimum": 0 }, + "GzipLevel": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, "HostIdentifier": { "description": "A `HostIdentifier` represents either an IP host or network (v4 or v6), or an entire VPC (identified by its VNI). It is used in firewall rule host filters.", "oneOf": [ diff --git a/schema/omicron-datasets.json b/schema/omicron-datasets.json index 8b4bf59ae9..07fc2cfb13 100644 --- a/schema/omicron-datasets.json +++ b/schema/omicron-datasets.json @@ -23,19 +23,127 @@ } }, "definitions": { + "CompressionAlgorithm": { + "oneOf": [ + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "on" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "off" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "gzip" + ] + } + } + }, + { + "type": "object", + "required": [ + "level", + "type" + ], + "properties": { + "level": { + "$ref": "#/definitions/GzipLevel" + }, + "type": { + "type": "string", + "enum": [ + "gzip_n" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "lz4" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "lzjb" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "zle" + ] + } + } + } + ] + }, "DatasetConfig": { "description": "Configuration information necessary to request a single dataset", "type": "object", "required": [ + "compression", "id", "name" ], "properties": { "compression": { - "description": "The compression mode to be supplied, if any", - "type": [ - "string", - "null" + "description": "The compression mode to be used by the dataset", + "allOf": [ + { + "$ref": "#/definitions/CompressionAlgorithm" + } ] }, "id": { @@ -99,6 +207,11 @@ "format": "uint64", "minimum": 0.0 }, + "GzipLevel": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, "TypedUuidForDatasetKind": { "type": "string", "format": "uuid" From 38560d1e20972fa35190930c905769e27c9e4b87 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 28 Aug 2024 12:45:28 -0700 Subject: [PATCH 47/84] clippy --- live-tests/tests/test_nexus_add_remove.rs | 7 ++++++- nexus/reconfigurator/execution/src/datasets.rs | 2 +- sled-agent/src/rack_setup/service.rs | 7 ++----- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/live-tests/tests/test_nexus_add_remove.rs b/live-tests/tests/test_nexus_add_remove.rs index 70e55b704a..5121847d8c 100644 --- a/live-tests/tests/test_nexus_add_remove.rs +++ b/live-tests/tests/test_nexus_add_remove.rs @@ -63,7 +63,12 @@ async fn test_nexus_add_remove(lc: &LiveTestContext) { .context("adding Nexus zone")?; assert_matches!( count, - EnsureMultiple::Changed 
{ added: 1, removed: 0 } + EnsureMultiple::Changed { + added: 1, + removed: 0, + updated: 0, + expunged: 0 + } ); Ok(()) }, diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index 827c5ba74b..1a671dd716 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -58,7 +58,7 @@ pub(crate) async fn deploy_datasets( let config: DatasetsConfig = match config.clone().try_into() { Ok(config) => config, - Err(err) => return Some(err.into()) + Err(err) => return Some(err) }; let result = diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index 9276157eea..3a7e36765f 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -96,7 +96,6 @@ use nexus_types::deployment::{ }; use nexus_types::external_api::views::SledState; use omicron_common::address::get_sled_address; -use omicron_common::api::external::ByteCount; use omicron_common::api::external::Generation; use omicron_common::api::internal::shared::ExternalPortDiscovery; use omicron_common::api::internal::shared::LldpAdminStatus; @@ -1440,10 +1439,8 @@ pub(crate) fn build_initial_blueprint_from_sled_configs( kind: d.name.dataset().clone(), address, compression: d.compression.to_string(), - quota: d.quota.map(|q| ByteCount::try_from(q).unwrap()), - reservation: d - .reservation - .map(|r| ByteCount::try_from(r).unwrap()), + quota: d.quota, + reservation: d.reservation, }, ); } From 77b3721600198ca815727f7d3ab2760ac3cd56d1 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 28 Aug 2024 14:08:25 -0700 Subject: [PATCH 48/84] Pass byte values as single integers (no 'GiB' suffixes) --- illumos-utils/src/zfs.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index 64226c1fa1..71c22425fa 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -513,10 +513,10 @@ impl Zfs { compression: CompressionAlgorithm, ) -> Result<(), EnsureFilesystemError> { let quota = quota - .map(|q| q.to_string()) + .map(|q| q.to_bytes().to_string()) .unwrap_or_else(|| String::from("none")); let reservation = reservation - .map(|r| r.to_string()) + .map(|r| r.to_bytes().to_string()) .unwrap_or_else(|| String::from("none")); let compression = compression.to_string(); From d42e44aac4854ce76cca111b1222185443ba68ad Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 28 Aug 2024 15:35:04 -0700 Subject: [PATCH 49/84] comments --- nexus/reconfigurator/execution/src/datasets.rs | 2 +- .../planning/src/blueprint_builder/builder.rs | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index 1a671dd716..fa340667aa 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -123,7 +123,7 @@ pub(crate) struct EnsureDatasetsResult { /// database record exists in `datastore`. /// /// Updates all existing dataset records that don't match the blueprint. -/// Returns the number of datasets inserted. +/// Returns the number of datasets changed. 
 pub(crate) async fn ensure_dataset_records_exist(
     opctx: &OpContext,
     datastore: &DataStore,
diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs
index a7d7b5ff0e..026e7272b0 100644
--- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs
+++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs
@@ -2628,7 +2628,9 @@ pub mod test {
                     }
                 })
                 .collect::>();
-            // We saw two datasets being expunged earlier -- validate that
+            // We saw two datasets being expunged earlier when we called
+            // `sled_ensure_datasets` -- validate that this is true when inspecting
+            // the blueprint too.
             assert_eq!(expunged_datasets.len(), 2);
 
             // Remove these two datasets from the input.
@@ -2665,6 +2667,9 @@
                     removed: 2
                 }
             );
+
+            // They should only be removed once -- repeated calls won't change the
+            // builder further.
             let r =
                 builder.sled_ensure_datasets(sled_id, sled_resources).unwrap();
             assert_eq!(r, EnsureMultiple::NotNeeded);

From bfe6e7f935dc1831b1384b5347790031cbc38ecc Mon Sep 17 00:00:00 2001
From: Sean Klein 
Date: Wed, 28 Aug 2024 15:46:42 -0700
Subject: [PATCH 50/84] more comments

---
 .../planning/src/blueprint_builder/builder.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs
index 026e7272b0..b8ed931bf4 100644
--- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs
+++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs
@@ -139,7 +139,9 @@ pub enum EnsureMultiple {
         updated: usize,
         /// An item was expunged in the blueprint
         expunged: usize,
-        /// An item was removed from the blueprint
+        /// An item was removed from the blueprint.
+        ///
+        /// This usually happens after the work of expungement has completed.
         removed: usize,
     },
 
@@ -1776,8 +1778,6 @@ struct BlueprintSledDatasetsBuilder<'a> {
     database_datasets: BTreeMap>,
 
-    // TODO: Could combine these maps?
- // Datasets which are unchanged from the prior blueprint unchanged_datasets: BTreeMap>, From 4f42f4705b247935e582756c902e273f3ea717fc Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 28 Aug 2024 17:01:07 -0700 Subject: [PATCH 51/84] Make compression optional for back compat, fix tests --- nexus/db-model/src/dataset.rs | 22 +- nexus/db-model/src/schema.rs | 2 +- .../reconfigurator/execution/src/datasets.rs | 2 +- schema/crdb/blueprint-dataset/up01.sql | 2 +- schema/crdb/dbinit.sql | 2 +- schema/rss-service-plan-v4.json | 229 ++++++++++++++++++ 6 files changed, 249 insertions(+), 10 deletions(-) diff --git a/nexus/db-model/src/dataset.rs b/nexus/db-model/src/dataset.rs index 3bec08f836..a469d42ce7 100644 --- a/nexus/db-model/src/dataset.rs +++ b/nexus/db-model/src/dataset.rs @@ -51,7 +51,7 @@ pub struct Dataset { quota: Option, reservation: Option, - compression: String, + compression: Option, } impl Dataset { @@ -80,7 +80,7 @@ impl Dataset { zone_name, quota: None, reservation: None, - compression: String::new(), + compression: None, } } @@ -114,7 +114,11 @@ impl From for Dataset { zone_name, quota: bp.quota.map(ByteCount::from), reservation: bp.reservation.map(ByteCount::from), - compression: bp.compression, + compression: if bp.compression.is_empty() { + None + } else { + Some(bp.compression) + }, } } } @@ -123,6 +127,14 @@ impl TryFrom for omicron_common::disk::DatasetConfig { type Error = Error; fn try_from(dataset: Dataset) -> Result { + let compression = if let Some(c) = dataset.compression { + c.parse().map_err(|e: anyhow::Error| { + Error::internal_error(&e.to_string()) + })? + } else { + omicron_common::disk::CompressionAlgorithm::Off + }; + Ok(Self { id: DatasetUuid::from_untyped_uuid(dataset.identity.id), name: omicron_common::disk::DatasetName::new( @@ -133,9 +145,7 @@ impl TryFrom for omicron_common::disk::DatasetConfig { ), quota: dataset.quota.map(|q| q.into()), reservation: dataset.reservation.map(|r| r.into()), - compression: dataset.compression.parse().map_err( - |e: anyhow::Error| Error::internal_error(&e.to_string()), - )?, + compression, }) } } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index d301379a09..1cdc9e9f8a 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1027,7 +1027,7 @@ table! 
{ quota -> Nullable, reservation -> Nullable, - compression -> Text, + compression -> Nullable, } } diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index fa340667aa..fbbc301fc5 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -465,7 +465,7 @@ mod tests { first_dataset.quota = Some(ByteCount::from_kibibytes_u32(1)); first_dataset.reservation = Some(ByteCount::from_kibibytes_u32(2)); - first_dataset.compression = String::from("pied_piper"); + first_dataset.compression = String::from("lz4"); let _ = first_dataset; // Update the datastore diff --git a/schema/crdb/blueprint-dataset/up01.sql b/schema/crdb/blueprint-dataset/up01.sql index 94e0188c65..cfdde5bacd 100644 --- a/schema/crdb/blueprint-dataset/up01.sql +++ b/schema/crdb/blueprint-dataset/up01.sql @@ -1,4 +1,4 @@ ALTER TABLE omicron.public.dataset ADD COLUMN IF NOT EXISTS quota INT8, ADD COLUMN IF NOT EXISTS reservation INT8, - ADD COLUMN IF NOT EXISTS compression TEXT NOT NULL DEFAULT '' + ADD COLUMN IF NOT EXISTS compression TEXT diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 72706472c9..eb170be6b0 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -543,7 +543,7 @@ CREATE TABLE IF NOT EXISTS omicron.public.dataset ( quota INT8, reservation INT8, - compression TEXT NOT NULL DEFAULT '', + compression TEXT, /* Crucible must make use of 'size_used'; other datasets manage their own storage */ CONSTRAINT size_used_column_set_for_crucible CHECK ( diff --git a/schema/rss-service-plan-v4.json b/schema/rss-service-plan-v4.json index badfaf4589..18a5b84a05 100644 --- a/schema/rss-service-plan-v4.json +++ b/schema/rss-service-plan-v4.json @@ -458,6 +458,217 @@ } ] }, + "ByteCount": { + "description": "Byte count to express memory or storage capacity.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "CompressionAlgorithm": { + "oneOf": [ + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "on" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "off" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "gzip" + ] + } + } + }, + { + "type": "object", + "required": [ + "level", + "type" + ], + "properties": { + "level": { + "$ref": "#/definitions/GzipLevel" + }, + "type": { + "type": "string", + "enum": [ + "gzip_n" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "lz4" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "lzjb" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "zle" + ] + } + } + } + ] + }, + "DatasetConfig": { + "description": "Configuration information necessary to request a single dataset", + "type": "object", + "required": [ + "compression", + "id", + "name" + ], + "properties": { + "compression": { + "description": "The compression mode to be used by the dataset", + "allOf": [ + { + "$ref": "#/definitions/CompressionAlgorithm" + } + ] + }, + "id": { + "description": "The UUID of the dataset being requested", + "allOf": [ + { + "$ref": 
"#/definitions/TypedUuidForDatasetKind" + } + ] + }, + "name": { + "description": "The dataset's name", + "allOf": [ + { + "$ref": "#/definitions/DatasetName" + } + ] + }, + "quota": { + "description": "The upper bound on the amount of storage used by this dataset", + "anyOf": [ + { + "$ref": "#/definitions/ByteCount" + }, + { + "type": "null" + } + ] + }, + "reservation": { + "description": "The lower bound on the amount of storage usable by this dataset", + "anyOf": [ + { + "$ref": "#/definitions/ByteCount" + }, + { + "type": "null" + } + ] + } + } + }, + "DatasetKind": { + "description": "The kind of dataset. See the `DatasetKind` enum in omicron-common for possible values.", + "type": "string" + }, + "DatasetName": { + "type": "object", + "required": [ + "kind", + "pool_name" + ], + "properties": { + "kind": { + "$ref": "#/definitions/DatasetKind" + }, + "pool_name": { + "$ref": "#/definitions/ZpoolName" + } + } + }, + "DatasetsConfig": { + "type": "object", + "required": [ + "datasets", + "generation" + ], + "properties": { + "datasets": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/DatasetConfig" + } + }, + "generation": { + "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.\n\nNote that \"Generation::new()\", AKA, the first generation number, is reserved for \"no datasets\". This is the default configuration for a sled before any requests have been made.", + "allOf": [ + { + "$ref": "#/definitions/Generation" + } + ] + } + } + }, "DiskIdentity": { "description": "Uniquely identifies a disk.", "type": "object", @@ -593,6 +804,11 @@ "format": "uint64", "minimum": 0.0 }, + "GzipLevel": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, "IpNet": { "oneOf": [ { @@ -892,10 +1108,19 @@ "SledConfig": { "type": "object", "required": [ + "datasets", "disks", "zones" ], "properties": { + "datasets": { + "description": "Datasets configured for this sled", + "allOf": [ + { + "$ref": "#/definitions/DatasetsConfig" + } + ] + }, "disks": { "description": "Control plane disks configured for this sled", "allOf": [ @@ -971,6 +1196,10 @@ } } }, + "TypedUuidForDatasetKind": { + "type": "string", + "format": "uuid" + }, "TypedUuidForExternalIpKind": { "type": "string", "format": "uuid" From 9e4da68232196b7d7b58fa77835018883d13fcf9 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 4 Sep 2024 18:12:16 -0700 Subject: [PATCH 52/84] Started review feedback --- .../planning/src/blueprint_builder/builder.rs | 7 +- nexus/types/src/deployment/planning_input.rs | 4 +- schema/rss-service-plan-v3.json | 106 -------- schema/rss-service-plan-v4.json | 229 ------------------ sled-agent/src/rack_setup/plan/service.rs | 42 +++- sled-agent/src/rack_setup/service.rs | 2 +- 6 files changed, 47 insertions(+), 343 deletions(-) diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 461c251045..95f0cc8f3a 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -933,7 +933,12 @@ impl<'a> BlueprintBuilder<'a> { )?; } - Ok(EnsureMultiple::Changed { added: to_add, removed: 0, expunged: 0, updated: 0 }) + 
Ok(EnsureMultiple::Changed { + added: to_add, + removed: 0, + expunged: 0, + updated: 0, + }) } pub fn sled_ensure_zone_ntp( diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs index 47cd4fb562..1e48fe843f 100644 --- a/nexus/types/src/deployment/planning_input.rs +++ b/nexus/types/src/deployment/planning_input.rs @@ -517,11 +517,11 @@ impl SledResources { pub fn all_datasets( &self, filter: ZpoolFilter, - ) -> impl Iterator)> + '_ { + ) -> impl Iterator + '_ { self.zpools.iter().filter_map(move |(zpool, (disk, datasets))| { filter .matches_policy_and_state(disk.policy, disk.state) - .then_some((zpool, datasets)) + .then_some((zpool, datasets.as_slice())) }) } } diff --git a/schema/rss-service-plan-v3.json b/schema/rss-service-plan-v3.json index 58f1877b80..a003cde6f0 100644 --- a/schema/rss-service-plan-v3.json +++ b/schema/rss-service-plan-v3.json @@ -18,99 +18,6 @@ } }, "definitions": { - "DatasetConfig": { - "description": "Configuration information necessary to request a single dataset", - "type": "object", - "required": [ - "id", - "name" - ], - "properties": { - "compression": { - "description": "The compression mode to be supplied, if any", - "type": [ - "string", - "null" - ] - }, - "id": { - "description": "The UUID of the dataset being requested", - "allOf": [ - { - "$ref": "#/definitions/TypedUuidForDatasetKind" - } - ] - }, - "name": { - "description": "The dataset's name", - "allOf": [ - { - "$ref": "#/definitions/DatasetName" - } - ] - }, - "quota": { - "description": "The upper bound on the amount of storage used by this dataset", - "type": [ - "integer", - "null" - ], - "format": "uint64", - "minimum": 0.0 - }, - "reservation": { - "description": "The lower bound on the amount of storage usable by this dataset", - "type": [ - "integer", - "null" - ], - "format": "uint64", - "minimum": 0.0 - } - } - }, - "DatasetKind": { - "description": "The kind of dataset. See the `DatasetKind` enum in omicron-common for possible values.", - "type": "string" - }, - "DatasetName": { - "type": "object", - "required": [ - "kind", - "pool_name" - ], - "properties": { - "kind": { - "$ref": "#/definitions/DatasetKind" - }, - "pool_name": { - "$ref": "#/definitions/ZpoolName" - } - } - }, - "DatasetsConfig": { - "type": "object", - "required": [ - "datasets", - "generation" - ], - "properties": { - "datasets": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/DatasetConfig" - } - }, - "generation": { - "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.\n\nNote that \"Generation::new()\", AKA, the first generation number, is reserved for \"no datasets\". 
This is the default configuration for a sled before any requests have been made.", - "allOf": [ - { - "$ref": "#/definitions/Generation" - } - ] - } - } - }, "DiskIdentity": { "description": "Uniquely identifies a disk.", "type": "object", @@ -903,19 +810,10 @@ "SledConfig": { "type": "object", "required": [ - "datasets", "disks", "zones" ], "properties": { - "datasets": { - "description": "Datasets configured for this sled", - "allOf": [ - { - "$ref": "#/definitions/DatasetsConfig" - } - ] - }, "disks": { "description": "Control plane disks configured for this sled", "allOf": [ @@ -991,10 +889,6 @@ } } }, - "TypedUuidForDatasetKind": { - "type": "string", - "format": "uuid" - }, "TypedUuidForZpoolKind": { "type": "string", "format": "uuid" diff --git a/schema/rss-service-plan-v4.json b/schema/rss-service-plan-v4.json index 18a5b84a05..badfaf4589 100644 --- a/schema/rss-service-plan-v4.json +++ b/schema/rss-service-plan-v4.json @@ -458,217 +458,6 @@ } ] }, - "ByteCount": { - "description": "Byte count to express memory or storage capacity.", - "type": "integer", - "format": "uint64", - "minimum": 0.0 - }, - "CompressionAlgorithm": { - "oneOf": [ - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "on" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "off" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "gzip" - ] - } - } - }, - { - "type": "object", - "required": [ - "level", - "type" - ], - "properties": { - "level": { - "$ref": "#/definitions/GzipLevel" - }, - "type": { - "type": "string", - "enum": [ - "gzip_n" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "lz4" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "lzjb" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "zle" - ] - } - } - } - ] - }, - "DatasetConfig": { - "description": "Configuration information necessary to request a single dataset", - "type": "object", - "required": [ - "compression", - "id", - "name" - ], - "properties": { - "compression": { - "description": "The compression mode to be used by the dataset", - "allOf": [ - { - "$ref": "#/definitions/CompressionAlgorithm" - } - ] - }, - "id": { - "description": "The UUID of the dataset being requested", - "allOf": [ - { - "$ref": "#/definitions/TypedUuidForDatasetKind" - } - ] - }, - "name": { - "description": "The dataset's name", - "allOf": [ - { - "$ref": "#/definitions/DatasetName" - } - ] - }, - "quota": { - "description": "The upper bound on the amount of storage used by this dataset", - "anyOf": [ - { - "$ref": "#/definitions/ByteCount" - }, - { - "type": "null" - } - ] - }, - "reservation": { - "description": "The lower bound on the amount of storage usable by this dataset", - "anyOf": [ - { - "$ref": "#/definitions/ByteCount" - }, - { - "type": "null" - } - ] - } - } - }, - "DatasetKind": { - "description": "The kind of dataset. 
See the `DatasetKind` enum in omicron-common for possible values.", - "type": "string" - }, - "DatasetName": { - "type": "object", - "required": [ - "kind", - "pool_name" - ], - "properties": { - "kind": { - "$ref": "#/definitions/DatasetKind" - }, - "pool_name": { - "$ref": "#/definitions/ZpoolName" - } - } - }, - "DatasetsConfig": { - "type": "object", - "required": [ - "datasets", - "generation" - ], - "properties": { - "datasets": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/DatasetConfig" - } - }, - "generation": { - "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.\n\nNote that \"Generation::new()\", AKA, the first generation number, is reserved for \"no datasets\". This is the default configuration for a sled before any requests have been made.", - "allOf": [ - { - "$ref": "#/definitions/Generation" - } - ] - } - } - }, "DiskIdentity": { "description": "Uniquely identifies a disk.", "type": "object", @@ -804,11 +593,6 @@ "format": "uint64", "minimum": 0.0 }, - "GzipLevel": { - "type": "integer", - "format": "uint8", - "minimum": 0.0 - }, "IpNet": { "oneOf": [ { @@ -1108,19 +892,10 @@ "SledConfig": { "type": "object", "required": [ - "datasets", "disks", "zones" ], "properties": { - "datasets": { - "description": "Datasets configured for this sled", - "allOf": [ - { - "$ref": "#/definitions/DatasetsConfig" - } - ] - }, "disks": { "description": "Control plane disks configured for this sled", "allOf": [ @@ -1196,10 +971,6 @@ } } }, - "TypedUuidForDatasetKind": { - "type": "string", - "format": "uuid" - }, "TypedUuidForExternalIpKind": { "type": "string", "format": "uuid" diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index 3fc6851a94..9863982d1c 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -124,6 +124,12 @@ pub enum PlanError { #[error("Found only v2 service plan")] FoundV2, + + #[error("Found only v3 service plan")] + FoundV3, + + #[error("Found only v4 service plan")] + FoundV4, } #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema)] @@ -153,7 +159,8 @@ impl Ledgerable for Plan { const RSS_SERVICE_PLAN_V1_FILENAME: &str = "rss-service-plan.json"; const RSS_SERVICE_PLAN_V2_FILENAME: &str = "rss-service-plan-v2.json"; const RSS_SERVICE_PLAN_V3_FILENAME: &str = "rss-service-plan-v3.json"; -const RSS_SERVICE_PLAN_FILENAME: &str = "rss-service-plan-v4.json"; +const RSS_SERVICE_PLAN_V4_FILENAME: &str = "rss-service-plan-v4.json"; +const RSS_SERVICE_PLAN_FILENAME: &str = "rss-service-plan-v5.json"; pub fn from_sockaddr_to_external_floating_addr( addr: SocketAddr, @@ -265,7 +272,15 @@ impl Plan { err, } })? { - Err(PlanError::FoundV2) + Err(PlanError::FoundV3) + } else if Self::has_v4(storage_manager).await.map_err(|err| { + // Same as the comment above, but for version 4. + PlanError::Io { + message: String::from("looking for v4 RSS plan"), + err, + } + })? 
{ + Err(PlanError::FoundV4) } else { Ok(None) } @@ -328,6 +343,25 @@ impl Plan { Ok(false) } + async fn has_v4( + storage_manager: &StorageHandle, + ) -> Result { + let paths = storage_manager + .get_latest_disks() + .await + .all_m2_mountpoints(CONFIG_DATASET) + .into_iter() + .map(|p| p.join(RSS_SERVICE_PLAN_V4_FILENAME)); + + for p in paths { + if p.try_exists()? { + return Ok(true); + } + } + + Ok(false) + } + async fn is_sled_scrimlet( log: &Logger, address: SocketAddrV6, @@ -1499,10 +1533,10 @@ mod tests { } #[test] - fn test_rss_service_plan_v4_schema() { + fn test_rss_service_plan_v5_schema() { let schema = schemars::schema_for!(Plan); expectorate::assert_contents( - "../schema/rss-service-plan-v4.json", + "../schema/rss-service-plan-v5.json", &serde_json::to_string_pretty(&schema).unwrap(), ); } diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index 48146369c2..9505eb2d97 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -17,7 +17,7 @@ //! state files that get generated as RSS executes: //! //! - /pool/int/UUID/config/rss-sled-plan.json (Sled Plan) -//! - /pool/int/UUID/config/rss-service-plan-v3.json (Service Plan) +//! - /pool/int/UUID/config/rss-service-plan-v5.json (Service Plan) //! - /pool/int/UUID/config/rss-plan-completed.marker (Plan Execution Complete) //! //! These phases are described below. As each phase completes, a corresponding From ec04e8b21a23bf0941c5a1028e56a387c9d1ab58 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 5 Sep 2024 10:34:05 -0700 Subject: [PATCH 53/84] Actually add the v5 schema --- schema/rss-service-plan-v5.json | 1228 +++++++++++++++++++++++++++++++ 1 file changed, 1228 insertions(+) create mode 100644 schema/rss-service-plan-v5.json diff --git a/schema/rss-service-plan-v5.json b/schema/rss-service-plan-v5.json new file mode 100644 index 0000000000..18a5b84a05 --- /dev/null +++ b/schema/rss-service-plan-v5.json @@ -0,0 +1,1228 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Plan", + "type": "object", + "required": [ + "dns_config", + "services" + ], + "properties": { + "dns_config": { + "$ref": "#/definitions/DnsConfigParams" + }, + "services": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/SledConfig" + } + } + }, + "definitions": { + "BlueprintZoneConfig": { + "description": "Describes one Omicron-managed zone in a blueprint.\n\nPart of [`BlueprintZonesConfig`].", + "type": "object", + "required": [ + "disposition", + "id", + "underlay_address", + "zone_type" + ], + "properties": { + "disposition": { + "description": "The disposition (desired state) of this zone recorded in the blueprint.", + "allOf": [ + { + "$ref": "#/definitions/BlueprintZoneDisposition" + } + ] + }, + "filesystem_pool": { + "anyOf": [ + { + "$ref": "#/definitions/ZpoolName" + }, + { + "type": "null" + } + ] + }, + "id": { + "$ref": "#/definitions/TypedUuidForOmicronZoneKind" + }, + "underlay_address": { + "type": "string", + "format": "ipv6" + }, + "zone_type": { + "$ref": "#/definitions/BlueprintZoneType" + } + } + }, + "BlueprintZoneDisposition": { + "description": "The desired state of an Omicron-managed zone in a blueprint.\n\nPart of [`BlueprintZoneConfig`].", + "oneOf": [ + { + "description": "The zone is in-service.", + "type": "string", + "enum": [ + "in_service" + ] + }, + { + "description": "The zone is not in service.", + "type": "string", + "enum": [ + "quiesced" + ] + }, + { + "description": "The zone is permanently 
gone.", + "type": "string", + "enum": [ + "expunged" + ] + } + ] + }, + "BlueprintZoneType": { + "oneOf": [ + { + "type": "object", + "required": [ + "address", + "dns_servers", + "external_ip", + "nic", + "ntp_servers", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dns_servers": { + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "domain": { + "type": [ + "string", + "null" + ] + }, + "external_ip": { + "$ref": "#/definitions/OmicronZoneExternalSnatIp" + }, + "nic": { + "description": "The service vNIC providing outbound connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/definitions/NetworkInterface" + } + ] + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "type": { + "type": "string", + "enum": [ + "boundary_ntp" + ] + } + } + }, + { + "description": "Used in single-node clickhouse setups", + "type": "object", + "required": [ + "address", + "dataset", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dataset", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse_keeper" + ] + } + } + }, + { + "description": "Used in replicated clickhouse setups", + "type": "object", + "required": [ + "address", + "dataset", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse_server" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dataset", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "cockroach_db" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dataset", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "crucible" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "crucible_pantry" + ] + } + } + }, + { + "type": "object", + "required": [ + "dataset", + "dns_address", + "http_address", + "nic", + "type" + ], + "properties": { + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "dns_address": { + "description": "The address at which the external DNS server is reachable.", + "allOf": [ + { + "$ref": "#/definitions/OmicronZoneExternalFloatingAddr" + } + ] + }, + "http_address": { + "description": "The address at which the external DNS server API is reachable.", + "type": "string" + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/definitions/NetworkInterface" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "external_dns" + ] + } + } + }, + { + "type": "object", + "required": [ + "dataset", + "dns_address", + "gz_address", + "gz_address_index", + "http_address", + "type" + ], + "properties": { + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + 
"dns_address": { + "type": "string" + }, + "gz_address": { + "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", + "type": "string", + "format": "ipv6" + }, + "gz_address_index": { + "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "http_address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "internal_dns" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dns_servers", + "ntp_servers", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dns_servers": { + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "domain": { + "type": [ + "string", + "null" + ] + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "type": { + "type": "string", + "enum": [ + "internal_ntp" + ] + } + } + }, + { + "type": "object", + "required": [ + "external_dns_servers", + "external_ip", + "external_tls", + "internal_address", + "nic", + "type" + ], + "properties": { + "external_dns_servers": { + "description": "External DNS servers Nexus can use to resolve external hosts.", + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "external_ip": { + "description": "The address at which the external nexus server is reachable.", + "allOf": [ + { + "$ref": "#/definitions/OmicronZoneExternalFloatingIp" + } + ] + }, + "external_tls": { + "description": "Whether Nexus's external endpoint should use TLS", + "type": "boolean" + }, + "internal_address": { + "description": "The address at which the internal nexus server is reachable.", + "type": "string" + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/definitions/NetworkInterface" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "nexus" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "oximeter" + ] + } + } + } + ] + }, + "ByteCount": { + "description": "Byte count to express memory or storage capacity.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "CompressionAlgorithm": { + "oneOf": [ + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "on" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "off" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "gzip" + ] + } + } + }, + { + "type": "object", + "required": [ + "level", + "type" + ], + "properties": { + "level": { + "$ref": "#/definitions/GzipLevel" + }, + "type": { + "type": "string", + "enum": [ + "gzip_n" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "lz4" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "lzjb" + ] + } + } + }, + { + "type": "object", + "required": [ + "type" + ], 
+ "properties": { + "type": { + "type": "string", + "enum": [ + "zle" + ] + } + } + } + ] + }, + "DatasetConfig": { + "description": "Configuration information necessary to request a single dataset", + "type": "object", + "required": [ + "compression", + "id", + "name" + ], + "properties": { + "compression": { + "description": "The compression mode to be used by the dataset", + "allOf": [ + { + "$ref": "#/definitions/CompressionAlgorithm" + } + ] + }, + "id": { + "description": "The UUID of the dataset being requested", + "allOf": [ + { + "$ref": "#/definitions/TypedUuidForDatasetKind" + } + ] + }, + "name": { + "description": "The dataset's name", + "allOf": [ + { + "$ref": "#/definitions/DatasetName" + } + ] + }, + "quota": { + "description": "The upper bound on the amount of storage used by this dataset", + "anyOf": [ + { + "$ref": "#/definitions/ByteCount" + }, + { + "type": "null" + } + ] + }, + "reservation": { + "description": "The lower bound on the amount of storage usable by this dataset", + "anyOf": [ + { + "$ref": "#/definitions/ByteCount" + }, + { + "type": "null" + } + ] + } + } + }, + "DatasetKind": { + "description": "The kind of dataset. See the `DatasetKind` enum in omicron-common for possible values.", + "type": "string" + }, + "DatasetName": { + "type": "object", + "required": [ + "kind", + "pool_name" + ], + "properties": { + "kind": { + "$ref": "#/definitions/DatasetKind" + }, + "pool_name": { + "$ref": "#/definitions/ZpoolName" + } + } + }, + "DatasetsConfig": { + "type": "object", + "required": [ + "datasets", + "generation" + ], + "properties": { + "datasets": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/DatasetConfig" + } + }, + "generation": { + "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.\n\nNote that \"Generation::new()\", AKA, the first generation number, is reserved for \"no datasets\". This is the default configuration for a sled before any requests have been made.", + "allOf": [ + { + "$ref": "#/definitions/Generation" + } + ] + } + } + }, + "DiskIdentity": { + "description": "Uniquely identifies a disk.", + "type": "object", + "required": [ + "model", + "serial", + "vendor" + ], + "properties": { + "model": { + "type": "string" + }, + "serial": { + "type": "string" + }, + "vendor": { + "type": "string" + } + } + }, + "DnsConfigParams": { + "description": "DnsConfigParams\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"generation\", \"time_created\", \"zones\" ], \"properties\": { \"generation\": { \"type\": \"integer\", \"format\": \"uint64\", \"minimum\": 0.0 }, \"time_created\": { \"type\": \"string\", \"format\": \"date-time\" }, \"zones\": { \"type\": \"array\", \"items\": { \"$ref\": \"#/components/schemas/DnsConfigZone\" } } } } ```
", + "type": "object", + "required": [ + "generation", + "time_created", + "zones" + ], + "properties": { + "generation": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "time_created": { + "type": "string", + "format": "date-time" + }, + "zones": { + "type": "array", + "items": { + "$ref": "#/definitions/DnsConfigZone" + } + } + } + }, + "DnsConfigZone": { + "description": "DnsConfigZone\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"records\", \"zone_name\" ], \"properties\": { \"records\": { \"type\": \"object\", \"additionalProperties\": { \"type\": \"array\", \"items\": { \"$ref\": \"#/components/schemas/DnsRecord\" } } }, \"zone_name\": { \"type\": \"string\" } } } ```
", + "type": "object", + "required": [ + "records", + "zone_name" + ], + "properties": { + "records": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/definitions/DnsRecord" + } + } + }, + "zone_name": { + "type": "string" + } + } + }, + "DnsRecord": { + "description": "DnsRecord\n\n
JSON schema\n\n```json { \"oneOf\": [ { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"type\": \"string\", \"format\": \"ipv4\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"A\" ] } } }, { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"type\": \"string\", \"format\": \"ipv6\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"AAAA\" ] } } }, { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"$ref\": \"#/components/schemas/Srv\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"SRV\" ] } } } ] } ```
", + "oneOf": [ + { + "type": "object", + "required": [ + "data", + "type" + ], + "properties": { + "data": { + "type": "string", + "format": "ipv4" + }, + "type": { + "type": "string", + "enum": [ + "A" + ] + } + } + }, + { + "type": "object", + "required": [ + "data", + "type" + ], + "properties": { + "data": { + "type": "string", + "format": "ipv6" + }, + "type": { + "type": "string", + "enum": [ + "AAAA" + ] + } + } + }, + { + "type": "object", + "required": [ + "data", + "type" + ], + "properties": { + "data": { + "$ref": "#/definitions/Srv" + }, + "type": { + "type": "string", + "enum": [ + "SRV" + ] + } + } + } + ] + }, + "Generation": { + "description": "Generation numbers stored in the database, used for optimistic concurrency control", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "GzipLevel": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "IpNet": { + "oneOf": [ + { + "title": "v4", + "allOf": [ + { + "$ref": "#/definitions/Ipv4Net" + } + ] + }, + { + "title": "v6", + "allOf": [ + { + "$ref": "#/definitions/Ipv6Net" + } + ] + } + ], + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::IpNet", + "version": "0.1.0" + } + }, + "Ipv4Net": { + "title": "An IPv4 subnet", + "description": "An IPv4 subnet, including prefix and prefix length", + "examples": [ + "192.168.1.0/24" + ], + "type": "string", + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv4Net", + "version": "0.1.0" + } + }, + "Ipv6Net": { + "title": "An IPv6 subnet", + "description": "An IPv6 subnet, including prefix and subnet mask", + "examples": [ + "fd12:3456::/64" + ], + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + "version": "0.1.0" + } + }, + "MacAddr": { + "title": "A MAC address", + "description": "A Media Access Control address, in EUI-48 format", + "examples": [ + "ff:ff:ff:ff:ff:ff" + ], + "type": "string", + "maxLength": 17, + "minLength": 5, + "pattern": "^([0-9a-fA-F]{0,2}:){5}[0-9a-fA-F]{0,2}$" + }, + "Name": { + "title": "A name unique within the parent collection", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. 
They can be at most 63 characters long.", + "type": "string", + "maxLength": 63, + "minLength": 1, + "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$" + }, + "NetworkInterface": { + "description": "Information required to construct a virtual network interface", + "type": "object", + "required": [ + "id", + "ip", + "kind", + "mac", + "name", + "primary", + "slot", + "subnet", + "vni" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "ip": { + "type": "string", + "format": "ip" + }, + "kind": { + "$ref": "#/definitions/NetworkInterfaceKind" + }, + "mac": { + "$ref": "#/definitions/MacAddr" + }, + "name": { + "$ref": "#/definitions/Name" + }, + "primary": { + "type": "boolean" + }, + "slot": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "subnet": { + "$ref": "#/definitions/IpNet" + }, + "transit_ips": { + "default": [], + "type": "array", + "items": { + "$ref": "#/definitions/IpNet" + } + }, + "vni": { + "$ref": "#/definitions/Vni" + } + } + }, + "NetworkInterfaceKind": { + "description": "The type of network interface", + "oneOf": [ + { + "description": "A vNIC attached to a guest instance", + "type": "object", + "required": [ + "id", + "type" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "instance" + ] + } + } + }, + { + "description": "A vNIC associated with an internal service", + "type": "object", + "required": [ + "id", + "type" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "service" + ] + } + } + }, + { + "description": "A vNIC associated with a probe", + "type": "object", + "required": [ + "id", + "type" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "probe" + ] + } + } + } + ] + }, + "OmicronPhysicalDiskConfig": { + "type": "object", + "required": [ + "id", + "identity", + "pool_id" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "identity": { + "$ref": "#/definitions/DiskIdentity" + }, + "pool_id": { + "$ref": "#/definitions/TypedUuidForZpoolKind" + } + } + }, + "OmicronPhysicalDisksConfig": { + "type": "object", + "required": [ + "disks", + "generation" + ], + "properties": { + "disks": { + "type": "array", + "items": { + "$ref": "#/definitions/OmicronPhysicalDiskConfig" + } + }, + "generation": { + "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). 
It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.", + "allOf": [ + { + "$ref": "#/definitions/Generation" + } + ] + } + } + }, + "OmicronZoneDataset": { + "description": "Describes a persistent ZFS dataset associated with an Omicron zone", + "type": "object", + "required": [ + "pool_name" + ], + "properties": { + "pool_name": { + "$ref": "#/definitions/ZpoolName" + } + } + }, + "OmicronZoneExternalFloatingAddr": { + "description": "Floating external address with port allocated to an Omicron-managed zone.", + "type": "object", + "required": [ + "addr", + "id" + ], + "properties": { + "addr": { + "type": "string" + }, + "id": { + "$ref": "#/definitions/TypedUuidForExternalIpKind" + } + } + }, + "OmicronZoneExternalFloatingIp": { + "description": "Floating external IP allocated to an Omicron-managed zone.\n\nThis is a slimmer `nexus_db_model::ExternalIp` that only stores the fields necessary for blueprint planning, and requires that the zone have a single IP.", + "type": "object", + "required": [ + "id", + "ip" + ], + "properties": { + "id": { + "$ref": "#/definitions/TypedUuidForExternalIpKind" + }, + "ip": { + "type": "string", + "format": "ip" + } + } + }, + "OmicronZoneExternalSnatIp": { + "description": "SNAT (outbound) external IP allocated to an Omicron-managed zone.\n\nThis is a slimmer `nexus_db_model::ExternalIp` that only stores the fields necessary for blueprint planning, and requires that the zone have a single IP.", + "type": "object", + "required": [ + "id", + "snat_cfg" + ], + "properties": { + "id": { + "$ref": "#/definitions/TypedUuidForExternalIpKind" + }, + "snat_cfg": { + "$ref": "#/definitions/SourceNatConfig" + } + } + }, + "SledConfig": { + "type": "object", + "required": [ + "datasets", + "disks", + "zones" + ], + "properties": { + "datasets": { + "description": "Datasets configured for this sled", + "allOf": [ + { + "$ref": "#/definitions/DatasetsConfig" + } + ] + }, + "disks": { + "description": "Control plane disks configured for this sled", + "allOf": [ + { + "$ref": "#/definitions/OmicronPhysicalDisksConfig" + } + ] + }, + "zones": { + "description": "zones configured for this sled", + "type": "array", + "items": { + "$ref": "#/definitions/BlueprintZoneConfig" + } + } + } + }, + "SourceNatConfig": { + "description": "An IP address and port range used for source NAT, i.e., making outbound network connections from guests or services.", + "type": "object", + "required": [ + "first_port", + "ip", + "last_port" + ], + "properties": { + "first_port": { + "description": "The first port used for source NAT, inclusive.", + "type": "integer", + "format": "uint16", + "minimum": 0.0 + }, + "ip": { + "description": "The external address provided to the instance or service.", + "type": "string", + "format": "ip" + }, + "last_port": { + "description": "The last port used for source NAT, also inclusive.", + "type": "integer", + "format": "uint16", + "minimum": 0.0 + } + } + }, + "Srv": { + "description": "Srv\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"port\", \"prio\", \"target\", \"weight\" ], \"properties\": { \"port\": { \"type\": \"integer\", \"format\": \"uint16\", \"minimum\": 0.0 }, \"prio\": { \"type\": \"integer\", \"format\": \"uint16\", \"minimum\": 0.0 }, \"target\": { \"type\": \"string\" }, \"weight\": { \"type\": \"integer\", \"format\": \"uint16\", \"minimum\": 0.0 } } } ```
", + "type": "object", + "required": [ + "port", + "prio", + "target", + "weight" + ], + "properties": { + "port": { + "type": "integer", + "format": "uint16", + "minimum": 0.0 + }, + "prio": { + "type": "integer", + "format": "uint16", + "minimum": 0.0 + }, + "target": { + "type": "string" + }, + "weight": { + "type": "integer", + "format": "uint16", + "minimum": 0.0 + } + } + }, + "TypedUuidForDatasetKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForExternalIpKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForOmicronZoneKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForZpoolKind": { + "type": "string", + "format": "uuid" + }, + "Vni": { + "description": "A Geneve Virtual Network Identifier", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "ZpoolName": { + "title": "The name of a Zpool", + "description": "Zpool names are of the format ox{i,p}_. They are either Internal or External, and should be unique", + "type": "string", + "pattern": "^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + } + } +} \ No newline at end of file From a379e1bc6cc8e9d934d4feff0c8b320f49499ce0 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 5 Sep 2024 10:44:42 -0700 Subject: [PATCH 54/84] Add filter --- nexus/reconfigurator/execution/src/lib.rs | 3 +- nexus/types/src/deployment.rs | 35 +++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/nexus/reconfigurator/execution/src/lib.rs b/nexus/reconfigurator/execution/src/lib.rs index 49f2c19a98..6fb7e3ead3 100644 --- a/nexus/reconfigurator/execution/src/lib.rs +++ b/nexus/reconfigurator/execution/src/lib.rs @@ -11,6 +11,7 @@ use internal_dns::resolver::Resolver; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintDatasetFilter; use nexus_types::deployment::BlueprintZoneFilter; use nexus_types::deployment::SledFilter; use nexus_types::external_api::views::SledState; @@ -186,7 +187,7 @@ pub async fn realize_blueprint_with_overrides( datasets::ensure_dataset_records_exist( &opctx, datastore, - blueprint.all_omicron_datasets(), + blueprint.all_omicron_datasets(BlueprintDatasetFilter::All), ) .await .map_err(|err| vec![err])?; diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 18deda5ca3..86ea7f3d46 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -217,10 +217,12 @@ impl Blueprint { /// Iterate over the [`BlueprintDatasetsConfig`] instances in the blueprint. pub fn all_omicron_datasets( &self, + filter: BlueprintDatasetFilter, ) -> impl Iterator { self.blueprint_datasets .iter() .flat_map(move |(_, datasets)| datasets.datasets.values()) + .filter(move |d| d.disposition.matches(filter)) } /// Iterate over the [`BlueprintZoneConfig`] instances in the blueprint @@ -765,6 +767,22 @@ pub enum BlueprintZoneFilter { ShouldDeployVpcFirewallRules, } +/// Filters that apply to blueprint datasets. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum BlueprintDatasetFilter { + // --- + // Prefer to keep this list in alphabetical order. + // --- + /// All datasets + All, + + /// Datasets that have been expunged. + Expunged, + + /// Datasets that are in-service. + InService, +} + /// Information about an Omicron physical disk as recorded in a blueprint. /// /// Part of [`Blueprint`]. 
@@ -822,6 +840,23 @@ pub enum BlueprintDatasetDisposition { Expunged, } +impl BlueprintDatasetDisposition { + pub fn matches(self, filter: BlueprintDatasetFilter) -> bool { + match self { + Self::InService => match filter { + BlueprintDatasetFilter::All => true, + BlueprintDatasetFilter::Expunged => false, + BlueprintDatasetFilter::InService => true, + }, + Self::Expunged => match filter { + BlueprintDatasetFilter::All => true, + BlueprintDatasetFilter::Expunged => true, + BlueprintDatasetFilter::InService => false, + }, + } + } +} + /// Information about a dataset as recorded in a blueprint #[derive(Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize)] pub struct BlueprintDatasetConfig { From 4568f0c1779567724841706fea186510a4caf877 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 5 Sep 2024 10:52:19 -0700 Subject: [PATCH 55/84] Strongly typed CompressionAlgorithm --- nexus/db-model/src/dataset.rs | 6 +----- nexus/db-model/src/deployment.rs | 4 ++-- .../reconfigurator/execution/src/datasets.rs | 20 +++++++++---------- .../planning/src/blueprint_builder/builder.rs | 2 +- nexus/types/src/deployment.rs | 15 +++++++------- sled-agent/src/rack_setup/service.rs | 2 +- 6 files changed, 21 insertions(+), 28 deletions(-) diff --git a/nexus/db-model/src/dataset.rs b/nexus/db-model/src/dataset.rs index a469d42ce7..a04831d51d 100644 --- a/nexus/db-model/src/dataset.rs +++ b/nexus/db-model/src/dataset.rs @@ -114,11 +114,7 @@ impl From for Dataset { zone_name, quota: bp.quota.map(ByteCount::from), reservation: bp.reservation.map(ByteCount::from), - compression: if bp.compression.is_empty() { - None - } else { - Some(bp.compression) - }, + compression: Some(bp.compression.to_string()), } } } diff --git a/nexus/db-model/src/deployment.rs b/nexus/db-model/src/deployment.rs index 5a7db0af93..122dc337ea 100644 --- a/nexus/db-model/src/deployment.rs +++ b/nexus/db-model/src/deployment.rs @@ -322,7 +322,7 @@ impl BpOmicronDataset { port: dataset_config.address.map(|addr| addr.port().into()), quota: dataset_config.quota.map(|q| q.into()), reservation: dataset_config.reservation.map(|r| r.into()), - compression: dataset_config.compression.clone(), + compression: dataset_config.compression.to_string(), } } } @@ -354,7 +354,7 @@ impl TryFrom for BlueprintDatasetConfig { address, quota: dataset.quota.map(|b| b.into()), reservation: dataset.reservation.map(|b| b.into()), - compression: dataset.compression, + compression: dataset.compression.parse()?, }) } } diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index fbbc301fc5..ba7ba20486 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -251,6 +251,7 @@ mod tests { use nexus_types::deployment::BlueprintZoneFilter; use omicron_common::api::external::ByteCount; use omicron_common::api::internal::shared::DatasetKind; + use omicron_common::disk::CompressionAlgorithm; use omicron_common::zpool_name::ZpoolName; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::ZpoolUuid; @@ -274,7 +275,7 @@ mod tests { address: Some(dataset.address), quota: None, reservation: None, - compression: String::new(), + compression: CompressionAlgorithm::Off, }) } else { None @@ -389,7 +390,7 @@ mod tests { address: None, quota: None, reservation: None, - compression: String::new(), + compression: CompressionAlgorithm::Off, }, BlueprintDatasetConfig { disposition: BlueprintDatasetDisposition::InService, @@ -399,7 +400,7 @@ mod tests { address: 
None, quota: None, reservation: None, - compression: String::new(), + compression: CompressionAlgorithm::Off, }, ]; @@ -461,11 +462,11 @@ mod tests { let first_dataset = &mut all_datasets[0]; assert_eq!(first_dataset.quota, None); assert_eq!(first_dataset.reservation, None); - assert_eq!(first_dataset.compression, ""); + assert_eq!(first_dataset.compression, CompressionAlgorithm::Off); first_dataset.quota = Some(ByteCount::from_kibibytes_u32(1)); first_dataset.reservation = Some(ByteCount::from_kibibytes_u32(2)); - first_dataset.compression = String::from("lz4"); + first_dataset.compression = CompressionAlgorithm::Lz4; let _ = first_dataset; // Update the datastore @@ -491,10 +492,7 @@ mod tests { observed_dataset.try_into().unwrap(); assert_eq!(observed_dataset.quota, first_dataset.quota,); assert_eq!(observed_dataset.reservation, first_dataset.reservation,); - assert_eq!( - observed_dataset.compression.to_string(), - first_dataset.compression, - ); + assert_eq!(observed_dataset.compression, first_dataset.compression,); } #[nexus_test] @@ -531,7 +529,7 @@ mod tests { address: None, quota: None, reservation: None, - compression: String::new(), + compression: CompressionAlgorithm::Off, }); let EnsureDatasetsResult { inserted, updated, removed } = ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) @@ -633,7 +631,7 @@ mod tests { address: None, quota: None, reservation: None, - compression: String::new(), + compression: CompressionAlgorithm::Off, }); let EnsureDatasetsResult { inserted, updated, removed } = diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 95f0cc8f3a..f52f96cce4 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -1946,7 +1946,7 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { address, quota, reservation, - compression: compression.to_string(), + compression, }; // This dataset already exists in the blueprint diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 86ea7f3d46..d587594d5e 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -26,6 +26,7 @@ use nexus_sled_agent_shared::inventory::ZoneKind; use omicron_common::api::external::ByteCount; use omicron_common::api::external::Generation; use omicron_common::api::internal::shared::DatasetKind; +use omicron_common::disk::CompressionAlgorithm; use omicron_common::disk::DatasetConfig; use omicron_common::disk::DatasetName; use omicron_common::disk::DatasetsConfig; @@ -868,20 +869,18 @@ pub struct BlueprintDatasetConfig { pub address: Option, pub quota: Option, pub reservation: Option, - pub compression: String, + pub compression: CompressionAlgorithm, } -impl TryFrom for DatasetConfig { - type Error = anyhow::Error; - - fn try_from(config: BlueprintDatasetConfig) -> Result { - Ok(Self { +impl From for DatasetConfig { + fn from(config: BlueprintDatasetConfig) -> Self { + Self { id: config.id, name: DatasetName::new(config.pool, config.kind), quota: config.quota, reservation: config.reservation, - compression: config.compression.parse()?, - }) + compression: config.compression, + } } } diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index 9505eb2d97..28ba882197 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -1475,7 +1475,7 @@ pub(crate) fn 
build_initial_blueprint_from_sled_configs( pool: d.name.pool().clone(), kind: d.name.dataset().clone(), address, - compression: d.compression.to_string(), + compression: d.compression, quota: d.quota, reservation: d.reservation, }, From 45394b80e6d2e71301d492fa69cf9079bd4eef43 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 5 Sep 2024 10:58:24 -0700 Subject: [PATCH 56/84] Better compare --- nexus/reconfigurator/preparation/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/reconfigurator/preparation/src/lib.rs b/nexus/reconfigurator/preparation/src/lib.rs index 19ef13ed91..3f41443f1f 100644 --- a/nexus/reconfigurator/preparation/src/lib.rs +++ b/nexus/reconfigurator/preparation/src/lib.rs @@ -183,7 +183,7 @@ impl PlanningInputFromDb<'_> { ) }) .collect(); - datasets.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); + datasets.sort_by(|(a, _), (b, _)| a.cmp(&b)); let mut datasets_by_zpool: BTreeMap<_, Vec<_>> = BTreeMap::new(); for (zpool_id, dataset) in datasets { datasets_by_zpool From d30cfcb1df9d06e316e1d72b42b5ecfe17d394b2 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 5 Sep 2024 11:10:46 -0700 Subject: [PATCH 57/84] fmt --- nexus/reconfigurator/planning/src/blueprint_builder/builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index f52f96cce4..13d805742a 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -231,7 +231,7 @@ impl fmt::Display for Operation { } fn zone_name(zone: &BlueprintZoneConfig) -> String { - format!("oxz_{}_{}", zone.zone_type.kind().zone_prefix(), zone.id,) + format!("oxz_{}_{}", zone.zone_type.kind().zone_prefix(), zone.id) } /// Helper for assembling a blueprint From 7b941a3aa429ea654db524294166ad9c5db3ce43 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 5 Sep 2024 11:33:08 -0700 Subject: [PATCH 58/84] misc builder updates --- .../planning/src/blueprint_builder/builder.rs | 53 +++++++++---------- 1 file changed, 26 insertions(+), 27 deletions(-) diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 13d805742a..f63a616c20 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -723,7 +723,9 @@ impl<'a> BlueprintBuilder<'a> { sled_id: SledUuid, resources: &SledResources, ) -> Result { - let (mut additions, mut updates, expunges, removals) = { + const DEBUG_QUOTA_SIZE_GB: u32 = 100; + + let (mut additions, mut updates, mut expunges, removals) = { let mut datasets_builder = BlueprintSledDatasetsBuilder::new( self.log.clone(), sled_id, @@ -743,7 +745,7 @@ impl<'a> BlueprintBuilder<'a> { datasets_builder.ensure( DatasetName::new(zpool.clone(), DatasetKind::Debug), address, - Some(ByteCount::from_gibibytes_u32(100)), + Some(ByteCount::from_gibibytes_u32(DEBUG_QUOTA_SIZE_GB)), None, CompressionAlgorithm::Off, ); @@ -849,9 +851,15 @@ impl<'a> BlueprintBuilder<'a> { // Mark unused datasets as expunged. // // This indicates that the dataset should be removed from the database. - if expunges.contains(&config.id) { + if expunges.remove(&config.id) { config.disposition = BlueprintDatasetDisposition::Expunged; } + + // Small optimization -- if no expungement nor updates are left, + // bail. 
+ if expunges.is_empty() && updates.is_empty() { + break; + } } // Remove all datasets that we've finished expunging. @@ -1885,16 +1893,15 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { resources: &'a SledResources, ) -> Self { // Gather all datasets known to the blueprint - let mut blueprint_datasets = BTreeMap::new(); + let mut blueprint_datasets: BTreeMap< + ZpoolUuid, + BTreeMap, + > = BTreeMap::new(); for dataset in datasets.current_sled_datasets(sled_id) { blueprint_datasets .entry(dataset.pool.id()) - .and_modify(|values: &mut BTreeMap<_, _>| { - values.insert(dataset.kind.clone(), dataset); - }) - .or_insert_with(|| { - BTreeMap::from([(dataset.kind.clone(), dataset)]) - }); + .or_default() + .insert(dataset.kind.clone(), dataset); } // Gather all datasets known to the database @@ -1921,11 +1928,11 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { /// Attempts to add a dataset to the builder. /// - /// - If the dataset exists in the blueprint already, use it - /// - Otherwise, if the dataset exists in the database, re-use - /// the UUID, but add it to the blueprint - /// - Otherwse, create a new dataset in both the database - /// and the blueprint + /// - If the dataset exists in the blueprint already, use it. + /// - Otherwise, if the dataset exists in the database, re-use the UUID, but + /// add it to the blueprint. + /// - Otherwse, create a new dataset in the blueprint, which will propagate + /// to the database during execution. pub fn ensure( &mut self, dataset: DatasetName, @@ -1961,12 +1968,8 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { }; target .entry(zpool_id) - .and_modify(|values: &mut BTreeMap<_, _>| { - values.insert(new_config.kind.clone(), new_config.clone()); - }) - .or_insert_with(|| { - BTreeMap::from([(new_config.kind.clone(), new_config)]) - }); + .or_default() + .insert(new_config.kind.clone(), new_config); return; } @@ -1980,12 +1983,8 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { let new_config = make_config(id); self.new_datasets .entry(zpool_id) - .and_modify(|values: &mut BTreeMap<_, _>| { - values.insert(new_config.kind.clone(), new_config.clone()); - }) - .or_insert_with(|| { - BTreeMap::from([(new_config.kind.clone(), new_config)]) - }); + .or_default() + .insert(new_config.kind.clone(), new_config); } /// Returns all datasets in the old blueprint that are not planned to be From b103fd6028189b89c130fdd5a90dbde253d39c2d Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 5 Sep 2024 11:48:38 -0700 Subject: [PATCH 59/84] Review feedback --- .../reconfigurator/execution/src/datasets.rs | 42 +++++++------------ .../planning/src/blueprint_builder/builder.rs | 24 ++--------- nexus/types/src/deployment.rs | 14 +++---- 3 files changed, 26 insertions(+), 54 deletions(-) diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index ba7ba20486..4a66f7def7 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -56,11 +56,7 @@ pub(crate) async fn deploy_datasets( &log, ); - let config: DatasetsConfig = match config.clone().try_into() { - Ok(config) => config, - Err(err) => return Some(err) - }; - + let config: DatasetsConfig = config.clone().into(); let result = client.datasets_put(&config).await.with_context( || format!("Failed to put {config:#?} to sled {sled_id}"), @@ -266,20 +262,17 @@ mod tests { blueprint .all_omicron_zones(BlueprintZoneFilter::All) .filter_map(|(_, zone)| { - if let Some(dataset) = zone.zone_type.durable_dataset() { - 
Some(BlueprintDatasetConfig { - disposition: BlueprintDatasetDisposition::InService, - id: DatasetUuid::new_v4(), - pool: dataset.dataset.pool_name.clone(), - kind: dataset.kind, - address: Some(dataset.address), - quota: None, - reservation: None, - compression: CompressionAlgorithm::Off, - }) - } else { - None - } + let dataset = zone.zone_type.durable_dataset()?; + Some(BlueprintDatasetConfig { + disposition: BlueprintDatasetDisposition::InService, + id: DatasetUuid::new_v4(), + pool: dataset.dataset.pool_name.clone(), + kind: dataset.kind, + address: Some(dataset.address), + quota: None, + reservation: None, + compression: CompressionAlgorithm::Off, + }) }) .collect::>() } @@ -467,7 +460,6 @@ mod tests { first_dataset.quota = Some(ByteCount::from_kibibytes_u32(1)); first_dataset.reservation = Some(ByteCount::from_kibibytes_u32(2)); first_dataset.compression = CompressionAlgorithm::Lz4; - let _ = first_dataset; // Update the datastore let EnsureDatasetsResult { inserted, updated, removed } = @@ -490,9 +482,9 @@ mod tests { .expect("Couldn't find dataset we tried to update?"); let observed_dataset: DatasetConfig = observed_dataset.try_into().unwrap(); - assert_eq!(observed_dataset.quota, first_dataset.quota,); - assert_eq!(observed_dataset.reservation, first_dataset.reservation,); - assert_eq!(observed_dataset.compression, first_dataset.compression,); + assert_eq!(observed_dataset.quota, first_dataset.quota); + assert_eq!(observed_dataset.reservation, first_dataset.reservation); + assert_eq!(observed_dataset.compression, first_dataset.compression); } #[nexus_test] @@ -552,7 +544,6 @@ mod tests { ); crucible_dataset.disposition = BlueprintDatasetDisposition::Expunged; let crucible_dataset_id = crucible_dataset.id; - let _ = crucible_dataset; let non_crucible_dataset = all_datasets .iter_mut() @@ -565,7 +556,6 @@ mod tests { non_crucible_dataset.disposition = BlueprintDatasetDisposition::Expunged; let non_crucible_dataset_id = non_crucible_dataset.id; - let _ = non_crucible_dataset; // Observe that we only remove one dataset. // @@ -646,7 +636,7 @@ mod tests { // a dataset, we'll just remove it from the "blueprint". // // This situation mimics a scenario where we are an "old Nexus, - // executing and old blueprint" - more datasets might be created + // executing an old blueprint" - more datasets might be created // concurrently with our execution, and we should leave them alone. 
assert_eq!(dataset_id, all_datasets.pop().unwrap().id); diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index f63a616c20..70112bb52a 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -2001,18 +2001,13 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { return false; }; - for (_, dataset_config) in datasets { - if dataset_config.id == dataset_id { - return true; - } - } - return false; + datasets.values().any(|config| config.id == dataset_id) }; let mut expunges = BTreeSet::new(); for (zpool_id, datasets) in &self.blueprint_datasets { - for (_dataset_kind, dataset_config) in datasets { + for dataset_config in datasets.values() { match dataset_config.disposition { // Already expunged; ignore BlueprintDatasetDisposition::Expunged => continue, @@ -2058,12 +2053,7 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { return false; }; - for (_, dataset_config) in datasets { - if dataset_config.id == dataset_id { - return true; - } - } - return false; + datasets.values().any(|config| config.id == dataset_id) }; let mut removals = BTreeSet::new(); @@ -2226,12 +2216,6 @@ pub mod test { for (sled_id, zone_config) in blueprint.all_omicron_zones(BlueprintZoneFilter::ShouldBeRunning) { - match blueprint.sled_state.get(&sled_id) { - // Decommissioned sleds don't keep dataset state around - None | Some(SledState::Decommissioned) => continue, - Some(SledState::Active) => (), - } - let datasets = datasets_for_sled(&blueprint, sled_id); let zpool = zone_config.filesystem_pool.as_ref().unwrap(); @@ -2716,7 +2700,7 @@ pub mod test { .collect::>(); // We saw two datasets being expunged earlier when we called // `sled_ensure_datasets` -- validate that this is true when inspecting - // the bluepirnt too. + // the blueprint too. assert_eq!(expunged_datasets.len(), 2); // Remove these two datasets from the input. diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index d587594d5e..832832b0f8 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -800,18 +800,16 @@ pub struct BlueprintDatasetsConfig { pub datasets: BTreeMap, } -impl TryFrom for DatasetsConfig { - type Error = anyhow::Error; - - fn try_from(config: BlueprintDatasetsConfig) -> Result { - Ok(Self { +impl From for DatasetsConfig { + fn from(config: BlueprintDatasetsConfig) -> Self { + Self { generation: config.generation, datasets: config .datasets .into_iter() - .map(|(id, d)| Ok((id, d.try_into()?))) - .collect::>()?, - }) + .map(|(id, d)| (id, d.into())) + .collect(), + } } } From 26c578dd66333975c82e881796f245dee7027687 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 5 Sep 2024 12:53:52 -0700 Subject: [PATCH 60/84] Fix tests --- .../planning/src/blueprint_builder/builder.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 70112bb52a..8647769c74 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -2216,6 +2216,15 @@ pub mod test { for (sled_id, zone_config) in blueprint.all_omicron_zones(BlueprintZoneFilter::ShouldBeRunning) { + match blueprint.sled_state.get(&sled_id) { + // Decommissioned sleds don't keep dataset state around. 
+ // + // Normally we wouldn't observe zones from decommissioned sleds + // anyway, but that's the responsibility of the Planner, not the + // BlueprintBuilder. + None | Some(SledState::Decommissioned) => continue, + Some(SledState::Active) => (), + } let datasets = datasets_for_sled(&blueprint, sled_id); let zpool = zone_config.filesystem_pool.as_ref().unwrap(); From 4ac2d22be9d1424ecad8664370eed4c890da36da Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 5 Sep 2024 12:59:08 -0700 Subject: [PATCH 61/84] Update openapi --- openapi/nexus-internal.json | 113 +++++++++++++++++++++++++++++++++++- 1 file changed, 112 insertions(+), 1 deletion(-) diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 16eef8d19a..827e866ebb 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -1966,7 +1966,7 @@ "type": "string" }, "compression": { - "type": "string" + "$ref": "#/components/schemas/CompressionAlgorithm" }, "disposition": { "$ref": "#/components/schemas/BlueprintDatasetDisposition" @@ -2715,6 +2715,112 @@ } ] }, + "CompressionAlgorithm": { + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "on" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "off" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "gzip" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "level": { + "$ref": "#/components/schemas/GzipLevel" + }, + "type": { + "type": "string", + "enum": [ + "gzip_n" + ] + } + }, + "required": [ + "level", + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "lz4" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "lzjb" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "zle" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "CurrentStatus": { "description": "Describes the current status of a background task", "oneOf": [ @@ -3365,6 +3471,11 @@ "format": "uint64", "minimum": 0 }, + "GzipLevel": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, "ImportExportPolicy": { "description": "Define policy relating to the import and export of prefixes from a BGP peer.", "oneOf": [ From 67c218186903a1e89370111bc8849fde0b9fdf76 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 5 Sep 2024 19:57:38 -0700 Subject: [PATCH 62/84] review feedback --- dev-tools/omdb/src/bin/omdb/db.rs | 9 ++- illumos-utils/src/zfs.rs | 101 ++++++++++++++++-------------- schema/crdb/inv-dataset/up01.sql | 1 - 3 files changed, 63 insertions(+), 48 deletions(-) diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 3cea514d6d..642a4fb31b 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -4777,7 +4777,14 @@ fn inv_collection_print_sleds(collection: &Collection) { reservation, compression, } = dataset; - println!(" {name} - id: {id:?}, compression: {compression}"); + + let id = if let Some(id) = id { + id.to_string() + } else { + String::from("none") + }; + + println!(" {name} - id: {id}, compression: {compression}"); println!(" available: {available}, used: {used}"); println!(" reservation: {reservation:?}, quota: 
{quota:?}"); } diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index 71c22425fa..d711ca4f9d 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -218,65 +218,74 @@ pub struct DatasetProperties { pub id: Option, /// The full name of the dataset. pub name: String, - /// Remaining space in the dataset and descendents. + /// Remaining space in the dataset and descendants. pub avail: ByteCount, - /// Space used by dataset and descendents. + /// Space used by dataset and descendants. pub used: ByteCount, - /// Maximum space usable by dataset and descendents. + /// Maximum space usable by dataset and descendants. pub quota: Option, - /// Minimum space guaranteed to dataset and descendents. + /// Minimum space guaranteed to dataset and descendants. pub reservation: Option, /// The compression algorithm used for this dataset. + /// + /// This probably aligns with a value from + /// [omicron_common::disk::CompressionAlgorithm], but is left as an untyped + /// string so that unexpected compression formats don't prevent inventory + /// from being collected. pub compression: String, } -impl FromStr for DatasetProperties { - type Err = anyhow::Error; +impl DatasetProperties { + // care about. + const ZFS_LIST_STR: &'static str = + "oxide:uuid,name,avail,used,quota,reservation,compression"; +} - fn from_str(s: &str) -> Result { - let mut iter = s.split_whitespace(); +// An inner parsing function, so that the FromStr implementation can always emit +// the string 's' that failed to parse in the error message. +fn dataset_properties_parse( + s: &str, +) -> Result { + let mut iter = s.split_whitespace(); - let id = match iter.next().context("Missing UUID")? { - "-" => None, - anything_else => Some(anything_else.parse::()?), - }; + let id = match iter.next().context("Missing UUID")? { + "-" => None, + anything_else => Some(anything_else.parse::()?), + }; - let name = iter.next().context("Missing 'name'")?.to_string(); - let avail = iter - .next() - .context("Missing 'avail'")? - .parse::()? - .try_into()?; - let used = iter - .next() - .context("Missing 'used'")? - .parse::()? - .try_into()?; - let quota = - match iter.next().context("Missing 'quota'")?.parse::()? { - 0 => None, - q => Some(q.try_into()?), - }; - let reservation = match iter - .next() - .context("Missing 'reservation'")? - .parse::()? - { + let name = iter.next().context("Missing 'name'")?.to_string(); + let avail = + iter.next().context("Missing 'avail'")?.parse::()?.try_into()?; + let used = + iter.next().context("Missing 'used'")?.parse::()?.try_into()?; + let quota = match iter.next().context("Missing 'quota'")?.parse::()? { + 0 => None, + q => Some(q.try_into()?), + }; + let reservation = + match iter.next().context("Missing 'reservation'")?.parse::()? 
{ 0 => None, r => Some(r.try_into()?), }; - let compression = - iter.next().context("Missing 'compression'")?.to_string(); + let compression = iter.next().context("Missing 'compression'")?.to_string(); + + Ok(DatasetProperties { + id, + name, + avail, + used, + quota, + reservation, + compression, + }) +} - Ok(DatasetProperties { - id, - name, - avail, - used, - quota, - reservation, - compression, - }) +impl FromStr for DatasetProperties { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + dataset_properties_parse(s) + .with_context(|| format!("Failed to parse: {s}")) } } @@ -316,13 +325,13 @@ impl Zfs { let cmd = command.args(&["list", "-d", "1", "-rHpo"]); // Note: this is tightly coupled with the layout of DatasetProperties - cmd.arg("oxide:uuid,name,avail,used,quota,reservation,compression"); + cmd.arg(DatasetProperties::ZFS_LIST_STR); cmd.args(datasets); let output = execute(cmd).with_context(|| { format!("Failed to get dataset properties for {datasets:?}") })?; - let stdout = String::from_utf8_lossy(&output.stdout); + let stdout = String::from_utf8(output.stdout)?; let mut datasets = stdout .trim() .split('\n') diff --git a/schema/crdb/inv-dataset/up01.sql b/schema/crdb/inv-dataset/up01.sql index 4504768c40..d3d21d16ae 100644 --- a/schema/crdb/inv-dataset/up01.sql +++ b/schema/crdb/inv-dataset/up01.sql @@ -13,4 +13,3 @@ CREATE TABLE IF NOT EXISTS omicron.public.inv_dataset ( PRIMARY KEY (inv_collection_id, sled_id, name) ); - From 557dc222dc61b8d3c6f571efeac17f7430f3def5 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 5 Sep 2024 22:53:32 -0700 Subject: [PATCH 63/84] Fix error wrapping in tests --- illumos-utils/src/zfs.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index d711ca4f9d..f92fd5d60f 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -859,7 +859,7 @@ mod test { let err = DatasetProperties::from_str(&input) .expect_err("Should have failed to parse"); assert!( - err.to_string().contains("error parsing UUID (dataset)"), + format!("{err:#}").contains("error parsing UUID (dataset)"), "{err}" ); } @@ -870,7 +870,7 @@ mod test { let err = DatasetProperties::from_str(&input) .expect_err("Should have failed to parse"); assert!( - err.to_string().contains("invalid digit found in string"), + format!("{err:#}").contains("invalid digit found in string"), "{err}" ); } @@ -881,7 +881,7 @@ mod test { let err = DatasetProperties::from_str(&input) .expect_err("Should have failed to parse"); assert!( - err.to_string().contains("invalid digit found in string"), + format!("{err:#}").contains("invalid digit found in string"), "{err}" ); } @@ -892,7 +892,7 @@ mod test { let err = DatasetProperties::from_str(&input) .expect_err("Should have failed to parse"); assert!( - err.to_string().contains("invalid digit found in string"), + format!("{err:#}").contains("invalid digit found in string"), "{err}" ); } @@ -903,7 +903,7 @@ mod test { let err = DatasetProperties::from_str(&input) .expect_err("Should have failed to parse"); assert!( - err.to_string().contains("invalid digit found in string"), + format!("{err:#}").contains("invalid digit found in string"), "{err}" ); } @@ -913,7 +913,8 @@ mod test { let expect_missing = |input: &str, what: &str| { let err = DatasetProperties::from_str(input) .expect_err("Should have failed to parse"); - assert!(err.to_string().contains(&format!("Missing {what}"))); + let err = format!("{err:#}"); + assert!(err.contains(&format!("Missing 
{what}")), "{err}"); }; expect_missing( From d4b730b08e0c977c6f6d158adefa13a08b916ae6 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 23 Sep 2024 15:40:30 -0700 Subject: [PATCH 64/84] Fix reconfigurator registration --- nexus/reconfigurator/execution/src/lib.rs | 15 +++++++++++---- nexus/types/src/deployment/execution.rs | 1 + 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/nexus/reconfigurator/execution/src/lib.rs b/nexus/reconfigurator/execution/src/lib.rs index c16f585de6..372b0c6678 100644 --- a/nexus/reconfigurator/execution/src/lib.rs +++ b/nexus/reconfigurator/execution/src/lib.rs @@ -155,6 +155,13 @@ pub async fn realize_blueprint_with_overrides( sled_list.clone(), ); + register_deploy_datasets_step( + &engine.for_component(ExecutionComponent::Datasets), + &opctx, + blueprint, + sled_list.clone(), + ); + register_deploy_zones_step( &engine.for_component(ExecutionComponent::OmicronZones), &opctx, @@ -322,14 +329,14 @@ fn register_deploy_datasets_step<'a>( opctx: &'a OpContext, blueprint: &'a Blueprint, sleds: SharedStepHandle>>, -) -> StepHandle { +) { registrar .new_step( ExecutionStepId::Ensure, "Deploy datasets", move |cx| async move { let sleds_by_id = sleds.into_value(cx.token()).await; - let done = omicron_physical_disks::deploy_datasets( + datasets::deploy_datasets( &opctx, &sleds_by_id, &blueprint.blueprint_datasets, @@ -337,10 +344,10 @@ fn register_deploy_datasets_step<'a>( .await .map_err(merge_anyhow_list)?; - StepSuccess::new(done).into() + StepSuccess::new(()).into() }, ) - .register() + .register(); } fn register_deploy_zones_step<'a>( diff --git a/nexus/types/src/deployment/execution.rs b/nexus/types/src/deployment/execution.rs index 16bf73873a..2add37b21a 100644 --- a/nexus/types/src/deployment/execution.rs +++ b/nexus/types/src/deployment/execution.rs @@ -33,6 +33,7 @@ pub enum ExecutionComponent { OmicronZones, FirewallRules, DatasetRecords, + Datasets, Dns, Cockroach, } From 8ce16e430fd78b8f96fb1f0d33c9105f31a0373f Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 23 Sep 2024 16:51:32 -0700 Subject: [PATCH 65/84] Feedback from 9/5 meeting --- .../planning/src/blueprint_builder/builder.rs | 43 +++++++++++-------- schema/rss-service-plan-v5.json | 1 + 2 files changed, 27 insertions(+), 17 deletions(-) diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index c00163bd82..8da170d11d 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -724,7 +724,7 @@ impl<'a> BlueprintBuilder<'a> { /// - If new datasets are proposed, they are added to the blueprint. /// - If datasets are changed, they are updated in the blueprint. /// - If datasets are not proposed, but they exist in the parent blueprint, - /// they are removed. + /// they are expunged. pub fn sled_ensure_datasets( &mut self, sled_id: SledUuid, @@ -1987,6 +1987,10 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { } // If the dataset exists in the datastore, re-use the UUID. + // + // TODO(https://github.com/oxidecomputer/omicron/issues/6645): We + // could avoid reading from the datastore if we were confident all + // provisioned datasets existed in the parent blueprint. 
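+        // Reusing the ID from the datastore (rather than minting a new
+        // UUID here) keeps the blueprint consistent with any dataset
+        // records that already exist for this zpool and kind.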
let id = if let Some(old_config) = self.get_from_db(zpool_id, kind) { old_config.id } else { @@ -2050,10 +2054,18 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { expunges } - /// Returns all datasets that have been expunged in a prior blueprint, - /// and which are also deleted from the database. + /// TODO(https://github.com/oxidecomputer/omicron/issues/6646): This + /// function SHOULD do the following: /// + /// Returns all datasets that have been expunged in a prior blueprint, and + /// which have also been removed from the database and from inventory. /// This is our sign that the work of expungement has completed. + /// + /// TODO: In reality, however, this function actually implements the + /// following: + /// + /// - It returns an empty BTreeSet, effectively saying "no datasets are + /// removable from the blueprint". pub fn get_removable_datasets(&self) -> BTreeSet { let dataset_exists_in = |group: &BTreeMap< @@ -2069,7 +2081,7 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { datasets.values().any(|config| config.id == dataset_id) }; - let mut removals = BTreeSet::new(); + let removals = BTreeSet::new(); for (zpool_id, datasets) in &self.blueprint_datasets { for (_kind, config) in datasets { if config.disposition == BlueprintDatasetDisposition::Expunged @@ -2080,7 +2092,11 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { ) { info!(self.log, "dataset removable (expunged, not in database)"; "id" => ?config.id); - removals.insert(config.id); + + // TODO(https://github.com/oxidecomputer/omicron/issues/6646): + // We could call `removals.insert(config.id)` here, but + // instead, opt to just log that the dataset is removable + // and keep it in the blueprint. } } } @@ -2794,19 +2810,12 @@ pub mod test { // we no longer need to keep around records of their expungement. let sled_resources = input.sled_resources(&sled_id).unwrap(); let r = builder.sled_ensure_datasets(sled_id, sled_resources).unwrap(); - assert_eq!( - r, - EnsureMultiple::Changed { - added: 0, - updated: 0, - expunged: 0, - removed: 2 - } - ); - // They should only be removed once -- repeated calls won't change the - // builder further. - let r = builder.sled_ensure_datasets(sled_id, sled_resources).unwrap(); + // TODO(https://github.com/oxidecomputer/omicron/issues/6646): + // Because of the workaround for #6646, we don't actually remove + // datasets yet. + // + // In the future, however, we will. 
assert_eq!(r, EnsureMultiple::NotNeeded); logctx.cleanup_successful(); diff --git a/schema/rss-service-plan-v5.json b/schema/rss-service-plan-v5.json index 18a5b84a05..e2268371ec 100644 --- a/schema/rss-service-plan-v5.json +++ b/schema/rss-service-plan-v5.json @@ -37,6 +37,7 @@ ] }, "filesystem_pool": { + "description": "zpool used for the zone's (transient) root filesystem", "anyOf": [ { "$ref": "#/definitions/ZpoolName" From 40c2c8eff517de7f129b5cbb161ecd30b630c089 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 23 Sep 2024 17:00:22 -0700 Subject: [PATCH 66/84] rustdocs --- .../reconfigurator/planning/src/blueprint_builder/builder.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 8da170d11d..cc40a1e2f6 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -2054,8 +2054,8 @@ impl<'a> BlueprintSledDatasetsBuilder<'a> { expunges } - /// TODO(https://github.com/oxidecomputer/omicron/issues/6646): This - /// function SHOULD do the following: + /// TODO: + /// This function SHOULD do the following: /// /// Returns all datasets that have been expunged in a prior blueprint, and /// which have also been removed from the database and from inventory. From 20b7c625c85cbbb1e5dba157b822829065f61ec5 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 25 Sep 2024 12:55:22 -0700 Subject: [PATCH 67/84] Fixing merge issues --- .../planning/src/blueprint_builder/builder.rs | 31 ++++++++++++++----- nexus/reconfigurator/planning/src/planner.rs | 21 +++++++++++-- 2 files changed, 41 insertions(+), 11 deletions(-) diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index ac25f8b33a..0c8a6ad2ac 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -1050,7 +1050,12 @@ impl<'a> BlueprintBuilder<'a> { } } - Ok(EnsureMultiple::Changed { added, removed: 0 }) + Ok(EnsureMultiple::Changed { + added, + updated: 0, + removed: 0, + expunged: 0, + }) } pub fn sled_ensure_zone_ntp( @@ -2828,7 +2833,7 @@ pub mod test { // // If we haven't changed inputs, the output should be the same! for (sled_id, resources) in - input.all_sled_resources(SledFilter::InService) + input.all_sled_resources(SledFilter::Commissioned) { let r = builder.sled_ensure_datasets(sled_id, resources).unwrap(); assert_eq!(r, EnsureMultiple::NotNeeded); @@ -2840,7 +2845,8 @@ pub mod test { .all_sled_ids(SledFilter::Commissioned) .next() .expect("at least one sled present"); - let sled_resources = input.sled_resources(&sled_id).unwrap(); + let sled_details = + input.sled_lookup(SledFilter::Commissioned, sled_id).unwrap(); let crucible_zone_id = builder .zones .current_sled_zones(sled_id, BlueprintZoneFilter::ShouldBeRunning) @@ -2857,7 +2863,9 @@ pub mod test { // In the case of Crucible, we have a durable dataset and a transient // zone filesystem, so we expect two datasets to be expunged. - let r = builder.sled_ensure_datasets(sled_id, sled_resources).unwrap(); + let r = builder + .sled_ensure_datasets(sled_id, &sled_details.resources) + .unwrap(); assert_eq!( r, EnsureMultiple::Changed { @@ -2868,7 +2876,9 @@ pub mod test { } ); // Once the datasets are expunged, no further changes will be proposed. 
- let r = builder.sled_ensure_datasets(sled_id, sled_resources).unwrap(); + let r = builder + .sled_ensure_datasets(sled_id, &sled_details.resources) + .unwrap(); assert_eq!(r, EnsureMultiple::NotNeeded); let blueprint = builder.build(); @@ -2885,7 +2895,9 @@ pub mod test { // While the datasets still exist in the input (effectively, the db) we // cannot remove them. - let r = builder.sled_ensure_datasets(sled_id, sled_resources).unwrap(); + let r = builder + .sled_ensure_datasets(sled_id, &sled_details.resources) + .unwrap(); assert_eq!(r, EnsureMultiple::NotNeeded); let blueprint = builder.build(); @@ -2937,8 +2949,11 @@ pub mod test { // Now, we should see the datasets "removed" from the blueprint, since // we no longer need to keep around records of their expungement. - let sled_resources = input.sled_resources(&sled_id).unwrap(); - let r = builder.sled_ensure_datasets(sled_id, sled_resources).unwrap(); + let sled_details = + input.sled_lookup(SledFilter::Commissioned, sled_id).unwrap(); + let r = builder + .sled_ensure_datasets(sled_id, &sled_details.resources) + .unwrap(); // TODO(https://github.com/oxidecomputer/omicron/issues/6646): // Because of the workaround for #6646, we don't actually remove diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 9d1e9388b7..b30648c315 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -1466,7 +1466,12 @@ mod test { builder .sled_ensure_zone_multiple_external_dns(sled_id, 3) .expect("can't add external DNS zones"), - EnsureMultiple::Changed { added: 0, removed: 0 }, + EnsureMultiple::Changed { + added: 0, + updated: 0, + removed: 0, + expunged: 0 + }, ); // Build a builder for a modfied blueprint that will include @@ -1505,13 +1510,23 @@ mod test { blueprint_builder .sled_ensure_zone_multiple_external_dns(sled_1, 2) .expect("can't add external DNS zones to blueprint"), - EnsureMultiple::Changed { added: 2, removed: 0 } + EnsureMultiple::Changed { + added: 2, + updated: 0, + removed: 0, + expunged: 0 + } )); assert!(matches!( blueprint_builder .sled_ensure_zone_multiple_external_dns(sled_2, 1) .expect("can't add external DNS zones to blueprint"), - EnsureMultiple::Changed { added: 1, removed: 0 } + EnsureMultiple::Changed { + added: 1, + updated: 0, + removed: 0, + expunged: 0 + } )); let blueprint1a = blueprint_builder.build(); From fdab22a75c794bbddbc8d550008a8b1848a9e347 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 25 Sep 2024 13:05:02 -0700 Subject: [PATCH 68/84] Zone names, full enumeration --- common/src/api/internal/shared.rs | 20 +++++++++---------- nexus/db-model/src/dataset.rs | 19 +++++++++++++----- nexus/db-model/src/dataset_kind.rs | 20 ++++++++++++------- .../reconfigurator/execution/src/datasets.rs | 2 +- .../planning/src/blueprint_builder/builder.rs | 14 ++++++++----- 5 files changed, 47 insertions(+), 28 deletions(-) diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index 4826292863..dff94f7255 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -853,8 +853,8 @@ pub enum DatasetKind { InternalDns, // Zone filesystems - ZoneRoot, - Zone { + TransientZoneRoot, + TransientZone { name: String, }, @@ -920,7 +920,7 @@ impl DatasetKind { match self { Cockroach | Crucible | Clickhouse | ClickhouseKeeper | ClickhouseServer | ExternalDns | InternalDns => true, - ZoneRoot | Zone { .. } | Debug => false, + TransientZoneRoot | TransientZone { .. 
} | Debug => false, } } @@ -928,7 +928,7 @@ impl DatasetKind { /// /// Otherwise, returns "None". pub fn zone_name(&self) -> Option<&str> { - if let DatasetKind::Zone { name } = self { + if let DatasetKind::TransientZone { name } = self { Some(name) } else { None @@ -952,8 +952,8 @@ impl fmt::Display for DatasetKind { ClickhouseServer => "clickhouse_server", ExternalDns => "external_dns", InternalDns => "internal_dns", - ZoneRoot => "zone", - Zone { name } => { + TransientZoneRoot => "zone", + TransientZone { name } => { write!(f, "zone/{}", name)?; return Ok(()); } @@ -982,11 +982,11 @@ impl FromStr for DatasetKind { "clickhouse_server" => ClickhouseServer, "external_dns" => ExternalDns, "internal_dns" => InternalDns, - "zone" => ZoneRoot, + "zone" => TransientZoneRoot, "debug" => Debug, other => { if let Some(name) = other.strip_prefix("zone/") { - Zone { name: name.to_string() } + TransientZone { name: name.to_string() } } else { return Err(DatasetKindParseError::UnknownDataset( s.to_string(), @@ -1076,8 +1076,8 @@ mod tests { DatasetKind::ClickhouseServer, DatasetKind::ExternalDns, DatasetKind::InternalDns, - DatasetKind::ZoneRoot, - DatasetKind::Zone { name: String::from("myzone") }, + DatasetKind::TransientZoneRoot, + DatasetKind::TransientZone { name: String::from("myzone") }, DatasetKind::Debug, ]; diff --git a/nexus/db-model/src/dataset.rs b/nexus/db-model/src/dataset.rs index a04831d51d..503a936942 100644 --- a/nexus/db-model/src/dataset.rs +++ b/nexus/db-model/src/dataset.rs @@ -64,7 +64,7 @@ impl Dataset { let kind = DatasetKind::from(&api_kind); let (size_used, zone_name) = match api_kind { ApiDatasetKind::Crucible => (Some(0), None), - ApiDatasetKind::Zone { name } => (None, Some(name)), + ApiDatasetKind::TransientZone { name } => (None, Some(name)), _ => (None, None), }; @@ -96,10 +96,19 @@ impl Dataset { impl From for Dataset { fn from(bp: BlueprintDatasetConfig) -> Self { let kind = DatasetKind::from(&bp.kind); - let (size_used, zone_name) = match bp.kind { - ApiDatasetKind::Crucible => (Some(0), None), - ApiDatasetKind::Zone { name } => (None, Some(name)), - _ => (None, None), + let zone_name = bp.kind.zone_name().map(|s| s.to_string()); + // Only Crucible uses this "size_used" field. + let size_used = match bp.kind { + ApiDatasetKind::Crucible => Some(0), + ApiDatasetKind::Cockroach + | ApiDatasetKind::Clickhouse + | ApiDatasetKind::ClickhouseKeeper + | ApiDatasetKind::ClickhouseServer + | ApiDatasetKind::ExternalDns + | ApiDatasetKind::InternalDns + | ApiDatasetKind::TransientZone { .. 
} + | ApiDatasetKind::TransientZoneRoot + | ApiDatasetKind::Debug => None, }; let addr = bp.address; Self { diff --git a/nexus/db-model/src/dataset_kind.rs b/nexus/db-model/src/dataset_kind.rs index fe782dd21f..57c8975e65 100644 --- a/nexus/db-model/src/dataset_kind.rs +++ b/nexus/db-model/src/dataset_kind.rs @@ -24,8 +24,8 @@ impl_enum_type!( ClickhouseServer => b"clickhouse_server" ExternalDns => b"external_dns" InternalDns => b"internal_dns" - ZoneRoot => b"zone_root" - Zone => b"zone" + TransientZoneRoot => b"zone_root" + TransientZone => b"zone" Debug => b"debug" ); @@ -43,10 +43,12 @@ impl DatasetKind { (Self::ClickhouseServer, None) => ApiKind::ClickhouseServer, (Self::ExternalDns, None) => ApiKind::ExternalDns, (Self::InternalDns, None) => ApiKind::InternalDns, - (Self::ZoneRoot, None) => ApiKind::ZoneRoot, - (Self::Zone, Some(name)) => ApiKind::Zone { name }, + (Self::TransientZoneRoot, None) => ApiKind::TransientZoneRoot, + (Self::TransientZone, Some(name)) => { + ApiKind::TransientZone { name } + } (Self::Debug, None) => ApiKind::Debug, - (Self::Zone, None) => { + (Self::TransientZone, None) => { return Err(Error::internal_error("Zone kind needs name")) } (_, Some(_)) => { @@ -78,12 +80,16 @@ impl From<&internal::shared::DatasetKind> for DatasetKind { internal::shared::DatasetKind::InternalDns => { DatasetKind::InternalDns } - internal::shared::DatasetKind::ZoneRoot => DatasetKind::ZoneRoot, + internal::shared::DatasetKind::TransientZoneRoot => { + DatasetKind::TransientZoneRoot + } // Enums in the database do not have associated data, so this drops // the "name" of the zone and only considers the type. // // The zone name, if it exists, is stored in a separate column. - internal::shared::DatasetKind::Zone { .. } => DatasetKind::Zone, + internal::shared::DatasetKind::TransientZone { .. 
} => { + DatasetKind::TransientZone + } internal::shared::DatasetKind::Debug => DatasetKind::Debug, } } diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index 4a66f7def7..e92ff090a9 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -389,7 +389,7 @@ mod tests { disposition: BlueprintDatasetDisposition::InService, id: DatasetUuid::new_v4(), pool: ZpoolName::new_external(new_zpool_id), - kind: DatasetKind::ZoneRoot, + kind: DatasetKind::TransientZoneRoot, address: None, quota: None, reservation: None, diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 0c8a6ad2ac..de14bf2d09 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -763,7 +763,7 @@ impl<'a> BlueprintBuilder<'a> { CompressionAlgorithm::Off, ); datasets_builder.ensure( - DatasetName::new(zpool, DatasetKind::ZoneRoot), + DatasetName::new(zpool, DatasetKind::TransientZoneRoot), address, None, None, @@ -783,7 +783,7 @@ impl<'a> BlueprintBuilder<'a> { datasets_builder.ensure( DatasetName::new( fs_zpool.clone(), - DatasetKind::Zone { name }, + DatasetKind::TransientZone { name }, ), address, None, @@ -2367,8 +2367,11 @@ pub mod test { dataset.disposition, BlueprintDatasetDisposition::InService ); - let dataset = - find_dataset(&datasets, &zpool, DatasetKind::ZoneRoot); + let dataset = find_dataset( + &datasets, + &zpool, + DatasetKind::TransientZoneRoot, + ); assert_eq!( dataset.disposition, BlueprintDatasetDisposition::InService @@ -2391,7 +2394,8 @@ pub mod test { let datasets = datasets_for_sled(&blueprint, sled_id); let zpool = zone_config.filesystem_pool.as_ref().unwrap(); - let kind = DatasetKind::Zone { name: zone_name(&zone_config) }; + let kind = + DatasetKind::TransientZone { name: zone_name(&zone_config) }; let dataset = find_dataset(&datasets, &zpool, kind); assert_eq!( dataset.disposition, From 50019ee95bd274238a35d0abd453ade8c85a5d98 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 25 Sep 2024 16:52:52 -0700 Subject: [PATCH 69/84] Make dataset creation/deletion dependent on target blueprint --- nexus/db-queries/src/db/datastore/dataset.rs | 284 +++++++++++++++--- .../db-queries/src/db/datastore/deployment.rs | 133 ++++++-- .../reconfigurator/execution/src/datasets.rs | 164 ++++++++-- nexus/reconfigurator/execution/src/lib.rs | 3 + uuid-kinds/src/lib.rs | 1 + 5 files changed, 492 insertions(+), 93 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index 3b459b1df8..759435e230 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -12,6 +12,7 @@ use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel; +use crate::db::error::retryable; use crate::db::error::ErrorHandler; use crate::db::identity::Asset; use crate::db::model::Dataset; @@ -20,10 +21,12 @@ use crate::db::model::PhysicalDiskPolicy; use crate::db::model::Zpool; use crate::db::pagination::paginated; use crate::db::pagination::Paginator; +use crate::db::TransactionError; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use diesel::upsert::excluded; +use futures::FutureExt; use 
nexus_db_model::DatasetKind; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; @@ -33,6 +36,7 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; +use omicron_uuid_kinds::BlueprintUuid; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use uuid::Uuid; @@ -56,6 +60,45 @@ impl DataStore { &self, dataset: Dataset, ) -> CreateResult { + let conn = &*self.pool_connection_unauthorized().await?; + Self::dataset_upsert_on_connection(&conn, dataset).await.map_err(|e| { + match e { + TransactionError::CustomError(e) => e, + TransactionError::Database(e) => { + public_error_from_diesel(e, ErrorHandler::Server) + } + } + }) + } + + pub async fn dataset_upsert_if_blueprint_is_enabled( + &self, + opctx: &OpContext, + bp_id: BlueprintUuid, + dataset: Dataset, + ) -> CreateResult { + let conn = self.pool_connection_unauthorized().await?; + + self.transaction_if_current_blueprint_is( + &conn, + "dataset_upsert_if_blueprint_is_enabled", + opctx, + bp_id, + |conn| { + let dataset = dataset.clone(); + async move { + Self::dataset_upsert_on_connection(&conn, dataset).await + } + .boxed() + }, + ) + .await + } + + async fn dataset_upsert_on_connection( + conn: &async_bb8_diesel::Connection, + dataset: Dataset, + ) -> Result> { use db::schema::dataset::dsl; let dataset_id = dataset.id(); @@ -78,22 +121,27 @@ impl DataStore { dsl::compression.eq(excluded(dsl::compression)), )), ) - .insert_and_get_result_async( - &*self.pool_connection_unauthorized().await?, - ) + .insert_and_get_result_async(&*conn) .await .map_err(|e| match e { - AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { - type_name: ResourceType::Zpool, - lookup_type: LookupType::ById(zpool_id), - }, - AsyncInsertError::DatabaseError(e) => public_error_from_diesel( - e, - ErrorHandler::Conflict( - ResourceType::Dataset, - &dataset_id.to_string(), - ), - ), + AsyncInsertError::CollectionNotFound => { + TransactionError::CustomError(Error::ObjectNotFound { + type_name: ResourceType::Zpool, + lookup_type: LookupType::ById(zpool_id), + }) + } + AsyncInsertError::DatabaseError(e) => { + if retryable(&e) { + return TransactionError::Database(e); + } + TransactionError::CustomError(public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::Dataset, + &dataset_id.to_string(), + ), + )) + } }) } @@ -196,12 +244,44 @@ impl DataStore { id: DatasetUuid, ) -> DeleteResult { opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + let conn = self.pool_connection_authorized(&opctx).await?; + + Self::dataset_delete_on_connection(&conn, id) + .await + .map_err(|e| e.into()) + } + + pub async fn dataset_delete_if_blueprint_is_enabled( + &self, + opctx: &OpContext, + bp_id: BlueprintUuid, + id: DatasetUuid, + ) -> DeleteResult { + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + let conn = self.pool_connection_authorized(&opctx).await?; + + self.transaction_if_current_blueprint_is( + &conn, + "dataset_delete_if_blueprint_is_enabled", + opctx, + bp_id, + |conn| { + async move { + Self::dataset_delete_on_connection(&conn, id).await + } + .boxed() + }, + ) + .await + } + async fn dataset_delete_on_connection( + conn: &async_bb8_diesel::Connection, + id: DatasetUuid, + ) -> Result<(), TransactionError> { use db::schema::dataset::dsl as dataset_dsl; let now = Utc::now(); - let conn = 
&*self.pool_connection_authorized(&opctx).await?; - let id = *id.as_untyped_uuid(); diesel::update(dataset_dsl::dataset) .filter(dataset_dsl::time_deleted.is_null()) @@ -272,27 +352,23 @@ mod test { use nexus_db_model::SledBaseboard; use nexus_db_model::SledSystemHardware; use nexus_db_model::SledUpdate; + use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_test_utils::db::test_setup_database; + use nexus_types::deployment::Blueprint; + use nexus_types::deployment::BlueprintTarget; use omicron_common::api::internal::shared::DatasetKind as ApiDatasetKind; use omicron_test_utils::dev; + use omicron_uuid_kinds::SledUuid; + use omicron_uuid_kinds::ZpoolUuid; - #[tokio::test] - async fn test_insert_if_not_exists() { - let logctx = dev::test_setup_log("inventory_insert"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - let opctx = &opctx; - - // There should be no datasets initially. - assert_eq!( - datastore.dataset_list_all_batched(opctx, None).await.unwrap(), - [] - ); - + async fn create_sled_and_zpool( + datastore: &DataStore, + opctx: &OpContext, + ) -> (SledUuid, ZpoolUuid) { // Create a fake sled that holds our fake zpool. - let sled_id = Uuid::new_v4(); + let sled_id = SledUuid::new_v4(); let sled = SledUpdate::new( - sled_id, + *sled_id.as_untyped_uuid(), "[::1]:0".parse().unwrap(), SledBaseboard { serial_number: "test-sn".to_string(), @@ -311,18 +387,41 @@ mod test { datastore.sled_upsert(sled).await.expect("failed to upsert sled"); // Create a fake zpool that backs our fake datasets. - let zpool_id = Uuid::new_v4(); - let zpool = Zpool::new(zpool_id, sled_id, Uuid::new_v4()); + let zpool_id = ZpoolUuid::new_v4(); + let zpool = Zpool::new( + *zpool_id.as_untyped_uuid(), + *sled_id.as_untyped_uuid(), + Uuid::new_v4(), + ); datastore .zpool_insert(opctx, zpool) .await .expect("failed to upsert zpool"); + (sled_id, zpool_id) + } + + #[tokio::test] + async fn test_insert_if_not_exists() { + let logctx = dev::test_setup_log("insert_if_not_exists"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + let opctx = &opctx; + + // There should be no datasets initially. + assert_eq!( + datastore.dataset_list_all_batched(opctx, None).await.unwrap(), + [] + ); + + let (_sled_id, zpool_id) = + create_sled_and_zpool(&datastore, opctx).await; + // Inserting a new dataset should succeed. 
let dataset1 = datastore .dataset_insert_if_not_exists(Dataset::new( Uuid::new_v4(), - zpool_id, + *zpool_id.as_untyped_uuid(), Some("[::1]:0".parse().unwrap()), ApiDatasetKind::Crucible, )) @@ -355,7 +454,7 @@ mod test { let insert_again_result = datastore .dataset_insert_if_not_exists(Dataset::new( dataset1.id(), - zpool_id, + *zpool_id.as_untyped_uuid(), Some("[::1]:12345".parse().unwrap()), ApiDatasetKind::Cockroach, )) @@ -371,7 +470,7 @@ mod test { let dataset2 = datastore .dataset_upsert(Dataset::new( Uuid::new_v4(), - zpool_id, + *zpool_id.as_untyped_uuid(), Some("[::1]:0".parse().unwrap()), ApiDatasetKind::Cockroach, )) @@ -403,7 +502,7 @@ mod test { let insert_again_result = datastore .dataset_insert_if_not_exists(Dataset::new( dataset1.id(), - zpool_id, + *zpool_id.as_untyped_uuid(), Some("[::1]:12345".parse().unwrap()), ApiDatasetKind::Cockroach, )) @@ -418,4 +517,115 @@ mod test { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + async fn bp_insert_and_make_target( + opctx: &OpContext, + datastore: &DataStore, + bp: &Blueprint, + ) { + datastore + .blueprint_insert(opctx, bp) + .await + .expect("inserted blueprint"); + datastore + .blueprint_target_set_current( + opctx, + BlueprintTarget { + target_id: bp.id, + enabled: true, + time_made_target: Utc::now(), + }, + ) + .await + .expect("made blueprint the target"); + } + + fn new_dataset_on(zpool_id: ZpoolUuid) -> Dataset { + Dataset::new( + Uuid::new_v4(), + *zpool_id.as_untyped_uuid(), + Some("[::1]:0".parse().unwrap()), + ApiDatasetKind::Cockroach, + ) + } + + #[tokio::test] + async fn test_upsert_and_delete_while_blueprint_changes() { + let logctx = + dev::test_setup_log("upsert_and_delete_while_blueprint_changes"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + let opctx = &opctx; + + let (sled_id, zpool_id) = + create_sled_and_zpool(&datastore, opctx).await; + + // The datastore methods don't actually read the blueprint, but they do + // guard against concurrent changes to the current target. + // + // We can test behavior by swapping between empty blueprints. 
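+        //
+        // bp0 is made the initial target, then bp1 (whose parent is bp0)
+        // replaces it, so calls that still reference bp0 must fail while
+        // calls referencing bp1 succeed.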
+ let bp0 = BlueprintBuilder::build_empty_with_sleds( + [sled_id].into_iter(), + "test", + ); + bp_insert_and_make_target(&opctx, &datastore, &bp0).await; + + let bp1 = { + let mut bp1 = bp0.clone(); + bp1.id = Uuid::new_v4(); + bp1.parent_blueprint_id = Some(bp0.id); + bp1 + }; + bp_insert_and_make_target(&opctx, &datastore, &bp1).await; + + let old_blueprint_id = BlueprintUuid::from_untyped_uuid(bp0.id); + let current_blueprint_id = BlueprintUuid::from_untyped_uuid(bp1.id); + + // Upsert referencing old blueprint: Error + datastore + .dataset_upsert_if_blueprint_is_enabled( + &opctx, + old_blueprint_id, + new_dataset_on(zpool_id), + ) + .await + .expect_err( + "Shouldn't be able to insert referencing old blueprint", + ); + + // Upsert referencing current blueprint: OK + let dataset = datastore + .dataset_upsert_if_blueprint_is_enabled( + &opctx, + current_blueprint_id, + new_dataset_on(zpool_id), + ) + .await + .expect("Should be able to insert while blueprint is active"); + + // Delete referencing old blueprint: Error + datastore + .dataset_delete_if_blueprint_is_enabled( + &opctx, + old_blueprint_id, + DatasetUuid::from_untyped_uuid(dataset.id()), + ) + .await + .expect_err( + "Shouldn't be able to delete referencing old blueprint", + ); + + // Delete referencing current blueprint: OK + datastore + .dataset_delete_if_blueprint_is_enabled( + &opctx, + current_blueprint_id, + DatasetUuid::from_untyped_uuid(dataset.id()), + ) + .await + .expect("Should be able to delete while blueprint is active"); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } } diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 251fd71392..1584850376 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -20,6 +20,8 @@ use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::DateTime; use chrono::Utc; +use core::future::Future; +use core::pin::Pin; use diesel::expression::SelectableHelper; use diesel::pg::Pg; use diesel::query_builder::AstPass; @@ -35,6 +37,7 @@ use diesel::IntoSql; use diesel::OptionalExtension; use diesel::QueryDsl; use diesel::RunQueryDsl; +use futures::FutureExt; use nexus_db_model::Blueprint as DbBlueprint; use nexus_db_model::BpOmicronDataset; use nexus_db_model::BpOmicronPhysicalDisk; @@ -60,6 +63,7 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::bail_unless; +use omicron_uuid_kinds::BlueprintUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SledUuid; use std::collections::BTreeMap; @@ -99,6 +103,76 @@ impl DataStore { Self::blueprint_insert_on_connection(&conn, opctx, blueprint).await } + /// Creates a transaction iff the current blueprint is "bp_id". + /// + /// - The transaction is retryable and named "name" + /// - The "bp_id" value is checked as the first operation within the + /// transaction. + /// - If "bp_id" is still the current target, then "f" is called, + /// within a transactional context. 
+ pub async fn transaction_if_current_blueprint_is( + &self, + conn: &async_bb8_diesel::Connection, + name: &'static str, + opctx: &OpContext, + bp_id: BlueprintUuid, + f: Func, + ) -> Result + where + Func: for<'t> Fn( + &'t async_bb8_diesel::Connection, + ) -> Pin< + Box< + dyn Future>> + + Send + + 't, + >, + > + Send + + Sync + + Clone, + R: Send + 'static, + { + let err = OptionalError::new(); + let r = self + .transaction_retry_wrapper(name) + .transaction(&conn, |conn| { + let err = err.clone(); + let f = f.clone(); + async move { + // Bail if `bp_id` is no longer the target + let target = + Self::blueprint_target_get_current_on_connection( + &conn, opctx, + ) + .await + .map_err(|txn_error| txn_error.into_diesel(&err))?; + let bp_id_current = + BlueprintUuid::from_untyped_uuid(target.target_id); + if bp_id_current != bp_id { + return Err(err.bail( + Error::invalid_request(format!( + "blueprint target has changed from {} -> {}", + bp_id, bp_id_current + )) + .into(), + )); + } + + // Otherwise, perform our actual operation + f(&conn) + .await + .map_err(|txn_error| txn_error.into_diesel(&err)) + } + .boxed() + }) + .await + .map_err(|e| match err.take() { + Some(txn_error) => txn_error.into(), + None => public_error_from_diesel(e, ErrorHandler::Server), + })?; + Ok(r) + } + /// Variant of [Self::blueprint_insert] which may be called from a /// transaction context. pub(crate) async fn blueprint_insert_on_connection( @@ -809,12 +883,11 @@ impl DataStore { .transaction_async(|conn| async move { // Ensure that blueprint we're about to delete is not the // current target. - let current_target = self - .blueprint_current_target_only( - &conn, - SelectFlavor::Standard, - ) - .await?; + let current_target = Self::blueprint_current_target_only( + &conn, + SelectFlavor::Standard, + ) + .await?; if current_target.target_id == blueprint_id { return Err(TransactionError::CustomError( Error::conflict(format!( @@ -1023,18 +1096,20 @@ impl DataStore { async move { // Bail out if `blueprint` isn't the current target. - let current_target = self - .blueprint_current_target_only( - &conn, - SelectFlavor::ForUpdate, - ) - .await - .map_err(|e| err.bail(e))?; + let current_target = Self::blueprint_current_target_only( + &conn, + SelectFlavor::ForUpdate, + ) + .await + .map_err(|txn_error| txn_error.into_diesel(&err))?; if current_target.target_id != blueprint.id { - return Err(err.bail(Error::invalid_request(format!( + return Err(err.bail( + Error::invalid_request(format!( "blueprint {} is not the current target blueprint ({})", blueprint.id, current_target.target_id - )))); + )) + .into(), + )); } // See the comment on this method; this lets us notify our test @@ -1063,7 +1138,7 @@ impl DataStore { .map(|(_sled_id, zone)| zone), ) .await - .map_err(|e| err.bail(e))?; + .map_err(|e| err.bail(e.into()))?; self.ensure_zone_external_networking_allocated_on_connection( &conn, opctx, @@ -1074,7 +1149,7 @@ impl DataStore { .map(|(_sled_id, zone)| zone), ) .await - .map_err(|e| err.bail(e))?; + .map_err(|e| err.bail(e.into()))?; // See the comment on this method; this lets us wait until our // test caller is ready for us to return. 
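(For reference, the calling pattern that `transaction_if_current_blueprint_is` expects looks roughly like the sketch below. `frob_if_blueprint_is_enabled` and `frob_on_connection` are hypothetical stand-ins mirroring the dataset upsert/delete callers added in datastore/dataset.rs; `.boxed()` comes from `futures::FutureExt`.)

    pub async fn frob_if_blueprint_is_enabled(
        &self,
        opctx: &OpContext,
        bp_id: BlueprintUuid,
    ) -> Result<(), Error> {
        let conn = self.pool_connection_authorized(opctx).await?;
        self.transaction_if_current_blueprint_is(
            &conn,
            "frob_if_blueprint_is_enabled",
            opctx,
            bp_id,
            |conn| {
                async move {
                    // Runs only if `bp_id` is still the current target when
                    // the transaction starts; otherwise the wrapper bails
                    // with an `invalid_request` error.
                    Self::frob_on_connection(&conn).await
                }
                .boxed()
            },
        )
        .await
    }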
@@ -1095,7 +1170,7 @@ impl DataStore { .await .map_err(|e| { if let Some(err) = err.take() { - err + err.into() } else { public_error_from_diesel(e, ErrorHandler::Server) } @@ -1245,9 +1320,9 @@ impl DataStore { opctx.authorize(authz::Action::Read, &authz::BLUEPRINT_CONFIG).await?; let conn = self.pool_connection_authorized(opctx).await?; - let target = self - .blueprint_current_target_only(&conn, SelectFlavor::Standard) - .await?; + let target = + Self::blueprint_current_target_only(&conn, SelectFlavor::Standard) + .await?; // The blueprint for the current target cannot be deleted while it is // the current target, but it's possible someone else (a) made a new @@ -1261,6 +1336,15 @@ impl DataStore { Ok((target, blueprint)) } + /// Get the current target blueprint, if one exists + pub async fn blueprint_target_get_current_on_connection( + conn: &async_bb8_diesel::Connection, + opctx: &OpContext, + ) -> Result> { + opctx.authorize(authz::Action::Read, &authz::BLUEPRINT_CONFIG).await?; + Self::blueprint_current_target_only(&conn, SelectFlavor::Standard).await + } + /// Get the current target blueprint, if one exists pub async fn blueprint_target_get_current( &self, @@ -1268,7 +1352,9 @@ impl DataStore { ) -> Result { opctx.authorize(authz::Action::Read, &authz::BLUEPRINT_CONFIG).await?; let conn = self.pool_connection_authorized(opctx).await?; - self.blueprint_current_target_only(&conn, SelectFlavor::Standard).await + Self::blueprint_current_target_only(&conn, SelectFlavor::Standard) + .await + .map_err(|e| e.into()) } // Helper to fetch the current blueprint target (without fetching the entire @@ -1276,10 +1362,9 @@ impl DataStore { // // Caller is responsible for checking authz for this operation. async fn blueprint_current_target_only( - &self, conn: &async_bb8_diesel::Connection, select_flavor: SelectFlavor, - ) -> Result { + ) -> Result> { use db::schema::bp_target::dsl; let query_result = match select_flavor { diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index e92ff090a9..72a63be737 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -19,6 +19,7 @@ use nexus_types::deployment::BlueprintDatasetsConfig; use nexus_types::identity::Asset; use omicron_common::disk::DatasetConfig; use omicron_common::disk::DatasetsConfig; +use omicron_uuid_kinds::BlueprintUuid; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SledUuid; @@ -123,6 +124,7 @@ pub(crate) struct EnsureDatasetsResult { pub(crate) async fn ensure_dataset_records_exist( opctx: &OpContext, datastore: &DataStore, + bp_id: BlueprintUuid, bp_datasets: impl Iterator, ) -> anyhow::Result { // Before attempting to insert any datasets, first query for any existing @@ -175,9 +177,12 @@ pub(crate) async fn ensure_dataset_records_exist( }; let dataset = Dataset::from(bp_dataset.clone()); - datastore.dataset_upsert(dataset).await.with_context(|| { - format!("failed to upsert dataset record for dataset {id}") - })?; + datastore + .dataset_upsert_if_blueprint_is_enabled(&opctx, bp_id, dataset) + .await + .with_context(|| { + format!("failed to upsert dataset record for dataset {id}") + })?; info!( opctx.log, @@ -202,7 +207,13 @@ pub(crate) async fn ensure_dataset_records_exist( continue; } - datastore.dataset_delete(&opctx, bp_dataset.id).await?; + datastore + .dataset_delete_if_blueprint_is_enabled( + &opctx, + bp_id, + bp_dataset.id, + ) + .await?; num_removed += 1; } } 
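(Call sites now thread the target blueprint ID through as well; a minimal sketch of the calling pattern, assuming a `blueprint` and a collection `all_datasets` of `BlueprintDatasetConfig` as in the tests below:)

    let bp_id = BlueprintUuid::from_untyped_uuid(blueprint.id);
    let EnsureDatasetsResult { inserted, updated, removed } =
        ensure_dataset_records_exist(opctx, datastore, bp_id, all_datasets.iter())
            .await?;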
@@ -291,7 +302,11 @@ mod tests { let opctx = &opctx; // Use the standard example system. - let (collection, _, blueprint) = example(&opctx.log, TEST_NAME, 5); + let (collection, _, mut blueprint) = example(&opctx.log, TEST_NAME, 5); + + // Set the target so our database-modifying operations know they + // can safely act on the current target blueprint. + update_blueprint_target(&datastore, &opctx, &mut blueprint).await; // Record the sleds and zpools. crate::tests::insert_sled_records(datastore, &blueprint).await; @@ -320,10 +335,16 @@ mod tests { let nzones_with_durable_datasets = all_datasets.len(); assert!(nzones_with_durable_datasets > 0); + let bp_id = BlueprintUuid::from_untyped_uuid(blueprint.id); let EnsureDatasetsResult { inserted, updated, removed } = - ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) - .await - .expect("failed to ensure datasets"); + ensure_dataset_records_exist( + opctx, + datastore, + bp_id, + all_datasets.iter(), + ) + .await + .expect("failed to ensure datasets"); // We should have inserted a dataset for each zone with a durable // dataset. @@ -341,9 +362,14 @@ mod tests { // Ensuring the same datasets again should insert no new records. let EnsureDatasetsResult { inserted, updated, removed } = - ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) - .await - .expect("failed to ensure datasets"); + ensure_dataset_records_exist( + opctx, + datastore, + bp_id, + all_datasets.iter(), + ) + .await + .expect("failed to ensure datasets"); assert_eq!(inserted, 0); assert_eq!(updated, 0); assert_eq!(removed, 0); @@ -401,6 +427,7 @@ mod tests { ensure_dataset_records_exist( opctx, datastore, + bp_id, all_datasets.iter().chain(&new_zones), ) .await @@ -418,6 +445,34 @@ mod tests { ); } + // Sets the target blueprint to "blueprint" + // + // Reads the current target, and uses it as the "parent" blueprint + async fn update_blueprint_target( + datastore: &DataStore, + opctx: &OpContext, + blueprint: &mut Blueprint, + ) { + // Fetch the initial blueprint installed during rack initialization. + let parent_blueprint_target = datastore + .blueprint_target_get_current(&opctx) + .await + .expect("failed to read current target blueprint"); + blueprint.parent_blueprint_id = Some(parent_blueprint_target.target_id); + datastore.blueprint_insert(&opctx, &blueprint).await.unwrap(); + datastore + .blueprint_target_set_current( + &opctx, + nexus_types::deployment::BlueprintTarget { + target_id: blueprint.id, + enabled: true, + time_made_target: nexus_inventory::now_db_precision(), + }, + ) + .await + .unwrap(); + } + #[nexus_test] async fn test_dataset_records_update(cptestctx: &ControlPlaneTestContext) { const TEST_NAME: &str = "test_dataset_records_update"; @@ -432,7 +487,11 @@ mod tests { let opctx = &opctx; // Use the standard example system. - let (_, _, blueprint) = example(&opctx.log, TEST_NAME, 5); + let (_, _, mut blueprint) = example(&opctx.log, TEST_NAME, 5); + + // Set the target so our database-modifying operations know they + // can safely act on the current target blueprint. + update_blueprint_target(&datastore, &opctx, &mut blueprint).await; // Record the sleds and zpools. 
crate::tests::insert_sled_records(datastore, &blueprint).await; @@ -442,10 +501,16 @@ mod tests { .await; let mut all_datasets = get_all_datasets_from_zones(&blueprint); + let bp_id = BlueprintUuid::from_untyped_uuid(blueprint.id); let EnsureDatasetsResult { inserted, updated, removed } = - ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) - .await - .expect("failed to ensure datasets"); + ensure_dataset_records_exist( + opctx, + datastore, + bp_id, + all_datasets.iter(), + ) + .await + .expect("failed to ensure datasets"); assert_eq!(inserted, all_datasets.len()); assert_eq!(updated, 0); assert_eq!(removed, 0); @@ -463,9 +528,14 @@ mod tests { // Update the datastore let EnsureDatasetsResult { inserted, updated, removed } = - ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) - .await - .expect("failed to ensure datasets"); + ensure_dataset_records_exist( + opctx, + datastore, + bp_id, + all_datasets.iter(), + ) + .await + .expect("failed to ensure datasets"); assert_eq!(inserted, 0); assert_eq!(updated, 1); assert_eq!(removed, 0); @@ -501,7 +571,11 @@ mod tests { let opctx = &opctx; // Use the standard example system. - let (_, _, blueprint) = example(&opctx.log, TEST_NAME, 5); + let (_, _, mut blueprint) = example(&opctx.log, TEST_NAME, 5); + + // Set the target so our database-modifying operations know they + // can safely act on the current target blueprint. + update_blueprint_target(&datastore, &opctx, &mut blueprint).await; // Record the sleds and zpools. crate::tests::insert_sled_records(datastore, &blueprint).await; @@ -523,10 +597,16 @@ mod tests { reservation: None, compression: CompressionAlgorithm::Off, }); + let bp_id = BlueprintUuid::from_untyped_uuid(blueprint.id); let EnsureDatasetsResult { inserted, updated, removed } = - ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) - .await - .expect("failed to ensure datasets"); + ensure_dataset_records_exist( + opctx, + datastore, + bp_id, + all_datasets.iter(), + ) + .await + .expect("failed to ensure datasets"); assert_eq!(inserted, all_datasets.len()); assert_eq!(updated, 0); assert_eq!(removed, 0); @@ -563,9 +643,14 @@ mod tests { // dataset, where we punt the deletion to a background task. let EnsureDatasetsResult { inserted, updated, removed } = - ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) - .await - .expect("failed to ensure datasets"); + ensure_dataset_records_exist( + opctx, + datastore, + bp_id, + all_datasets.iter(), + ) + .await + .expect("failed to ensure datasets"); assert_eq!(inserted, 0); assert_eq!(updated, 0); assert_eq!(removed, 1); @@ -600,7 +685,11 @@ mod tests { let opctx = &opctx; // Use the standard example system. - let (_, _, blueprint) = example(&opctx.log, TEST_NAME, 5); + let (_, _, mut blueprint) = example(&opctx.log, TEST_NAME, 5); + + // Set the target so our database-modifying operations know they + // can safely act on the current target blueprint. + update_blueprint_target(&datastore, &opctx, &mut blueprint).await; // Record the sleds and zpools. 
crate::tests::insert_sled_records(datastore, &blueprint).await; @@ -624,10 +713,16 @@ mod tests { compression: CompressionAlgorithm::Off, }); + let bp_id = BlueprintUuid::from_untyped_uuid(blueprint.id); let EnsureDatasetsResult { inserted, updated, removed } = - ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) - .await - .expect("failed to ensure datasets"); + ensure_dataset_records_exist( + opctx, + datastore, + bp_id, + all_datasets.iter(), + ) + .await + .expect("failed to ensure datasets"); assert_eq!(inserted, all_datasets.len()); assert_eq!(updated, 0); assert_eq!(removed, 0); @@ -642,9 +737,14 @@ mod tests { // Observe that no datasets are removed. let EnsureDatasetsResult { inserted, updated, removed } = - ensure_dataset_records_exist(opctx, datastore, all_datasets.iter()) - .await - .expect("failed to ensure datasets"); + ensure_dataset_records_exist( + opctx, + datastore, + bp_id, + all_datasets.iter(), + ) + .await + .expect("failed to ensure datasets"); assert_eq!(inserted, 0); assert_eq!(updated, 0); assert_eq!(removed, 0); diff --git a/nexus/reconfigurator/execution/src/lib.rs b/nexus/reconfigurator/execution/src/lib.rs index 372b0c6678..0472024ae0 100644 --- a/nexus/reconfigurator/execution/src/lib.rs +++ b/nexus/reconfigurator/execution/src/lib.rs @@ -20,6 +20,7 @@ use nexus_types::identity::Asset; use omicron_common::address::Ipv6Subnet; use omicron_common::address::SLED_PREFIX; use omicron_physical_disks::DeployDisksDone; +use omicron_uuid_kinds::BlueprintUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::SledUuid; @@ -415,6 +416,7 @@ fn register_dataset_records_step<'a>( datastore: &'a DataStore, blueprint: &'a Blueprint, ) { + let bp_id = BlueprintUuid::from_untyped_uuid(blueprint.id); registrar .new_step( ExecutionStepId::Ensure, @@ -423,6 +425,7 @@ fn register_dataset_records_step<'a>( datasets::ensure_dataset_records_exist( &opctx, datastore, + bp_id, blueprint.all_omicron_datasets(BlueprintDatasetFilter::All), ) .await?; diff --git a/uuid-kinds/src/lib.rs b/uuid-kinds/src/lib.rs index ba586c03a5..7947062a82 100644 --- a/uuid-kinds/src/lib.rs +++ b/uuid-kinds/src/lib.rs @@ -49,6 +49,7 @@ macro_rules! impl_typed_uuid_kind { // Please keep this list in alphabetical order. impl_typed_uuid_kind! 
{ + Blueprint => "blueprint", Collection => "collection", Dataset => "dataset", DemoSaga => "demo_saga", From 40bbfab0eb7154b578b1ba2d16292eb6ca155e6a Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 25 Sep 2024 17:10:07 -0700 Subject: [PATCH 70/84] Clippy --- nexus/db-queries/src/db/datastore/dataset.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index 759435e230..49c2398ef6 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -121,7 +121,7 @@ impl DataStore { dsl::compression.eq(excluded(dsl::compression)), )), ) - .insert_and_get_result_async(&*conn) + .insert_and_get_result_async(conn) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => { From 24b0341b357c7e7ec98ae5026b57a629bf6dc5cc Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 25 Sep 2024 18:01:58 -0700 Subject: [PATCH 71/84] Shared zone name --- Cargo.lock | 1 + illumos-utils/src/running_zone.rs | 19 +++++++++---------- illumos-utils/src/zone.rs | 9 +++++++++ nexus/reconfigurator/planning/Cargo.toml | 1 + .../planning/src/blueprint_builder/builder.rs | 5 ++++- sled-agent/src/instance.rs | 8 ++++++-- sled-agent/src/params.rs | 3 +-- sled-agent/src/probe_manager.rs | 3 ++- sled-agent/src/services.rs | 4 +--- 9 files changed, 34 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ef7044b212..f200f19362 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5843,6 +5843,7 @@ dependencies = [ "debug-ignore", "expectorate", "gateway-client", + "illumos-utils", "indexmap 2.4.0", "internal-dns", "ipnet", diff --git a/illumos-utils/src/running_zone.rs b/illumos-utils/src/running_zone.rs index 5dbe4338cf..1dda130c95 100644 --- a/illumos-utils/src/running_zone.rs +++ b/illumos-utils/src/running_zone.rs @@ -12,12 +12,13 @@ use crate::dladm::Etherstub; use crate::link::{Link, VnicAllocator}; use crate::opte::{Port, PortTicket}; use crate::svc::wait_for_service; -use crate::zone::{AddressRequest, ZONE_PREFIX}; +use crate::zone::AddressRequest; use crate::zpool::{PathInPool, ZpoolName}; use camino::{Utf8Path, Utf8PathBuf}; use camino_tempfile::Utf8TempDir; use ipnetwork::IpNetwork; use omicron_common::backoff; +use omicron_uuid_kinds::OmicronZoneUuid; pub use oxlog::is_oxide_smf_log_file; use slog::{error, info, o, warn, Logger}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; @@ -26,7 +27,6 @@ use std::sync::Arc; use std::sync::OnceLock; #[cfg(target_os = "illumos")] use std::thread; -use uuid::Uuid; #[cfg(any(test, feature = "testing"))] use crate::zone::MockZones as Zones; @@ -947,12 +947,11 @@ impl InstalledZone { /// /// This results in a zone name which is distinct across different zpools, /// but stable and predictable across reboots. - pub fn get_zone_name(zone_type: &str, unique_name: Option) -> String { - let mut zone_name = format!("{}{}", ZONE_PREFIX, zone_type); - if let Some(suffix) = unique_name { - zone_name.push_str(&format!("_{}", suffix)); - } - zone_name + pub fn get_zone_name( + zone_type: &str, + unique_name: Option, + ) -> String { + crate::zone::zone_name(zone_type, unique_name) } /// Get the name of the bootstrap VNIC in the zone, if any. @@ -1055,7 +1054,7 @@ pub struct ZoneBuilder<'a> { // builder purposes - that is, skipping this field in the builder will // still result in an `Ok(InstalledZone)` from `.install()`, rather than // an `Err(InstallZoneError::IncompleteBuilder)`. 
- unique_name: Option, + unique_name: Option, /// ZFS datasets to be accessed from within the zone. datasets: Option<&'a [zone::Dataset]>, /// Filesystems to mount within the zone. @@ -1119,7 +1118,7 @@ impl<'a> ZoneBuilder<'a> { } /// Unique ID of the instance of the zone being created. (optional) - pub fn with_unique_name(mut self, uuid: Uuid) -> Self { + pub fn with_unique_name(mut self, uuid: OmicronZoneUuid) -> Self { self.unique_name = Some(uuid); self } diff --git a/illumos-utils/src/zone.rs b/illumos-utils/src/zone.rs index 47cc84dce6..da08c7b7df 100644 --- a/illumos-utils/src/zone.rs +++ b/illumos-utils/src/zone.rs @@ -17,6 +17,7 @@ use crate::dladm::{EtherstubVnic, VNIC_PREFIX_BOOTSTRAP, VNIC_PREFIX_CONTROL}; use crate::zpool::PathInPool; use crate::{execute, PFEXEC}; use omicron_common::address::SLED_PREFIX; +use omicron_uuid_kinds::OmicronZoneUuid; const DLADM: &str = "/usr/sbin/dladm"; pub const IPADM: &str = "/usr/sbin/ipadm"; @@ -29,6 +30,14 @@ pub const ROUTE: &str = "/usr/sbin/route"; pub const ZONE_PREFIX: &str = "oxz_"; pub const PROPOLIS_ZONE_PREFIX: &str = "oxz_propolis-server_"; +pub fn zone_name(prefix: &str, id: Option) -> String { + if let Some(id) = id { + format!("{ZONE_PREFIX}{}_{}", prefix, id) + } else { + format!("{ZONE_PREFIX}{}", prefix) + } +} + #[derive(thiserror::Error, Debug)] enum Error { #[error("Zone execution error: {0}")] diff --git a/nexus/reconfigurator/planning/Cargo.toml b/nexus/reconfigurator/planning/Cargo.toml index a66aa11309..5da6e9a944 100644 --- a/nexus/reconfigurator/planning/Cargo.toml +++ b/nexus/reconfigurator/planning/Cargo.toml @@ -12,6 +12,7 @@ clickhouse-admin-types.workspace = true chrono.workspace = true debug-ignore.workspace = true gateway-client.workspace = true +illumos-utils.workspace = true indexmap.workspace = true internal-dns.workspace = true ipnet.workspace = true diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index de14bf2d09..3b42b38c2e 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -238,7 +238,10 @@ impl fmt::Display for Operation { } fn zone_name(zone: &BlueprintZoneConfig) -> String { - format!("oxz_{}_{}", zone.zone_type.kind().zone_prefix(), zone.id) + illumos_utils::zone::zone_name( + zone.zone_type.kind().zone_prefix(), + Some(zone.id), + ) } /// Helper for assembling a blueprint diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs index 071c70a497..9909f1e52c 100644 --- a/sled-agent/src/instance.rs +++ b/sled-agent/src/instance.rs @@ -32,7 +32,9 @@ use omicron_common::api::internal::shared::{ use omicron_common::backoff; use omicron_common::zpool_name::ZpoolName; use omicron_common::NoDebug; -use omicron_uuid_kinds::{GenericUuid, InstanceUuid, PropolisUuid}; +use omicron_uuid_kinds::{ + GenericUuid, InstanceUuid, OmicronZoneUuid, PropolisUuid, +}; use propolis_client::Client as PropolisClient; use rand::prelude::IteratorRandom; use rand::SeedableRng; @@ -1417,7 +1419,9 @@ impl InstanceRunner { .with_zone_root_path(root) .with_zone_image_paths(&["/opt/oxide".into()]) .with_zone_type("propolis-server") - .with_unique_name(self.propolis_id.into_untyped_uuid()) + .with_unique_name(OmicronZoneUuid::from_untyped_uuid( + self.propolis_id.into_untyped_uuid(), + )) .with_datasets(&[]) .with_filesystems(&[]) .with_data_links(&[]) diff --git a/sled-agent/src/params.rs b/sled-agent/src/params.rs index 
f9c0f117ba..de0b086752 100644 --- a/sled-agent/src/params.rs +++ b/sled-agent/src/params.rs @@ -4,7 +4,6 @@ use nexus_sled_agent_shared::inventory::{OmicronZoneConfig, OmicronZoneType}; use omicron_common::disk::{DatasetKind, DatasetName}; -use omicron_uuid_kinds::GenericUuid; pub use sled_hardware::DendriteAsic; use std::net::SocketAddrV6; @@ -20,7 +19,7 @@ impl OmicronZoneConfigExt for OmicronZoneConfig { fn zone_name(&self) -> String { illumos_utils::running_zone::InstalledZone::get_zone_name( self.zone_type.kind().zone_prefix(), - Some(self.id.into_untyped_uuid()), + Some(self.id), ) } } diff --git a/sled-agent/src/probe_manager.rs b/sled-agent/src/probe_manager.rs index 42186f66e9..fb1399a9c2 100644 --- a/sled-agent/src/probe_manager.rs +++ b/sled-agent/src/probe_manager.rs @@ -16,6 +16,7 @@ use omicron_common::api::external::{ use omicron_common::api::internal::shared::{ NetworkInterface, ResolvedVpcFirewallRule, }; +use omicron_uuid_kinds::{GenericUuid, OmicronZoneUuid}; use rand::prelude::IteratorRandom; use rand::SeedableRng; use sled_storage::dataset::ZONE_DATASET; @@ -330,7 +331,7 @@ impl ProbeManagerInner { .with_zone_root_path(zone_root_path) .with_zone_image_paths(&["/opt/oxide".into()]) .with_zone_type("probe") - .with_unique_name(probe.id) + .with_unique_name(OmicronZoneUuid::from_untyped_uuid(probe.id)) .with_datasets(&[]) .with_filesystems(&[]) .with_data_links(&[]) diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index f805267977..26391f2ce1 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -93,7 +93,6 @@ use omicron_common::backoff::{ use omicron_common::disk::{DatasetKind, DatasetName}; use omicron_common::ledger::{self, Ledger, Ledgerable}; use omicron_ddm_admin_client::{Client as DdmAdminClient, DdmError}; -use omicron_uuid_kinds::GenericUuid; use once_cell::sync::OnceCell; use rand::prelude::SliceRandom; use sled_agent_types::{ @@ -1529,8 +1528,7 @@ impl ServiceManager { Some(dir) => ZoneBuilderFactory::fake(Some(dir)).builder(), }; if let Some(uuid) = unique_name { - zone_builder = - zone_builder.with_unique_name(uuid.into_untyped_uuid()); + zone_builder = zone_builder.with_unique_name(uuid); } if let Some(vnic) = bootstrap_vnic { zone_builder = zone_builder.with_bootstrap_vnic(vnic); From 60ff0440b0d0d6e63ef5dd79dde67da27a0fbc37 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 25 Sep 2024 18:14:29 -0700 Subject: [PATCH 72/84] . --- nexus/reconfigurator/planning/src/blueprint_builder/builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 3b42b38c2e..9aa7c31234 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -872,7 +872,7 @@ impl<'a> BlueprintBuilder<'a> { } // Small optimization -- if no expungement nor updates are left, - // bail. 
+ // bail if expunges.is_empty() && updates.is_empty() { break; } From 60c3c1a520dbc7971bbe66073694f9fe9f18f8bb Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 26 Sep 2024 08:57:16 -0700 Subject: [PATCH 73/84] sort --- nexus/reconfigurator/preparation/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/reconfigurator/preparation/src/lib.rs b/nexus/reconfigurator/preparation/src/lib.rs index b29a775031..e54f50e910 100644 --- a/nexus/reconfigurator/preparation/src/lib.rs +++ b/nexus/reconfigurator/preparation/src/lib.rs @@ -196,7 +196,7 @@ impl PlanningInputFromDb<'_> { ) }) .collect(); - datasets.sort_by(|(a, _), (b, _)| a.cmp(&b)); + datasets.sort_unstable_by_key(|(zpool_id, _)| *zpool_id); let mut datasets_by_zpool: BTreeMap<_, Vec<_>> = BTreeMap::new(); for (zpool_id, dataset) in datasets { datasets_by_zpool From 7ee5cc5feeb4c0de392aace5cf56bd4f5dc177e1 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 26 Sep 2024 09:02:17 -0700 Subject: [PATCH 74/84] Add extra safety checks --- .../planning/src/blueprint_builder/builder.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 9aa7c31234..8b3e0d342e 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -878,6 +878,19 @@ impl<'a> BlueprintBuilder<'a> { } } + // These conditions should be dead-code, and arguably could be + // assertions, but are safety nets to catch programming errors. + if !expunges.is_empty() { + return Err(Error::Planner(anyhow!( + "Should have marked all expunged datasets" + ))); + } + if !updates.is_empty() { + return Err(Error::Planner(anyhow!( + "Should have applied all updates" + ))); + } + // Remove all datasets that we've finished expunging. datasets.retain(|_id, d| { if removals.contains(&d.id) { From d0ad50c9f5f9ebff552bd4d9b1c18586b7e16d57 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 26 Sep 2024 09:06:10 -0700 Subject: [PATCH 75/84] Comment on compression --- common/src/disk.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/common/src/disk.rs b/common/src/disk.rs index bf1dc1de58..9acaba207b 100644 --- a/common/src/disk.rs +++ b/common/src/disk.rs @@ -215,6 +215,7 @@ pub enum CompressionAlgorithm { Zle, } +/// These match the arguments which can be passed to "zfs set compression=..." 
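A quick illustration of what that comment means in practice (not part of the patch; the exact string shown for the variant is an assumption):

    // Illustrative sketch: the Display output is meant to be usable verbatim
    // as the value in `zfs set compression=<value> <dataset>`.
    let alg = CompressionAlgorithm::Zle;
    assert_eq!(alg.to_string(), "zle");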
impl fmt::Display for CompressionAlgorithm { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use CompressionAlgorithm::*; From 3ba53d6bf25b0b444580531a36f4e1804ee46873 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 26 Sep 2024 09:14:56 -0700 Subject: [PATCH 76/84] reconfigurator format --- dev-tools/reconfigurator-cli/src/main.rs | 7 +++++-- .../tests/output/cmd-stdout | 20 +++++++++---------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/dev-tools/reconfigurator-cli/src/main.rs b/dev-tools/reconfigurator-cli/src/main.rs index ebd3685602..350dd09d16 100644 --- a/dev-tools/reconfigurator-cli/src/main.rs +++ b/dev-tools/reconfigurator-cli/src/main.rs @@ -617,9 +617,12 @@ fn cmd_sled_show( swriteln!(s, "sled {}", sled_id); swriteln!(s, "subnet {}", sled_resources.subnet.net()); swriteln!(s, "zpools ({}):", sled_resources.zpools.len()); - for (zpool, disk) in &sled_resources.zpools { + for (zpool, (disk, datasets)) in &sled_resources.zpools { swriteln!(s, " {:?}", zpool); - swriteln!(s, " ↳ {:?}", disk); + swriteln!(s, " {:?}", disk); + for dataset in datasets { + swriteln!(s, " ↳ {:?}", dataset); + } } Ok(Some(s)) } diff --git a/dev-tools/reconfigurator-cli/tests/output/cmd-stdout b/dev-tools/reconfigurator-cli/tests/output/cmd-stdout index cf0ab2063b..f24a516242 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmd-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmd-stdout @@ -24,25 +24,25 @@ sled ..................... subnet fd00:1122:3344:101::/64 zpools (10): ..................... (zpool) - ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." 
}, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ (SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active }, []) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } > sled-add ..................... From 4a4fcf3ee49d6aa351c93ffa9012c6563771ace2 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 26 Sep 2024 09:15:02 -0700 Subject: [PATCH 77/84] Compression algorithm comment --- nexus/db-model/src/dataset.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nexus/db-model/src/dataset.rs b/nexus/db-model/src/dataset.rs index 503a936942..3086da058f 100644 --- a/nexus/db-model/src/dataset.rs +++ b/nexus/db-model/src/dataset.rs @@ -51,6 +51,12 @@ pub struct Dataset { quota: Option, reservation: Option, + // This is the stringified form of + // "omicron_common::disk::CompressionAlgorithm". + // + // It can't serialize to the database without forcing omicron_common to + // depend on Diesel -- we could create a newtype, but "to_string" and + // "parse" cover this usage similarly. 
compression: Option, } From 3c7e771daf53f5239fc4f6b3f5262f5fdc2aacb3 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 26 Sep 2024 09:18:08 -0700 Subject: [PATCH 78/84] current_target --- nexus/db-queries/src/db/datastore/dataset.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index 49c2398ef6..f0fea670e7 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -71,7 +71,7 @@ impl DataStore { }) } - pub async fn dataset_upsert_if_blueprint_is_enabled( + pub async fn dataset_upsert_if_blueprint_is_current_target( &self, opctx: &OpContext, bp_id: BlueprintUuid, @@ -81,7 +81,7 @@ impl DataStore { self.transaction_if_current_blueprint_is( &conn, - "dataset_upsert_if_blueprint_is_enabled", + "dataset_upsert_if_blueprint_is_current_target", opctx, bp_id, |conn| { @@ -251,7 +251,7 @@ impl DataStore { .map_err(|e| e.into()) } - pub async fn dataset_delete_if_blueprint_is_enabled( + pub async fn dataset_delete_if_blueprint_is_current_target( &self, opctx: &OpContext, bp_id: BlueprintUuid, @@ -262,7 +262,7 @@ impl DataStore { self.transaction_if_current_blueprint_is( &conn, - "dataset_delete_if_blueprint_is_enabled", + "dataset_delete_if_blueprint_is_current_target", opctx, bp_id, |conn| { @@ -583,7 +583,7 @@ mod test { // Upsert referencing old blueprint: Error datastore - .dataset_upsert_if_blueprint_is_enabled( + .dataset_upsert_if_blueprint_is_current_target( &opctx, old_blueprint_id, new_dataset_on(zpool_id), @@ -595,7 +595,7 @@ mod test { // Upsert referencing current blueprint: OK let dataset = datastore - .dataset_upsert_if_blueprint_is_enabled( + .dataset_upsert_if_blueprint_is_current_target( &opctx, current_blueprint_id, new_dataset_on(zpool_id), @@ -605,7 +605,7 @@ mod test { // Delete referencing old blueprint: Error datastore - .dataset_delete_if_blueprint_is_enabled( + .dataset_delete_if_blueprint_is_current_target( &opctx, old_blueprint_id, DatasetUuid::from_untyped_uuid(dataset.id()), @@ -617,7 +617,7 @@ mod test { // Delete referencing current blueprint: OK datastore - .dataset_delete_if_blueprint_is_enabled( + .dataset_delete_if_blueprint_is_current_target( &opctx, current_blueprint_id, DatasetUuid::from_untyped_uuid(dataset.id()), From 0749c9a96079b384b7c3e17da0065889b28073a9 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 26 Sep 2024 09:38:21 -0700 Subject: [PATCH 79/84] fix broken merge --- nexus/db-model/src/dataset.rs | 3 ++- nexus/db-model/src/dataset_kind.rs | 1 + nexus/reconfigurator/execution/src/datasets.rs | 6 ++++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/nexus/db-model/src/dataset.rs b/nexus/db-model/src/dataset.rs index 3086da058f..ad351fe612 100644 --- a/nexus/db-model/src/dataset.rs +++ b/nexus/db-model/src/dataset.rs @@ -114,7 +114,8 @@ impl From for Dataset { | ApiDatasetKind::InternalDns | ApiDatasetKind::TransientZone { .. 
} | ApiDatasetKind::TransientZoneRoot - | ApiDatasetKind::Debug => None, + | ApiDatasetKind::Debug + | ApiDatasetKind::Update => None, }; let addr = bp.address; Self { diff --git a/nexus/db-model/src/dataset_kind.rs b/nexus/db-model/src/dataset_kind.rs index 6b321491c9..e90f0d1db3 100644 --- a/nexus/db-model/src/dataset_kind.rs +++ b/nexus/db-model/src/dataset_kind.rs @@ -49,6 +49,7 @@ impl DatasetKind { ApiKind::TransientZone { name } } (Self::Debug, None) => ApiKind::Debug, + (Self::Update, None) => ApiKind::Update, (Self::TransientZone, None) => { return Err(Error::internal_error("Zone kind needs name")) } diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index 72a63be737..3fcf132944 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -178,7 +178,9 @@ pub(crate) async fn ensure_dataset_records_exist( let dataset = Dataset::from(bp_dataset.clone()); datastore - .dataset_upsert_if_blueprint_is_enabled(&opctx, bp_id, dataset) + .dataset_upsert_if_blueprint_is_current_target( + &opctx, bp_id, dataset, + ) .await .with_context(|| { format!("failed to upsert dataset record for dataset {id}") @@ -208,7 +210,7 @@ pub(crate) async fn ensure_dataset_records_exist( } datastore - .dataset_delete_if_blueprint_is_enabled( + .dataset_delete_if_blueprint_is_current_target( &opctx, bp_id, bp_dataset.id, From 78daf5d48844ffbd1c8afc7081cc6baf360d9cd6 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 26 Sep 2024 09:44:43 -0700 Subject: [PATCH 80/84] FOR UPDATE --- nexus/db-queries/src/db/datastore/deployment.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 1584850376..fa7cad9e85 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -1337,12 +1337,22 @@ impl DataStore { } /// Get the current target blueprint, if one exists + /// + /// Grabs a write lock on the blueprint target row. + /// + /// This function may be called from a transactional context, + /// which could read or modify the blueprint target. + /// + /// CockroachDb guarantees serializability either way, but + /// this makes it less likely for concurrent transactions + /// to fail with retryable errors. pub async fn blueprint_target_get_current_on_connection( conn: &async_bb8_diesel::Connection, opctx: &OpContext, ) -> Result> { opctx.authorize(authz::Action::Read, &authz::BLUEPRINT_CONFIG).await?; - Self::blueprint_current_target_only(&conn, SelectFlavor::Standard).await + Self::blueprint_current_target_only(&conn, SelectFlavor::ForUpdate) + .await } /// Get the current target blueprint, if one exists From 80850518ed77ec95af05ffd786810fbee5451edb Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 26 Sep 2024 13:04:45 -0700 Subject: [PATCH 81/84] SELECT FOR UPDATE docs --- .../db-queries/src/db/datastore/deployment.rs | 27 +++++++++++++++---- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index fa7cad9e85..9848dd00e9 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -1340,12 +1340,29 @@ impl DataStore { /// /// Grabs a write lock on the blueprint target row. 
/// - /// This function may be called from a transactional context, - /// which could read or modify the blueprint target. + /// This "SELECT FOR UPDATE" statement is necessary for correctness: + /// without it, it's possible that an "old blueprint" value is read + /// while the blueprint is concurrently updated. /// - /// CockroachDb guarantees serializability either way, but - /// this makes it less likely for concurrent transactions - /// to fail with retryable errors. + /// For example: + /// + /// - Caller of this function: Start transaction, read blueprint value X, confirm it matches an + /// expected value. + /// - Elsewhere: Blueprint value updated to X + 1 + /// - Caller of this function: Performs a write operation, commit. + /// + /// In this situation, "no stale read" occurred, because the transaction checking the blueprint + /// value could have been "ordered before" the blueprint being updated. This is arguably quite + /// bad! In this situation, arbitrary database transactions could be performing modifications, + /// thinking they are enacting the current target blueprint, when in reality it has changed. + /// + /// However, with "SELECT FOR UPDATE" being applied to the blueprint target row, we acquire + /// an exclusive write lock on the row we're checking: + /// - If we're checking the current target blueprint, and + /// - A concurrent update tries to read the current target blueprint and update it, + /// + /// One of these operations will be blocked behind the other, which restores our mental + /// model of serialized transactions, with respect to the latest blueprint. pub async fn blueprint_target_get_current_on_connection( conn: &async_bb8_diesel::Connection, opctx: &OpContext, From 7f1f89f5acd75f31682b9072c30f8a95128cfb56 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 1 Oct 2024 13:37:48 -0700 Subject: [PATCH 82/84] schemas --- schema/rss-service-plan-v5.json | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/schema/rss-service-plan-v5.json b/schema/rss-service-plan-v5.json index e2268371ec..2c1439cb6d 100644 --- a/schema/rss-service-plan-v5.json +++ b/schema/rss-service-plan-v5.json @@ -353,33 +353,12 @@ "type": "object", "required": [ "address", - "dns_servers", - "ntp_servers", "type" ], "properties": { "address": { "type": "string" }, - "dns_servers": { - "type": "array", - "items": { - "type": "string", - "format": "ip" - } - }, - "domain": { - "type": [ - "string", - "null" - ] - }, - "ntp_servers": { - "type": "array", - "items": { - "type": "string" - } - }, "type": { "type": "string", "enum": [ From 2186ec3301bcd809fe3d3035d257983821625c36 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 2 Oct 2024 11:54:52 -0700 Subject: [PATCH 83/84] Fix mismerge --- .../reconfigurator/planning/src/blueprint_builder/builder.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index ecbf5c9611..d4e4dadce2 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -1536,6 +1536,8 @@ impl<'a> BlueprintBuilder<'a> { Ok(EnsureMultiple::Changed { added: num_clickhouse_servers_to_add, + updated: 0, + expunged: 0, removed: 0, }) } @@ -1592,6 +1594,8 @@ impl<'a> BlueprintBuilder<'a> { Ok(EnsureMultiple::Changed { added: num_clickhouse_keepers_to_add, + updated: 0, + expunged: 0, removed: 0, }) } From 
e0105f7b062864ca2add59d9a376ff60cad6cbe9 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 28 Oct 2024 14:36:20 -0700 Subject: [PATCH 84/84] expectorate --- .../tests/output/cmd-example-stdout | 28 +++++++++---------- .../tests/output/cmd-stdout | 20 ++++++------- schema/rss-service-plan-v5.json | 13 +-------- 3 files changed, 25 insertions(+), 36 deletions(-) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmd-example-stdout b/dev-tools/reconfigurator-cli/tests/output/cmd-example-stdout index 838695cd80..93e705c6af 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmd-example-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmd-example-stdout @@ -40,25 +40,25 @@ sled 2eb69596-f081-4e2d-9425-9994926e0832 subnet fd00:1122:3344:102::/64 zpools (10): 088ed702-551e-453b-80d7-57700372a844 (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-088ed702-551e-453b-80d7-57700372a844" }, disk_id: b2850ccb-4ac7-4034-aeab-b1cd582d407b (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-088ed702-551e-453b-80d7-57700372a844" }, disk_id: b2850ccb-4ac7-4034-aeab-b1cd582d407b (physical_disk), policy: InService, state: Active } 09e51697-abad-47c0-a193-eaf74bc5d3cd (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-09e51697-abad-47c0-a193-eaf74bc5d3cd" }, disk_id: c6d1fe0d-5226-4318-a55a-e86e20612277 (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-09e51697-abad-47c0-a193-eaf74bc5d3cd" }, disk_id: c6d1fe0d-5226-4318-a55a-e86e20612277 (physical_disk), policy: InService, state: Active } 3a512d49-edbe-47f3-8d0b-6051bfdc4044 (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-3a512d49-edbe-47f3-8d0b-6051bfdc4044" }, disk_id: 24510d37-20b1-4bdc-9ca7-c37fff39abb2 (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-3a512d49-edbe-47f3-8d0b-6051bfdc4044" }, disk_id: 24510d37-20b1-4bdc-9ca7-c37fff39abb2 (physical_disk), policy: InService, state: Active } 40517680-aa77-413c-bcf4-b9041dcf6612 (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-40517680-aa77-413c-bcf4-b9041dcf6612" }, disk_id: 30ed317f-1717-4df6-8c1c-69f9d438705e (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-40517680-aa77-413c-bcf4-b9041dcf6612" }, disk_id: 30ed317f-1717-4df6-8c1c-69f9d438705e (physical_disk), policy: InService, state: Active } 78d3cb96-9295-4644-bf78-2e32191c71f9 (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-78d3cb96-9295-4644-bf78-2e32191c71f9" }, disk_id: 5ac39660-8149-48a2-a6df-aebb0f30352a (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-78d3cb96-9295-4644-bf78-2e32191c71f9" }, disk_id: 5ac39660-8149-48a2-a6df-aebb0f30352a (physical_disk), policy: InService, state: Active } 853595e7-77da-404e-bc35-aba77478d55c (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: 
"fake-model", serial: "serial-853595e7-77da-404e-bc35-aba77478d55c" }, disk_id: 43083372-c7d0-4df3-ac4e-96c45cde28d9 (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-853595e7-77da-404e-bc35-aba77478d55c" }, disk_id: 43083372-c7d0-4df3-ac4e-96c45cde28d9 (physical_disk), policy: InService, state: Active } 8926e0e7-65d9-4e2e-ac6d-f1298af81ef1 (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-8926e0e7-65d9-4e2e-ac6d-f1298af81ef1" }, disk_id: 13e65865-2a6e-41f7-aa18-6ef8dff59b4e (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-8926e0e7-65d9-4e2e-ac6d-f1298af81ef1" }, disk_id: 13e65865-2a6e-41f7-aa18-6ef8dff59b4e (physical_disk), policy: InService, state: Active } 9c0b9151-17f3-4857-94cc-b5bfcd402326 (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-9c0b9151-17f3-4857-94cc-b5bfcd402326" }, disk_id: 40383e60-18f6-4423-94e7-7b91ce939b43 (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-9c0b9151-17f3-4857-94cc-b5bfcd402326" }, disk_id: 40383e60-18f6-4423-94e7-7b91ce939b43 (physical_disk), policy: InService, state: Active } d61354fa-48d2-47c6-90bf-546e3ed1708b (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-d61354fa-48d2-47c6-90bf-546e3ed1708b" }, disk_id: e02ae523-7b66-4188-93c8-c5808c01c795 (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-d61354fa-48d2-47c6-90bf-546e3ed1708b" }, disk_id: e02ae523-7b66-4188-93c8-c5808c01c795 (physical_disk), policy: InService, state: Active } d792c8cb-7490-40cb-bb1c-d4917242edf4 (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-d792c8cb-7490-40cb-bb1c-d4917242edf4" }, disk_id: c19e5610-a3a2-4cc6-af4d-517a49ef610b (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-d792c8cb-7490-40cb-bb1c-d4917242edf4" }, disk_id: c19e5610-a3a2-4cc6-af4d-517a49ef610b (physical_disk), policy: InService, state: Active } > blueprint-show ade5749d-bdf3-4fab-a8ae-00bea01b3a5a @@ -494,13 +494,13 @@ sled 89d02b1b-478c-401a-8e28-7a26f74fa41b subnet fd00:1122:3344:101::/64 zpools (4): 44fa7024-c2bc-4d2c-b478-c4997e4aece8 (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-44fa7024-c2bc-4d2c-b478-c4997e4aece8" }, disk_id: 2a15b33c-dd0e-45b7-aba9-d05f40f030ff (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-44fa7024-c2bc-4d2c-b478-c4997e4aece8" }, disk_id: 2a15b33c-dd0e-45b7-aba9-d05f40f030ff (physical_disk), policy: InService, state: Active } 8562317c-4736-4cfc-9292-7dcab96a6fee (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-8562317c-4736-4cfc-9292-7dcab96a6fee" }, disk_id: cad6faa6-9409-4496-9aeb-392b3c50bed4 (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: 
DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-8562317c-4736-4cfc-9292-7dcab96a6fee" }, disk_id: cad6faa6-9409-4496-9aeb-392b3c50bed4 (physical_disk), policy: InService, state: Active } ce1c13f3-bef2-4306-b0f2-4e39bd4a18b6 (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-ce1c13f3-bef2-4306-b0f2-4e39bd4a18b6" }, disk_id: 7d89a66e-0dcd-47ab-824d-62186812b8bd (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-ce1c13f3-bef2-4306-b0f2-4e39bd4a18b6" }, disk_id: 7d89a66e-0dcd-47ab-824d-62186812b8bd (physical_disk), policy: InService, state: Active } f931ec80-a3e3-4adb-a8ba-fa5adbd2294c (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-f931ec80-a3e3-4adb-a8ba-fa5adbd2294c" }, disk_id: 41755be9-2c77-4deb-87a4-cb53f09263fa (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-f931ec80-a3e3-4adb-a8ba-fa5adbd2294c" }, disk_id: 41755be9-2c77-4deb-87a4-cb53f09263fa (physical_disk), policy: InService, state: Active } > blueprint-show ade5749d-bdf3-4fab-a8ae-00bea01b3a5a diff --git a/dev-tools/reconfigurator-cli/tests/output/cmd-stdout b/dev-tools/reconfigurator-cli/tests/output/cmd-stdout index 2b5c9967cc..40489caeb5 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmd-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmd-stdout @@ -98,24 +98,24 @@ sled ..................... subnet fd00:1122:3344:101::/64 zpools (10): ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... 
(zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } diff --git a/schema/rss-service-plan-v5.json b/schema/rss-service-plan-v5.json index 2c1439cb6d..132b27c10e 100644 --- a/schema/rss-service-plan-v5.json +++ b/schema/rss-service-plan-v5.json @@ -24,7 +24,6 @@ "required": [ "disposition", "id", - "underlay_address", "zone_type" ], "properties": { @@ -50,10 +49,6 @@ "id": { "$ref": "#/definitions/TypedUuidForOmicronZoneKind" }, - "underlay_address": { - "type": "string", - "format": "ipv6" - }, "zone_type": { "$ref": "#/definitions/BlueprintZoneType" } @@ -670,7 +665,6 @@ } }, "DnsConfigParams": { - "description": "DnsConfigParams\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"generation\", \"time_created\", \"zones\" ], \"properties\": { \"generation\": { \"type\": \"integer\", \"format\": \"uint64\", \"minimum\": 0.0 }, \"time_created\": { \"type\": \"string\", \"format\": \"date-time\" }, \"zones\": { \"type\": \"array\", \"items\": { \"$ref\": \"#/components/schemas/DnsConfigZone\" } } } } ```
", "type": "object", "required": [ "generation", @@ -679,9 +673,7 @@ ], "properties": { "generation": { - "type": "integer", - "format": "uint64", - "minimum": 0.0 + "$ref": "#/definitions/Generation" }, "time_created": { "type": "string", @@ -696,7 +688,6 @@ } }, "DnsConfigZone": { - "description": "DnsConfigZone\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"records\", \"zone_name\" ], \"properties\": { \"records\": { \"type\": \"object\", \"additionalProperties\": { \"type\": \"array\", \"items\": { \"$ref\": \"#/components/schemas/DnsRecord\" } } }, \"zone_name\": { \"type\": \"string\" } } } ```
", "type": "object", "required": [ "records", @@ -718,7 +709,6 @@ } }, "DnsRecord": { - "description": "DnsRecord\n\n
JSON schema\n\n```json { \"oneOf\": [ { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"type\": \"string\", \"format\": \"ipv4\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"A\" ] } } }, { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"type\": \"string\", \"format\": \"ipv6\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"AAAA\" ] } } }, { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"$ref\": \"#/components/schemas/Srv\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"SRV\" ] } } } ] } ```
", "oneOf": [ { "type": "object", @@ -1147,7 +1137,6 @@ } }, "Srv": { - "description": "Srv\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"port\", \"prio\", \"target\", \"weight\" ], \"properties\": { \"port\": { \"type\": \"integer\", \"format\": \"uint16\", \"minimum\": 0.0 }, \"prio\": { \"type\": \"integer\", \"format\": \"uint16\", \"minimum\": 0.0 }, \"target\": { \"type\": \"string\" }, \"weight\": { \"type\": \"integer\", \"format\": \"uint16\", \"minimum\": 0.0 } } } ```
", "type": "object", "required": [ "port",