diff --git a/Cargo.lock b/Cargo.lock index e15afdfbab..dfd819f6f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3208,7 +3208,9 @@ dependencies = [ "futures-util", "http 0.2.11", "hyper 0.14.27", + "log", "rustls 0.21.9", + "rustls-native-certs 0.6.3", "tokio", "tokio-rustls 0.24.1", ] @@ -3225,7 +3227,7 @@ dependencies = [ "hyper-util", "log", "rustls 0.22.2", - "rustls-native-certs", + "rustls-native-certs 0.7.0", "rustls-pki-types", "tokio", "tokio-rustls 0.25.0", @@ -5351,7 +5353,6 @@ name = "omicron-workspace-hack" version = "0.1.0" dependencies = [ "ahash", - "aho-corasick", "anyhow", "base16ct", "bit-set", @@ -5370,14 +5371,13 @@ dependencies = [ "const-oid", "crossbeam-epoch", "crossbeam-utils", - "crossterm", "crypto-common", "der", "diesel", "digest", - "dof 0.3.0", "either", "elliptic-curve", + "errno", "ff", "flate2", "futures", @@ -5391,10 +5391,11 @@ dependencies = [ "generic-array", "getrandom 0.2.10", "group", - "hashbrown 0.14.3", + "hashbrown 0.13.2", "hex", "hmac", "hyper 0.14.27", + "hyper-rustls 0.24.2", "indexmap 2.2.3", "inout", "ipnetwork", @@ -5434,10 +5435,9 @@ dependencies = [ "sha2", "similar", "slog", - "socket2 0.5.5", + "snafu", "spin 0.9.8", "string_cache", - "strum 0.25.0", "subtle", "syn 1.0.109", "syn 2.0.48", @@ -5450,13 +5450,12 @@ dependencies = [ "toml 0.7.8", "toml_datetime", "toml_edit 0.19.15", - "toml_edit 0.22.6", + "toml_edit 0.21.1", "tracing", "trust-dns-proto", "unicode-bidi", "unicode-normalization", "usdt 0.3.5", - "usdt-impl 0.5.0", "uuid", "yasna", "zerocopy 0.7.32", @@ -7475,6 +7474,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile 1.0.3", + "schannel", + "security-framework", +] + [[package]] name = "rustls-native-certs" version = "0.7.0" @@ -8012,6 +8023,12 @@ 
dependencies = [ "digest", ] +[[package]] +name = "sha1_smol" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" + [[package]] name = "sha2" version = "0.10.8" @@ -9387,6 +9404,19 @@ dependencies = [ "winnow 0.5.15", ] +[[package]] +name = "toml_edit" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +dependencies = [ + "indexmap 2.2.3", + "serde", + "serde_spanned", + "toml_datetime", + "winnow 0.5.15", +] + [[package]] name = "toml_edit" version = "0.22.6" @@ -10089,6 +10119,7 @@ checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" dependencies = [ "getrandom 0.2.10", "serde", + "sha1_smol", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index db37547ea0..3c2bd5a33d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -411,7 +411,7 @@ unicode-width = "0.1.11" update-common = { path = "update-common" } update-engine = { path = "update-engine" } usdt = "0.5.0" -uuid = { version = "1.7.0", features = ["serde", "v4"] } +uuid = { version = "1.7.0", features = ["serde", "v4", "v5"] } walkdir = "2.4" wicket = { path = "wicket" } wicket-common = { path = "wicket-common" } diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index ecbb8365fe..3e1a7dc8e3 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -9,6 +9,8 @@ extern crate diesel; #[macro_use] extern crate newtype_derive; +use uuid::Uuid; + mod address_lot; mod bfd; mod bgp; @@ -45,6 +47,7 @@ mod network_interface; mod oximeter_info; mod physical_disk; mod physical_disk_kind; +mod physical_disk_state; mod producer_endpoint; mod project; mod semver_version; @@ -144,6 +147,7 @@ pub use network_interface::*; pub use oximeter_info::*; pub use physical_disk::*; pub use physical_disk_kind::*; +pub use physical_disk_state::*; pub use 
producer_endpoint::*; pub use project::*; pub use quota::*; @@ -310,6 +314,15 @@ macro_rules! impl_enum_type { pub(crate) use impl_enum_type; +/// This is an arbitrary UUID, but it's stable because it's embedded as a +/// constant. This defines a namespace, according to +/// <https://www.rfc-editor.org/rfc/rfc4122>, which allows generation of v5 +/// UUIDs which are deterministic. +/// +/// This UUID is used to identify hardware. +pub(crate) const HARDWARE_UUID_NAMESPACE: Uuid = + Uuid::from_u128(206230429496795504636731999500138461979); + /// Describes a type that's represented in the database using a String /// /// If you're reaching for this type, consider whether it'd be better to use an diff --git a/nexus/db-model/src/physical_disk.rs b/nexus/db-model/src/physical_disk.rs index 3628f7077f..1174800e42 100644 --- a/nexus/db-model/src/physical_disk.rs +++ b/nexus/db-model/src/physical_disk.rs @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use super::{Generation, PhysicalDiskKind}; +use super::{Generation, PhysicalDiskKind, PhysicalDiskState}; use crate::collection::DatastoreCollectionConfig; use crate::schema::{physical_disk, zpool}; use chrono::{DateTime, Utc}; @@ -25,6 +25,7 @@ pub struct PhysicalDisk { pub variant: PhysicalDiskKind, pub sled_id: Uuid, + pub state: PhysicalDiskState, } impl PhysicalDisk { @@ -35,8 +36,24 @@ impl PhysicalDisk { variant: PhysicalDiskKind, sled_id: Uuid, ) -> Self { + // NOTE: We may want to be more restrictive when parsing the vendor, + // serial, and model values, so that we can supply a separator + // distinguishing them. + // + // Theoretically, we could have the following problem: + // + // - A Disk vendor "Foo" makes a disk with serial "Bar", and model "Rev1". + // - This becomes: "FooBarRev1". + // - A Disk vendor "FooBar" makes a disk with serial "Rev", and model "1". + // - This becomes: "FooBarRev1", and conflicts. 
+ let interpolated_name = format!("{vendor}{serial}{model}"); + let disk_id = Uuid::new_v5( + &crate::HARDWARE_UUID_NAMESPACE, + interpolated_name.as_bytes(), + ); + println!("Physical Disk ID: {disk_id}, from {interpolated_name}"); Self { - identity: PhysicalDiskIdentity::new(Uuid::new_v4()), + identity: PhysicalDiskIdentity::new(disk_id), time_deleted: None, rcgen: Generation::new(), vendor, @@ -44,6 +61,7 @@ impl PhysicalDisk { model, variant, sled_id, + state: PhysicalDiskState::Active, } } @@ -74,6 +92,7 @@ impl From for views::PhysicalDisk { vendor: disk.vendor, serial: disk.serial, model: disk.model, + state: disk.state.into(), form_factor: disk.variant.into(), } } diff --git a/nexus/db-model/src/physical_disk_state.rs b/nexus/db-model/src/physical_disk_state.rs new file mode 100644 index 0000000000..2129a54a8f --- /dev/null +++ b/nexus/db-model/src/physical_disk_state.rs @@ -0,0 +1,34 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +use super::impl_enum_type; +use nexus_types::external_api::views; +use serde::{Deserialize, Serialize}; + +impl_enum_type!( + #[derive(Clone, SqlType, Debug, QueryId)] + #[diesel(postgres_type(name = "physical_disk_state"))] + pub struct PhysicalDiskStateEnum; + + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)] + #[diesel(sql_type = PhysicalDiskStateEnum)] + pub enum PhysicalDiskState; + + // Enum values + Active => b"active" + Draining => b"draining" + Inactive => b"inactive" +); + +impl From<PhysicalDiskState> for views::PhysicalDiskState { + fn from(state: PhysicalDiskState) -> Self { + use views::PhysicalDiskState as api; + use PhysicalDiskState as db; + match state { + db::Active => api::Active, + db::Draining => api::Draining, + db::Inactive => api::Inactive, + } + } +} diff --git a/nexus/db-model/src/queries/region_allocation.rs b/nexus/db-model/src/queries/region_allocation.rs index a1b9e0373a..c9771991e0 100644 --- a/nexus/db-model/src/queries/region_allocation.rs +++ b/nexus/db-model/src/queries/region_allocation.rs @@ -23,6 +23,7 @@ // a CTE (where we want the alias name to come first). 
use crate::schema::dataset; +use crate::schema::physical_disk; use crate::schema::sled; use crate::schema::zpool; @@ -151,7 +152,9 @@ diesel::allow_tables_to_appear_in_same_query!( diesel::allow_tables_to_appear_in_same_query!( do_insert, candidate_regions, + candidate_zpools, dataset, + physical_disk, zpool, ); @@ -170,8 +173,6 @@ diesel::allow_tables_to_appear_in_same_query!( updated_datasets, ); -diesel::allow_tables_to_appear_in_same_query!(candidate_zpools, dataset,); -diesel::allow_tables_to_appear_in_same_query!(candidate_zpools, zpool,); diesel::allow_tables_to_appear_in_same_query!(candidate_datasets, dataset); // == Needed for random region allocation == diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 54755486e5..1c18b043dd 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -892,6 +892,7 @@ table! { variant -> crate::PhysicalDiskKindEnum, sled_id -> Uuid, + state -> crate::PhysicalDiskStateEnum, } } @@ -961,11 +962,6 @@ table! { } } -allow_tables_to_appear_in_same_query! { - zpool, - physical_disk -} - table! 
{ dataset (id) { id -> Uuid, diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 5f05aa1760..1cc8139868 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -419,8 +419,6 @@ mod test { }; use crate::db::queries::vpc_subnet::FilterConflictingVpcSubnetRangesQuery; use chrono::{Duration, Utc}; - use futures::stream; - use futures::StreamExt; use nexus_db_model::IpAttachState; use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; @@ -697,7 +695,7 @@ mod test { ) -> Uuid { let physical_disk = PhysicalDisk::new( TEST_VENDOR.into(), - TEST_SERIAL.into(), + format!("{TEST_SERIAL}-{}", Uuid::new_v4()), TEST_MODEL.into(), kind, sled_id, @@ -714,14 +712,11 @@ mod test { datastore: &DataStore, sled_id: Uuid, physical_disk_id: Uuid, + size: ByteCount, ) -> Uuid { let zpool_id = Uuid::new_v4(); - let zpool = Zpool::new( - zpool_id, - sled_id, - physical_disk_id, - test_zpool_size().into(), - ); + let zpool = + Zpool::new(zpool_id, sled_id, physical_disk_id, size.into()); datastore.zpool_upsert(zpool).await.unwrap(); zpool_id } @@ -742,95 +737,159 @@ mod test { } } - struct TestDataset { - sled_id: Uuid, - dataset_id: Uuid, + struct RegionAllocationTestCtxBuilder { + sleds: usize, + // It's assumed that we have one zpool per disk + disks_per_sled: usize, + zpool_size: ByteCount, + datasets_per_zpool: usize, } - async fn create_test_datasets_for_region_allocation( - opctx: &OpContext, - datastore: Arc, - number_of_sleds: usize, - ) -> Vec { - // Create sleds... 
- let sled_ids: Vec = stream::iter(0..number_of_sleds) - .then(|_| create_test_sled(&datastore)) - .collect() - .await; + impl RegionAllocationTestCtxBuilder { + pub fn sleds(mut self, s: usize) -> Self { + self.sleds = s; + self + } + + pub fn disks_per_sled(mut self, d: usize) -> Self { + self.disks_per_sled = d; + self + } - struct PhysicalDisk { - sled_id: Uuid, - disk_id: Uuid, + pub fn zpool_size(mut self, b: ByteCount) -> Self { + self.zpool_size = b; + self } - // create 9 disks on each sled - let physical_disks: Vec = stream::iter(sled_ids) - .map(|sled_id| { - let sled_id_iter: Vec = (0..9).map(|_| sled_id).collect(); - stream::iter(sled_id_iter).then(|sled_id| { - let disk_id_future = create_test_physical_disk( + pub fn datasets_per_zpool(mut self, d: usize) -> Self { + self.datasets_per_zpool = d; + self + } + } + + impl Default for RegionAllocationTestCtxBuilder { + fn default() -> Self { + Self { + sleds: 1, + disks_per_sled: 1, + zpool_size: ByteCount::from_gibibytes_u32(100), + datasets_per_zpool: 1, + } + } + } + + impl RegionAllocationTestCtxBuilder { + async fn build( + &self, + opctx: &OpContext, + datastore: &Arc, + ) -> RegionAllocationTestCtx { + let mut sleds = vec![]; + + for _ in 0..self.sleds { + // Create a sled... + let mut sled = SledInfo { + id: create_test_sled(&datastore).await, + disks: vec![], + }; + + for _ in 0..self.disks_per_sled { + // ... and a disk on that sled... + let physical_disk_id = create_test_physical_disk( &datastore, opctx, - sled_id, + sled.id, PhysicalDiskKind::U2, - ); - async move { - let disk_id = disk_id_future.await; - PhysicalDisk { sled_id, disk_id } + ) + .await; + + // ... and a zpool within that disk... + let zpool_id = create_test_zpool( + &datastore, + sled.id, + physical_disk_id, + self.zpool_size, + ) + .await; + + let mut disk = DiskInfo { + id: physical_disk_id, + zpool_id, + datasets: vec![], + }; + + for _ in 0..self.datasets_per_zpool { + // ... and datasets within that zpool. 
+ let bogus_addr = + SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0); + let dataset_id = Uuid::new_v4(); + let dataset = Dataset::new( + dataset_id, + zpool_id, + bogus_addr, + DatasetKind::Crucible, + ); + datastore.dataset_upsert(dataset).await.unwrap(); + disk.datasets.push(dataset_id); } - }) - }) - .flatten() - .collect() - .await; + sled.disks.push(disk); + } + sleds.push(sled); + } - #[derive(Copy, Clone)] - struct Zpool { - sled_id: Uuid, - pool_id: Uuid, + RegionAllocationTestCtx { sleds } } + } - // 1 pool per disk - let zpools: Vec = stream::iter(physical_disks) - .then(|disk| { - let pool_id_future = - create_test_zpool(&datastore, disk.sled_id, disk.disk_id); - async move { - let pool_id = pool_id_future.await; - Zpool { sled_id: disk.sled_id, pool_id } - } - }) - .collect() - .await; + struct DiskInfo { + id: Uuid, + #[allow(unused)] + zpool_id: Uuid, + #[allow(unused)] + datasets: Vec, + } - let bogus_addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0); - - let datasets: Vec = stream::iter(zpools) - .map(|zpool| { - // 3 datasets per zpool, to test that pools are distinct - let zpool_iter: Vec = (0..3).map(|_| zpool).collect(); - stream::iter(zpool_iter).then(|zpool| { - let id = Uuid::new_v4(); - let dataset = Dataset::new( - id, - zpool.pool_id, - bogus_addr, - DatasetKind::Crucible, - ); + struct SledInfo { + id: Uuid, + disks: Vec, + } - let datastore = datastore.clone(); - async move { - datastore.dataset_upsert(dataset).await.unwrap(); + struct RegionAllocationTestCtx { + sleds: Vec, + } - TestDataset { sled_id: zpool.sled_id, dataset_id: id } - } - }) - }) - .flatten() - .collect() - .await; + struct TestDataset { + sled_id: Uuid, + dataset_id: Uuid, + } - datasets + async fn create_test_datasets_for_region_allocation( + opctx: &OpContext, + datastore: Arc, + number_of_sleds: usize, + ) -> Vec { + let sleds = RegionAllocationTestCtxBuilder::default() + .sleds(number_of_sleds) + // create 9 disks per sled + .disks_per_sled(9) + // 3 
datasets per zpool, to test that pools are distinct + .datasets_per_zpool(3) + .build(&opctx, &datastore) + .await + .sleds; + + let mut result = vec![]; + for sled_info in &sleds { + for disk in &sled_info.disks { + for dataset_id in &disk.datasets { + result.push(TestDataset { + sled_id: sled_info.id, + dataset_id: *dataset_id, + }); + } + } + } + result } #[tokio::test] @@ -1142,46 +1201,11 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - // Create a sled... - let sled_id = create_test_sled(&datastore).await; - - // ... and a disk on that sled... - let physical_disk_id = create_test_physical_disk( - &datastore, - &opctx, - sled_id, - PhysicalDiskKind::U2, - ) - .await; - - // 1 less than REDUNDANCY level of zpools - let zpool_ids: Vec = - stream::iter(0..REGION_REDUNDANCY_THRESHOLD - 1) - .then(|_| { - create_test_zpool(&datastore, sled_id, physical_disk_id) - }) - .collect() - .await; - - let bogus_addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0); - - // 1 dataset per zpool - stream::iter(zpool_ids) - .then(|zpool_id| { - let id = Uuid::new_v4(); - let dataset = Dataset::new( - id, - zpool_id, - bogus_addr, - DatasetKind::Crucible, - ); - let datastore = datastore.clone(); - async move { - datastore.dataset_upsert(dataset).await.unwrap(); - id - } - }) - .collect::>() + let _testctx = RegionAllocationTestCtxBuilder::default() + .sleds(1) + .disks_per_sled(REGION_REDUNDANCY_THRESHOLD - 1) + .datasets_per_zpool(1) + .build(&opctx, &datastore) .await; // Allocate regions from the datasets for this volume. 
@@ -1214,9 +1238,10 @@ mod test { } #[tokio::test] - async fn test_region_allocation_out_of_space_fails() { - let logctx = - dev::test_setup_log("test_region_allocation_out_of_space_fails"); + async fn test_region_allocation_out_of_space_fails_one_disk() { + let logctx = dev::test_setup_log( + "test_region_allocation_out_of_space_fails_one_disk", + ); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; @@ -1232,7 +1257,47 @@ mod test { let params = create_test_disk_create_params("disk1", alloc_size); let volume1_id = Uuid::new_v4(); - assert!(datastore + datastore + .region_allocate( + &opctx, + volume1_id, + ¶ms.disk_source, + params.size, + &RegionAllocationStrategy::Random { seed: Some(0) }, + ) + .await + .unwrap_err(); + + let _ = db.cleanup().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_region_allocation_out_of_space_fails_many_sleds() { + let logctx = dev::test_setup_log( + "test_region_allocation_out_of_space_fails_many_sleds", + ); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let zpool_size = ByteCount::from_gibibytes_u32(100); + let disk_size = ByteCount::from_gibibytes_u32(101); + let _testctx = RegionAllocationTestCtxBuilder::default() + .sleds(REGION_REDUNDANCY_THRESHOLD) + .disks_per_sled(1) + .zpool_size(zpool_size) + .datasets_per_zpool(1) + .build(&opctx, &datastore) + .await; + + // Allocate regions from the datasets for this disk. + // + // Note that we ask for a disk which is as large as the zpool, + // so we shouldn't have space for redundancy. 
+ let params = create_test_disk_create_params("disk1", disk_size); + let volume1_id = Uuid::new_v4(); + + datastore .region_allocate( &opctx, volume1_id, @@ -1241,7 +1306,87 @@ mod test { &RegionAllocationStrategy::Random { seed: Some(0) }, ) .await - .is_err()); + .unwrap_err(); + + let _ = db.cleanup().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_region_allocation_ignores_inactive_disks() { + let logctx = dev::test_setup_log( + "test_region_allocation_ignores_inactive_disks", + ); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let zpool_size = ByteCount::from_gibibytes_u32(100); + let disk_size = ByteCount::from_mebibytes_u32(100); + let testctx = RegionAllocationTestCtxBuilder::default() + .sleds(1) + .disks_per_sled(REGION_REDUNDANCY_THRESHOLD) + .zpool_size(zpool_size) + .datasets_per_zpool(1) + .build(&opctx, &datastore) + .await; + + // Allocate one disk, observe that it works. + let params = create_test_disk_create_params("disk1", disk_size); + let volume_id = Uuid::new_v4(); + datastore + .region_allocate( + &opctx, + volume_id, + ¶ms.disk_source, + params.size, + &RegionAllocationStrategy::Random { seed: Some(0) }, + ) + .await + .unwrap(); + + // First, find the disk which we plan on deactivating. + let disk_to_disable = testctx.sleds[0].disks[0].id; + let disk = datastore + .physical_disk_list( + &opctx, + &DataPageParams:: { + marker: None, + direction: dropshot::PaginationOrder::Ascending, + limit: std::num::NonZeroU32::new(1024).unwrap(), + }, + ) + .await + .unwrap() + .into_iter() + .find(|disk| disk.uuid() == disk_to_disable) + .unwrap(); + + let (.., physical_disk_authz) = LookupPath::new(&opctx, &datastore) + .physical_disk(disk.vendor, disk.serial, disk.model) + .lookup_for(authz::Action::Modify) + .await + .unwrap(); + + // Deactivate the disk. 
+ datastore + .physical_disk_deactivate(&opctx, &physical_disk_authz) + .await + .unwrap(); + + // After marking a disk as non-active, the provision fails, since + // one of the necessary disks cannot be used. + let params = create_test_disk_create_params("disk2", disk_size); + let volume_id = Uuid::new_v4(); + datastore + .region_allocate( + &opctx, + volume_id, + ¶ms.disk_source, + params.size, + &RegionAllocationStrategy::Random { seed: Some(0) }, + ) + .await + .unwrap_err(); let _ = db.cleanup().await; logctx.cleanup_successful(); @@ -1746,10 +1891,10 @@ mod test { ); // Deleting a non-existing record fails - assert!(datastore + datastore .deallocate_external_ip(&opctx, Uuid::nil()) .await - .is_err()); + .unwrap_err(); db.cleanup().await.unwrap(); logctx.cleanup_successful(); diff --git a/nexus/db-queries/src/db/datastore/physical_disk.rs b/nexus/db-queries/src/db/datastore/physical_disk.rs index ecb583ee29..f8d0282117 100644 --- a/nexus/db-queries/src/db/datastore/physical_disk.rs +++ b/nexus/db-queries/src/db/datastore/physical_disk.rs @@ -13,6 +13,8 @@ use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::PhysicalDisk; +use crate::db::model::PhysicalDiskKind; +use crate::db::model::PhysicalDiskState; use crate::db::model::Sled; use crate::db::pagination::paginated; use async_bb8_diesel::AsyncRunQueryDsl; @@ -26,9 +28,44 @@ use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; +use omicron_common::api::external::UpdateResult; use uuid::Uuid; impl DataStore { + /// - Sled Agents like to look up physical disks by "Vendor, Serial, Model" + /// - The external API likes to look up physical disks by UUID + /// - LookupPath objects are opinionated about how they perform lookups. 
They + /// support "primary keys" or "names", but they're opinionated about the + /// name objects being a single string. + /// + /// This function bridges that gap, by allowing the external API to + /// translate "UUID" type into a "Vendor, Serial, Model" type which can + /// be used internally. + pub async fn physical_disk_id_to_name_no_auth( + &self, + id: Uuid, + ) -> Result<(String, String, String), Error> { + use db::schema::physical_disk::dsl; + + let conn = self.pool_connection_unauthorized().await?; + + dsl::physical_disk + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(id)) + .select((dsl::vendor, dsl::serial, dsl::model)) + .get_result_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::PhysicalDisk, + LookupType::ById(id), + ), + ) + }) + } + /// Stores a new physical disk in the database. /// /// - If the Vendor, Serial, and Model fields are the same as an existing @@ -109,6 +146,37 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + pub async fn physical_disk_deactivate( + &self, + opctx: &OpContext, + authz_physical_disk: &authz::PhysicalDisk, + ) -> UpdateResult { + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + use db::schema::physical_disk::dsl; + + let (vendor, serial, model) = authz_physical_disk.id(); + + let conn = self.pool_connection_authorized(opctx).await?; + + diesel::update(dsl::physical_disk) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::vendor.eq(vendor)) + .filter(dsl::serial.eq(serial)) + .filter(dsl::model.eq(model)) + .filter(dsl::variant.eq(PhysicalDiskKind::U2)) + .filter(dsl::state.eq(PhysicalDiskState::Active)) + .set(dsl::state.eq(PhysicalDiskState::Draining)) + .returning(PhysicalDisk::as_returning()) + .get_result_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource(authz_physical_disk), + ) + }) + } + /// Deletes a disk from the 
database. pub async fn physical_disk_delete( &self, @@ -174,12 +242,10 @@ mod test { } // Only checking some fields: - // - The UUID of the disk may actually not be the same as the upserted one; - // the "vendor/serial/model" value is the more critical unique identifier. - // NOTE: Could we derive a UUID from the VSM values? // - The 'time' field precision can be modified slightly when inserted into // the DB. - fn assert_disks_equal_ignore_uuid(lhs: &PhysicalDisk, rhs: &PhysicalDisk) { + fn assert_disks_equal_ignore_time(lhs: &PhysicalDisk, rhs: &PhysicalDisk) { + assert_eq!(lhs.uuid(), rhs.uuid()); assert_eq!(lhs.time_deleted().is_some(), rhs.time_deleted().is_some()); assert_eq!(lhs.vendor, rhs.vendor); assert_eq!(lhs.serial, rhs.serial); @@ -189,9 +255,9 @@ mod test { } #[tokio::test] - async fn physical_disk_upsert_different_uuid_idempotent() { + async fn physical_disk_upsert_uuid_generation_deterministic() { let logctx = dev::test_setup_log( - "physical_disk_upsert_different_uuid_idempotent", + "physical_disk_upsert_uuid_generation_deterministic", ); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; @@ -212,7 +278,7 @@ mod test { .await .expect("Failed first attempt at upserting disk"); assert_eq!(disk.uuid(), first_observed_disk.uuid()); - assert_disks_equal_ignore_uuid(&disk, &first_observed_disk); + assert_disks_equal_ignore_time(&disk, &first_observed_disk); // Observe the inserted disk let pagparams = list_disk_params(); @@ -222,9 +288,12 @@ mod test { .expect("Failed to list physical disks"); assert_eq!(disks.len(), 1); assert_eq!(disk.uuid(), disks[0].uuid()); - assert_disks_equal_ignore_uuid(&disk, &disks[0]); + assert_disks_equal_ignore_time(&disk, &disks[0]); - // Insert the same disk, with a different UUID primary key + // Insert the same disk, but don't re-state the UUID. + // + // The rest of this test relies on the UUID derivation being + // deterministic. 
let disk_again = PhysicalDisk::new( String::from("Oxide"), String::from("123"), @@ -232,15 +301,14 @@ mod test { PhysicalDiskKind::U2, sled_id, ); + // With the same input parameters, the UUID should be deterministic. + assert_eq!(disk.uuid(), disk_again.uuid()); + let second_observed_disk = datastore .physical_disk_upsert(&opctx, disk_again.clone()) .await .expect("Failed second upsert of physical disk"); - // This check is pretty important - note that we return the original - // UUID, not the new one. - assert_ne!(disk_again.uuid(), second_observed_disk.uuid()); - assert_eq!(disk_again.id(), second_observed_disk.id()); - assert_disks_equal_ignore_uuid(&disk_again, &second_observed_disk); + assert_disks_equal_ignore_time(&disk_again, &second_observed_disk); assert!( first_observed_disk.time_modified() <= second_observed_disk.time_modified() @@ -250,13 +318,9 @@ mod test { .sled_list_physical_disks(&opctx, sled_id, &pagparams) .await .expect("Failed to re-list physical disks"); - - // We'll use the old primary key assert_eq!(disks.len(), 1); - assert_eq!(disk.uuid(), disks[0].uuid()); - assert_ne!(disk_again.uuid(), disks[0].uuid()); - assert_disks_equal_ignore_uuid(&disk, &disks[0]); - assert_disks_equal_ignore_uuid(&disk_again, &disks[0]); + assert_disks_equal_ignore_time(&disk, &disks[0]); + assert_disks_equal_ignore_time(&disk_again, &disks[0]); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -296,7 +360,7 @@ mod test { first_observed_disk.time_modified() <= second_observed_disk.time_modified() ); - assert_disks_equal_ignore_uuid( + assert_disks_equal_ignore_time( &first_observed_disk, &second_observed_disk, ); diff --git a/nexus/db-queries/src/db/lookup.rs b/nexus/db-queries/src/db/lookup.rs index 18ea369685..a04677837a 100644 --- a/nexus/db-queries/src/db/lookup.rs +++ b/nexus/db-queries/src/db/lookup.rs @@ -371,15 +371,15 @@ impl<'a> LookupPath<'a> { /// Select a resource of type PhysicalDisk, identified by its id pub fn physical_disk( self, - 
vendor: &str, - serial: &str, - model: &str, + vendor: String, + serial: String, + model: String, ) -> PhysicalDisk<'a> { PhysicalDisk::PrimaryKey( Root { lookup_root: self }, - vendor.to_string(), - serial.to_string(), - model.to_string(), + vendor, + serial, + model, ) } diff --git a/nexus/db-queries/src/db/queries/region_allocation.rs b/nexus/db-queries/src/db/queries/region_allocation.rs index 3c37bf6b2e..03a44b3301 100644 --- a/nexus/db-queries/src/db/queries/region_allocation.rs +++ b/nexus/db-queries/src/db/queries/region_allocation.rs @@ -7,7 +7,7 @@ use crate::db::alias::ExpressionAlias; use crate::db::cast_uuid_as_bytea::CastUuidToBytea; use crate::db::datastore::REGION_REDUNDANCY_THRESHOLD; -use crate::db::model::{Dataset, DatasetKind, Region}; +use crate::db::model::{Dataset, DatasetKind, PhysicalDiskState, Region}; use crate::db::pool::DbConnection; use crate::db::subquery::{AsQuerySource, Cte, CteBuilder, CteQuery}; use crate::db::true_or_cast_error::{matches_sentinel, TrueOrCastError}; @@ -107,6 +107,8 @@ struct CandidateDatasets { impl CandidateDatasets { fn new(candidate_zpools: &CandidateZpools, seed: u128) -> Self { use crate::db::schema::dataset::dsl as dataset_dsl; + use crate::db::schema::physical_disk::dsl as physical_disk_dsl; + use crate::db::schema::zpool::dsl as zpool_dsl; use candidate_zpools::dsl as candidate_zpool_dsl; let seed_bytes = seed.to_le_bytes(); @@ -114,12 +116,26 @@ impl CandidateDatasets { let query: Box> = Box::new( dataset_dsl::dataset - .inner_join(candidate_zpools.query_source().on( - dataset_dsl::pool_id.eq(candidate_zpool_dsl::pool_id), - )) + // Access non-deleted datasets for Crucible .filter(dataset_dsl::time_deleted.is_null()) .filter(dataset_dsl::size_used.is_not_null()) .filter(dataset_dsl::kind.eq(DatasetKind::Crucible)) + // Access datasets from disks that are "Active" + .inner_join( + zpool_dsl::zpool + .on(zpool_dsl::id.eq(dataset_dsl::pool_id)), + ) + .inner_join(physical_disk_dsl::physical_disk.on( + 
physical_disk_dsl::id.eq(zpool_dsl::physical_disk_id), + )) + .filter( + physical_disk_dsl::state.eq(PhysicalDiskState::Active), + ) + // Only consider datasets from distinct zpools (and + // therefore, distinct disks). + .inner_join(candidate_zpools.query_source().on( + dataset_dsl::pool_id.eq(candidate_zpool_dsl::pool_id), + )) .distinct_on(dataset_dsl::pool_id) .order_by(( dataset_dsl::pool_id, diff --git a/nexus/preprocessed_configs/config.xml b/nexus/preprocessed_configs/config.xml new file mode 100644 index 0000000000..9b13f12aea --- /dev/null +++ b/nexus/preprocessed_configs/config.xml @@ -0,0 +1,41 @@ + + + + + trace + true + + + 8123 + 9000 + 9004 + + ./ + + true + + + + + + + ::/0 + + + default + default + 1 + + + + + + + + + + + \ No newline at end of file diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs index ec3f11dc6f..429c304fb0 100644 --- a/nexus/src/app/sled.rs +++ b/nexus/src/app/sled.rs @@ -4,6 +4,7 @@ //! Sleds, and the hardware and services within them. +use crate::external_api::params; use crate::internal_api::params::{ PhysicalDiskDeleteRequest, PhysicalDiskPutRequest, SledAgentStartupInfo, SledRole, ZpoolPutRequest, @@ -18,6 +19,7 @@ use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; +use omicron_common::api::external::UpdateResult; use sled_agent_client::Client as SledAgentClient; use std::net::SocketAddrV6; use std::sync::Arc; @@ -162,6 +164,19 @@ impl super::Nexus { // Physical disks + pub async fn physical_disk_lookup<'a>( + &'a self, + opctx: &'a OpContext, + disk_selector: ¶ms::PhysicalDiskPath, + ) -> Result, Error> { + let (v, s, m) = self + .db_datastore + .physical_disk_id_to_name_no_auth(disk_selector.disk_id) + .await?; + + Ok(LookupPath::new(&opctx, &self.db_datastore).physical_disk(v, s, m)) + } + pub(crate) async fn sled_list_physical_disks( &self, opctx: &OpContext, @@ -181,6 
+196,24 @@ impl super::Nexus { self.db_datastore.physical_disk_list(&opctx, pagparams).await } + pub async fn physical_disk_update( + &self, + opctx: &OpContext, + physical_disk_lookup: &lookup::PhysicalDisk<'_>, + update_command: params::PhysicalDiskUpdate, + ) -> UpdateResult { + let (.., authz_physical_disk) = + physical_disk_lookup.lookup_for(authz::Action::Modify).await?; + + match update_command { + params::PhysicalDiskUpdate::Disable => { + self.db_datastore + .physical_disk_deactivate(&opctx, &authz_physical_disk) + .await + } + } + } + /// Upserts a physical disk into the database, updating it if it already exists. pub(crate) async fn upsert_physical_disk( &self, @@ -247,9 +280,9 @@ impl super::Nexus { let (_authz_disk, db_disk) = LookupPath::new(&opctx, &self.db_datastore) .physical_disk( - &info.disk_vendor, - &info.disk_serial, - &info.disk_model, + info.disk_vendor, + info.disk_serial, + info.disk_model, ) .fetch() .await?; diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index fd18cb2dab..b5d6cd8257 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -229,6 +229,8 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(sled_instance_list)?; api.register(sled_physical_disk_list)?; api.register(physical_disk_list)?; + api.register(physical_disk_view)?; + api.register(physical_disk_update)?; api.register(switch_list)?; api.register(switch_view)?; api.register(sled_list_uninitialized)?; @@ -5273,6 +5275,57 @@ async fn physical_disk_list( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } +/// Get a physical disk +#[endpoint { + method = GET, + path = "/v1/system/hardware/disks/{disk_id}", + tags = ["system/hardware"], +}] +async fn physical_disk_view( + rqctx: RequestContext>, + path_params: Path, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; 
+ let path = path_params.into_inner(); + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + + let (.., physical_disk) = + nexus.physical_disk_lookup(&opctx, &path).await?.fetch().await?; + Ok(HttpResponseOk(physical_disk.into())) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Update a physical disk's state +#[endpoint { + method = PUT, + path = "/v1/system/hardware/disks/{disk_id}", + tags = ["system/hardware"], +}] +async fn physical_disk_update( + rqctx: RequestContext>, + path_params: Path, + update_command: TypedBody, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; + let update_command = update_command.into_inner(); + let path = path_params.into_inner(); + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + + let physical_disk_lookup = + nexus.physical_disk_lookup(&opctx, &path).await?; + let physical_disk = nexus + .physical_disk_update(&opctx, &physical_disk_lookup, update_command) + .await?; + Ok(HttpResponseOk(physical_disk.into())) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + // Switches /// List switches diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 271025f7a7..91921c6e63 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -65,6 +65,14 @@ pub const RACK_UUID: &str = "c19a698f-c6f9-4a17-ae30-20d711b8f7dc"; pub const SWITCH_UUID: &str = "dae4e1f1-410e-4314-bff1-fec0504be07e"; pub const OXIMETER_UUID: &str = "39e6175b-4df2-4730-b11d-cbc1e60a2e78"; pub const PRODUCER_UUID: &str = "a6458b7d-87c3-4483-be96-854d814c20de"; + +/// This is not random: It's a v5 UUID derived from: +/// +/// Uuid::new_v5( +/// HARDWARE_UUID_NAMESPACE, +/// ("test-vendor", "test-serial", "test-model"), +/// ) +pub const PHYSICAL_DISK_UUID: &str = "25849923-2232-5d20-b939-ffee5bc3dd89"; pub const RACK_SUBNET: &str = 
"fd00:1122:3344:0100::/56"; /// Password for the user created by the test suite diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index cd04bb6018..b5ab125e5b 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -15,6 +15,7 @@ use nexus_db_queries::authn; use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_db_queries::db::identity::Resource; use nexus_test_utils::resource_helpers::DiskTest; +use nexus_test_utils::PHYSICAL_DISK_UUID; use nexus_test_utils::RACK_UUID; use nexus_test_utils::SLED_AGENT_UUID; use nexus_test_utils::SWITCH_UUID; @@ -57,7 +58,9 @@ pub static DEMO_SLED_PROVISION_STATE: Lazy = pub static HARDWARE_SWITCH_URL: Lazy = Lazy::new(|| format!("/v1/system/hardware/switches/{}", SWITCH_UUID)); -pub const HARDWARE_DISK_URL: &'static str = "/v1/system/hardware/disks"; +pub const HARDWARE_DISKS_URL: &'static str = "/v1/system/hardware/disks"; +pub static HARDWARE_DISK_URL: Lazy = + Lazy::new(|| format!("/v1/system/hardware/disks/{}", PHYSICAL_DISK_UUID)); pub static HARDWARE_SLED_DISK_URL: Lazy = Lazy::new(|| { format!("/v1/system/hardware/sleds/{}/disks", SLED_AGENT_UUID) }); @@ -1935,12 +1938,26 @@ pub static VERIFY_ENDPOINTS: Lazy> = Lazy::new(|| { }, VerifyEndpoint { - url: &HARDWARE_DISK_URL, + url: &HARDWARE_DISKS_URL, visibility: Visibility::Public, unprivileged_access: UnprivilegedAccess::None, allowed_methods: vec![AllowedMethod::Get], }, + VerifyEndpoint { + url: &HARDWARE_DISK_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value( + params::PhysicalDiskUpdate::Disable, + ).unwrap() + ) + ], + }, + VerifyEndpoint { url: &HARDWARE_SLED_DISK_URL, visibility: Visibility::Public, diff --git a/nexus/tests/integration_tests/sleds.rs b/nexus/tests/integration_tests/sleds.rs index b551cf51b5..c543f822b0 
100644 --- a/nexus/tests/integration_tests/sleds.rs +++ b/nexus/tests/integration_tests/sleds.rs @@ -12,13 +12,15 @@ use nexus_test_utils::resource_helpers::create_instance; use nexus_test_utils::resource_helpers::create_physical_disk; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::delete_physical_disk; +use nexus_test_utils::resource_helpers::object_put; use nexus_test_utils::resource_helpers::objects_list_page_authz; use nexus_test_utils::start_sled_agent; use nexus_test_utils::SLED_AGENT_UUID; use nexus_test_utils_macros::nexus_test; -use nexus_types::external_api::params::PhysicalDiskKind; -use nexus_types::external_api::views::SledInstance; -use nexus_types::external_api::views::{PhysicalDisk, Sled}; +use nexus_types::external_api::params::{PhysicalDiskKind, PhysicalDiskUpdate}; +use nexus_types::external_api::views::{ + PhysicalDisk, PhysicalDiskState, Sled, SledInstance, +}; use omicron_sled_agent::sim; use std::str::FromStr; use uuid::Uuid; @@ -37,6 +39,14 @@ async fn physical_disks_list( objects_list_page_authz::(client, url).await.items } +async fn physical_disks_update( + client: &ClientTestContext, + url: &str, + state: &PhysicalDiskUpdate, +) -> PhysicalDisk { + object_put::(client, url, state).await +} + async fn sled_instance_list( client: &ClientTestContext, url: &str, @@ -91,7 +101,7 @@ async fn test_sleds_list(cptestctx: &ControlPlaneTestContext) { } #[nexus_test] -async fn test_physical_disk_create_list_delete( +async fn test_physical_disk_create_list_disable_delete( cptestctx: &ControlPlaneTestContext, ) { let external_client = &cptestctx.external_client; @@ -119,7 +129,32 @@ async fn test_physical_disk_create_list_delete( .await; let disks = physical_disks_list(&external_client, &disks_url).await; assert_eq!(disks.len(), disks_initial.len() + 1); - let _new_disk = disks + let new_disk = disks + .iter() + .find(|found_disk| { + found_disk.vendor == "v" + && found_disk.serial == "s" + && 
found_disk.model == "m" + }) + .expect("did not find the new disk"); + assert_eq!(new_disk.state, PhysicalDiskState::Active); + + // Disable the disk, marking it as "not-for-use". + let disk_url = + format!("/v1/system/hardware/disks/{}", new_disk.identity.id); + let disk = physical_disks_update( + &external_client, + &disk_url, + &PhysicalDiskUpdate::Disable, + ) + .await; + assert_eq!(disk.state, PhysicalDiskState::Draining); + + // Confirm that listing the disks again shows this new state too + let disks = physical_disks_list(&external_client, &disks_url).await; + assert_eq!(disks.len(), 1); + assert_eq!(disks[0].state, PhysicalDiskState::Draining); + let new_disk = disks .iter() .find(|found_disk| { found_disk.vendor == "v" @@ -127,6 +162,7 @@ async fn test_physical_disk_create_list_delete( && found_disk.model == "m" }) .expect("did not find the new disk"); + assert_eq!(new_disk.state, PhysicalDiskState::Draining); // Delete that disk using the internal API, observe it in the external API delete_physical_disk(&internal_client, "v", "s", "m", sled_id).await; diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 7ed73fd30a..b6e90fa1b3 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -126,6 +126,8 @@ networking_switch_port_apply_settings POST /v1/system/hardware/switch-por networking_switch_port_clear_settings DELETE /v1/system/hardware/switch-port/{port}/settings networking_switch_port_list GET /v1/system/hardware/switch-port physical_disk_list GET /v1/system/hardware/disks +physical_disk_update PUT /v1/system/hardware/disks/{disk_id} +physical_disk_view GET /v1/system/hardware/disks/{disk_id} rack_list GET /v1/system/hardware/racks rack_view GET /v1/system/hardware/racks/{rack_id} sled_add POST /v1/system/hardware/sleds diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 6cb878084d..31554d0ed0 100644 --- 
a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -87,6 +87,18 @@ id_path_param!(GroupPath, group_id, "group"); // ID that can be used to deterministically generate the UUID. id_path_param!(SledPath, sled_id, "sled"); id_path_param!(SwitchPath, switch_id, "switch"); +id_path_param!(PhysicalDiskPath, disk_id, "physical_disk"); + +/// Updateable properties of a `PhysicalDisk`. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +#[serde(rename_all = "snake_case")] +pub enum PhysicalDiskUpdate { + /// Prevents the disk from being used for future provisioning. + /// + /// This does not immediately cause services to be evacuated from using + /// the underlying disk, that process may happen asynchronously. + Disable, +} // Internal API parameters id_path_param!(BlueprintPath, blueprint_id, "blueprint"); diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index 84648f109f..0a0c14281d 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -473,6 +473,18 @@ pub struct Switch { // PHYSICAL DISKS +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, JsonSchema)] +#[serde(rename_all = "snake_case")] +pub enum PhysicalDiskState { + /// The disk is actively in-use. + Active, + /// The disk has been marked for removal, and is transitioning + /// to the Inactive state. + Draining, + /// The disk is not in-use by the system. 
+ Inactive, +} + /// View of a Physical Disk /// /// Physical disks reside in a particular sled and are used to store both @@ -488,8 +500,9 @@ pub struct PhysicalDisk { pub vendor: String, pub serial: String, pub model: String, - pub form_factor: PhysicalDiskKind, + + pub state: PhysicalDiskState, } // SILO USERS diff --git a/openapi/nexus.json b/openapi/nexus.json index f42841dcf6..fb07abceca 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -3859,6 +3859,92 @@ } } }, + "/v1/system/hardware/disks/{disk_id}": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "Get a physical disk", + "operationId": "physical_disk_view", + "parameters": [ + { + "in": "path", + "name": "disk_id", + "description": "ID of the physical_disk", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PhysicalDisk" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "system/hardware" + ], + "summary": "Update a physical disk's state", + "operationId": "physical_disk_update", + "parameters": [ + { + "in": "path", + "name": "disk_id", + "description": "ID of the physical_disk", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PhysicalDiskUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PhysicalDisk" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/v1/system/hardware/racks": { "get": { "tags": [ @@ 
-13872,6 +13958,9 @@ "type": "string", "format": "uuid" }, + "state": { + "$ref": "#/components/schemas/PhysicalDiskState" + }, "time_created": { "description": "timestamp when this resource was created", "type": "string", @@ -13891,6 +13980,7 @@ "id", "model", "serial", + "state", "time_created", "time_modified", "vendor" @@ -13925,6 +14015,43 @@ "items" ] }, + "PhysicalDiskState": { + "oneOf": [ + { + "description": "The disk is actively in-use.", + "type": "string", + "enum": [ + "active" + ] + }, + { + "description": "The disk has been marked for removal, and is transitioning to the Inactive state.", + "type": "string", + "enum": [ + "draining" + ] + }, + { + "description": "The disk is not in-use by the system.", + "type": "string", + "enum": [ + "inactive" + ] + } + ] + }, + "PhysicalDiskUpdate": { + "description": "Updateable properties of a `PhysicalDisk`.", + "oneOf": [ + { + "description": "Prevents the disk from being used for future provisioning.\n\nThis does not immediately cause services to be evacuated from using the underlying disk, that process may happen asynchronously.", + "type": "string", + "enum": [ + "disable" + ] + } + ] + }, "Ping": { "type": "object", "properties": { diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 87a22d1adc..59d4bd215b 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -295,6 +295,20 @@ CREATE TYPE IF NOT EXISTS omicron.public.physical_disk_kind AS ENUM ( 'u2' ); +CREATE TYPE IF NOT EXISTS omicron.public.physical_disk_state AS ENUM ( + -- The disk is actively being used, and should be a target + -- for future allocations. + 'active', + -- The disk may still be in usage, but should not be used + -- for subsequent allocations. + -- + -- This state could be set when we have, for example, datasets + -- actively being used by the disk which we haven't fully retired. + 'draining', + -- The disk is not currently being used. + 'inactive' +); + -- A physical disk which exists inside the rack. 
CREATE TABLE IF NOT EXISTS omicron.public.physical_disk ( id UUID PRIMARY KEY, @@ -312,6 +326,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.physical_disk ( -- FK into the Sled table sled_id UUID NOT NULL, + -- Describes how the control plane manages this disk + state omicron.public.physical_disk_state NOT NULL, + -- This constraint should be upheld, even for deleted disks -- in the fleet. CONSTRAINT vendor_serial_model_unique UNIQUE ( diff --git a/schema/crdb/xx.xx.xx/up01.sql b/schema/crdb/xx.xx.xx/up01.sql new file mode 100644 index 0000000000..78edca8882 --- /dev/null +++ b/schema/crdb/xx.xx.xx/up01.sql @@ -0,0 +1,13 @@ +CREATE TYPE IF NOT EXISTS omicron.public.physical_disk_state AS ENUM ( + -- The disk is actively being used, and should be a target + -- for future allocations. + 'active', + -- The disk may still be in usage, but should not be used + -- for subsequent allocations. + -- + -- This state could be set when we have, for example, datasets + -- actively being used by the disk which we haven't fully retired. + 'draining', + -- The disk is not currently being used. 
+ 'inactive' +); diff --git a/schema/crdb/xx.xx.xx/up02.sql b/schema/crdb/xx.xx.xx/up02.sql new file mode 100644 index 0000000000..8e72f0d3d4 --- /dev/null +++ b/schema/crdb/xx.xx.xx/up02.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.physical_disk ADD COLUMN IF NOT EXISTS state omicron.public.physical_disk_state NOT NULL DEFAULT 'active'; diff --git a/schema/crdb/xx.xx.xx/up03.sql b/schema/crdb/xx.xx.xx/up03.sql new file mode 100644 index 0000000000..4aa981f058 --- /dev/null +++ b/schema/crdb/xx.xx.xx/up03.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.physical_disk ALTER COLUMN state DROP DEFAULT; diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index ebe683e51a..e5a873534d 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -14,8 +14,7 @@ publish = false ### BEGIN HAKARI SECTION [dependencies] -ahash = { version = "0.8.7" } -aho-corasick = { version = "1.0.4" } +ahash = { version = "0.8.6" } anyhow = { version = "1.0.75", features = ["backtrace"] } base16ct = { version = "0.2.0", default-features = false, features = ["alloc"] } bit-set = { version = "0.5.3" } @@ -28,49 +27,48 @@ byteorder = { version = "1.5.0" } bytes = { version = "1.5.0", features = ["serde"] } chrono = { version = "0.4.31", features = ["alloc", "serde"] } cipher = { version = "0.4.4", default-features = false, features = ["block-padding", "zeroize"] } -clap = { version = "4.5.0", features = ["cargo", "derive", "env", "wrap_help"] } -clap_builder = { version = "4.5.0", default-features = false, features = ["cargo", "color", "env", "std", "suggestions", "usage", "wrap_help"] } -console = { version = "0.15.8" } +clap = { version = "4.4.3", features = ["derive", "env", "wrap_help"] } +clap_builder = { version = "4.4.2", default-features = false, features = ["color", "env", "std", "suggestions", "usage", "wrap_help"] } +console = { version = "0.15.7" } const-oid = { version = "0.9.5", default-features = false, features = ["db", "std"] } crossbeam-epoch = { 
version = "0.9.15" } crossbeam-utils = { version = "0.8.16" } -crossterm = { version = "0.27.0", features = ["event-stream", "serde"] } crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } der = { version = "0.7.8", default-features = false, features = ["derive", "flagset", "oid", "pem", "std"] } diesel = { version = "2.1.4", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } digest = { version = "0.10.7", features = ["mac", "oid", "std"] } -either = { version = "1.10.0" } +either = { version = "1.9.0" } elliptic-curve = { version = "0.13.8", features = ["ecdh", "hazmat", "pem", "std"] } ff = { version = "0.13.0", default-features = false, features = ["alloc"] } flate2 = { version = "1.0.28" } -futures = { version = "0.3.30" } -futures-channel = { version = "0.3.30", features = ["sink"] } -futures-core = { version = "0.3.30" } -futures-io = { version = "0.3.30", default-features = false, features = ["std"] } -futures-sink = { version = "0.3.30" } -futures-task = { version = "0.3.30", default-features = false, features = ["std"] } -futures-util = { version = "0.3.30", features = ["channel", "io", "sink"] } +futures = { version = "0.3.29" } +futures-channel = { version = "0.3.29", features = ["sink"] } +futures-core = { version = "0.3.29" } +futures-io = { version = "0.3.29", default-features = false, features = ["std"] } +futures-sink = { version = "0.3.29" } +futures-task = { version = "0.3.29", default-features = false, features = ["std"] } +futures-util = { version = "0.3.29", features = ["channel", "io", "sink"] } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "2739c18e80697aa6bc235c935176d14b4d757ee9", features = ["std"] } generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } getrandom = { version = "0.2.10", 
default-features = false, features = ["js", "rdrand", "std"] } group = { version = "0.13.0", default-features = false, features = ["alloc"] } -hashbrown = { version = "0.14.3", features = ["raw"] } +hashbrown = { version = "0.13.2" } hex = { version = "0.4.3", features = ["serde"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } hyper = { version = "0.14.27", features = ["full"] } -indexmap = { version = "2.2.3", features = ["serde"] } +indexmap = { version = "2.1.0", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } ipnetwork = { version = "0.20.0", features = ["schemars"] } itertools = { version = "0.10.5" } lalrpop-util = { version = "0.19.12" } lazy_static = { version = "1.4.0", default-features = false, features = ["spin_no_std"] } -libc = { version = "0.2.153", features = ["extra_traits"] } +libc = { version = "0.2.151", features = ["extra_traits"] } log = { version = "0.4.20", default-features = false, features = ["std"] } managed = { version = "0.8.0", default-features = false, features = ["alloc", "map"] } memchr = { version = "2.6.3" } nom = { version = "7.1.3" } num-bigint = { version = "0.4.4", features = ["rand"] } -num-integer = { version = "0.1.46", features = ["i128"] } +num-integer = { version = "0.1.45", features = ["i128"] } num-iter = { version = "0.1.43", default-features = false, features = ["i128"] } num-traits = { version = "0.2.16", features = ["i128", "libm"] } openapiv3 = { version = "2.0.0", default-features = false, features = ["skip_serializing_defaults"] } @@ -78,51 +76,48 @@ pem-rfc7468 = { version = "0.7.0", default-features = false, features = ["std"] petgraph = { version = "0.6.4", features = ["serde-1"] } postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] } -predicates = { version = 
"3.1.0" } -proc-macro2 = { version = "1.0.78" } +predicates = { version = "3.0.4" } +proc-macro2 = { version = "1.0.69" } rand = { version = "0.8.5" } rand_chacha = { version = "0.3.1", default-features = false, features = ["std"] } -regex = { version = "1.10.3" } -regex-automata = { version = "0.4.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } +regex = { version = "1.10.2" } +regex-automata = { version = "0.4.3", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } regex-syntax = { version = "0.8.2" } reqwest = { version = "0.11.22", features = ["blocking", "json", "rustls-tls", "stream"] } -ring = { version = "0.17.8", features = ["std"] } -schemars = { version = "0.8.16", features = ["bytes", "chrono", "uuid1"] } -semver = { version = "1.0.22", features = ["serde"] } -serde = { version = "1.0.196", features = ["alloc", "derive", "rc"] } -serde_json = { version = "1.0.114", features = ["raw_value", "unbounded_depth"] } +ring = { version = "0.17.7", features = ["std"] } +schemars = { version = "0.8.13", features = ["bytes", "chrono", "uuid1"] } +semver = { version = "1.0.20", features = ["serde"] } +serde = { version = "1.0.193", features = ["alloc", "derive", "rc"] } +serde_json = { version = "1.0.108", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } similar = { version = "2.3.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } -socket2 = { version = "0.5.5", default-features = false, features = ["all"] } +snafu = { version = "0.7.5", features = ["futures"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } -strum = { version = "0.25.0", features = ["derive"] } subtle = { version = "2.5.0" } syn-dff4ba8e3ae991db = { 
package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } -syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.48", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.32", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } -tokio = { version = "1.36.0", features = ["full", "test-util"] } +tokio = { version = "1.35.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } tokio-stream = { version = "0.1.14", features = ["net"] } tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } toml = { version = "0.7.8" } -toml_edit-3c51e837cfc5589a = { package = "toml_edit", version = "0.22.6", features = ["serde"] } +toml_edit-647d43efb71741da = { package = "toml_edit", version = "0.21.0", features = ["serde"] } tracing = { version = "0.1.37", features = ["log"] } trust-dns-proto = { version = "0.22.0" } unicode-bidi = { version = "0.3.13" } unicode-normalization = { version = "0.1.22" } usdt = { version = "0.3.5" } -usdt-impl = { version = "0.5.0", default-features = false, features = ["asm", "des"] } -uuid = { version = "1.7.0", features = ["serde", "v4"] } +uuid = { version = "1.6.1", features = ["serde", "v4", "v5"] } yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } -zerocopy = { version = "0.7.32", features = ["derive", "simd"] } +zerocopy = { version = "0.7.31", features = ["derive", "simd"] } zeroize = { version = "1.7.0", features = ["std", "zeroize_derive"] } zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [build-dependencies] -ahash = { version = "0.8.7" } -aho-corasick = { version = "1.0.4" } +ahash = { version = "0.8.6" } anyhow = { version = "1.0.75", features = 
["backtrace"] } base16ct = { version = "0.2.0", default-features = false, features = ["alloc"] } bit-set = { version = "0.5.3" } @@ -135,49 +130,48 @@ byteorder = { version = "1.5.0" } bytes = { version = "1.5.0", features = ["serde"] } chrono = { version = "0.4.31", features = ["alloc", "serde"] } cipher = { version = "0.4.4", default-features = false, features = ["block-padding", "zeroize"] } -clap = { version = "4.5.0", features = ["cargo", "derive", "env", "wrap_help"] } -clap_builder = { version = "4.5.0", default-features = false, features = ["cargo", "color", "env", "std", "suggestions", "usage", "wrap_help"] } -console = { version = "0.15.8" } +clap = { version = "4.4.3", features = ["derive", "env", "wrap_help"] } +clap_builder = { version = "4.4.2", default-features = false, features = ["color", "env", "std", "suggestions", "usage", "wrap_help"] } +console = { version = "0.15.7" } const-oid = { version = "0.9.5", default-features = false, features = ["db", "std"] } crossbeam-epoch = { version = "0.9.15" } crossbeam-utils = { version = "0.8.16" } -crossterm = { version = "0.27.0", features = ["event-stream", "serde"] } crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } der = { version = "0.7.8", default-features = false, features = ["derive", "flagset", "oid", "pem", "std"] } diesel = { version = "2.1.4", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } digest = { version = "0.10.7", features = ["mac", "oid", "std"] } -either = { version = "1.10.0" } +either = { version = "1.9.0" } elliptic-curve = { version = "0.13.8", features = ["ecdh", "hazmat", "pem", "std"] } ff = { version = "0.13.0", default-features = false, features = ["alloc"] } flate2 = { version = "1.0.28" } -futures = { version = "0.3.30" } -futures-channel = { version = "0.3.30", features = ["sink"] } -futures-core = { version = "0.3.30" } 
-futures-io = { version = "0.3.30", default-features = false, features = ["std"] } -futures-sink = { version = "0.3.30" } -futures-task = { version = "0.3.30", default-features = false, features = ["std"] } -futures-util = { version = "0.3.30", features = ["channel", "io", "sink"] } +futures = { version = "0.3.29" } +futures-channel = { version = "0.3.29", features = ["sink"] } +futures-core = { version = "0.3.29" } +futures-io = { version = "0.3.29", default-features = false, features = ["std"] } +futures-sink = { version = "0.3.29" } +futures-task = { version = "0.3.29", default-features = false, features = ["std"] } +futures-util = { version = "0.3.29", features = ["channel", "io", "sink"] } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "2739c18e80697aa6bc235c935176d14b4d757ee9", features = ["std"] } generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] } group = { version = "0.13.0", default-features = false, features = ["alloc"] } -hashbrown = { version = "0.14.3", features = ["raw"] } +hashbrown = { version = "0.13.2" } hex = { version = "0.4.3", features = ["serde"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } hyper = { version = "0.14.27", features = ["full"] } -indexmap = { version = "2.2.3", features = ["serde"] } +indexmap = { version = "2.1.0", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } ipnetwork = { version = "0.20.0", features = ["schemars"] } itertools = { version = "0.10.5" } lalrpop-util = { version = "0.19.12" } lazy_static = { version = "1.4.0", default-features = false, features = ["spin_no_std"] } -libc = { version = "0.2.153", features = ["extra_traits"] } +libc = { version = "0.2.151", features = ["extra_traits"] } log = { version = "0.4.20", default-features = 
false, features = ["std"] } managed = { version = "0.8.0", default-features = false, features = ["alloc", "map"] } memchr = { version = "2.6.3" } nom = { version = "7.1.3" } num-bigint = { version = "0.4.4", features = ["rand"] } -num-integer = { version = "0.1.46", features = ["i128"] } +num-integer = { version = "0.1.45", features = ["i128"] } num-iter = { version = "0.1.43", default-features = false, features = ["i128"] } num-traits = { version = "0.2.16", features = ["i128", "libm"] } openapiv3 = { version = "2.0.0", default-features = false, features = ["skip_serializing_defaults"] } @@ -185,102 +179,110 @@ pem-rfc7468 = { version = "0.7.0", default-features = false, features = ["std"] petgraph = { version = "0.6.4", features = ["serde-1"] } postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] } -predicates = { version = "3.1.0" } -proc-macro2 = { version = "1.0.78" } +predicates = { version = "3.0.4" } +proc-macro2 = { version = "1.0.69" } rand = { version = "0.8.5" } rand_chacha = { version = "0.3.1", default-features = false, features = ["std"] } -regex = { version = "1.10.3" } -regex-automata = { version = "0.4.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } +regex = { version = "1.10.2" } +regex-automata = { version = "0.4.3", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } regex-syntax = { version = "0.8.2" } reqwest = { version = "0.11.22", features = ["blocking", "json", "rustls-tls", "stream"] } -ring = { version = "0.17.8", features = ["std"] } -schemars = { version = "0.8.16", features = ["bytes", "chrono", "uuid1"] } -semver = { version = "1.0.22", features = ["serde"] } -serde = { version = "1.0.196", features = 
["alloc", "derive", "rc"] } -serde_json = { version = "1.0.114", features = ["raw_value", "unbounded_depth"] } +ring = { version = "0.17.7", features = ["std"] } +schemars = { version = "0.8.13", features = ["bytes", "chrono", "uuid1"] } +semver = { version = "1.0.20", features = ["serde"] } +serde = { version = "1.0.193", features = ["alloc", "derive", "rc"] } +serde_json = { version = "1.0.108", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } similar = { version = "2.3.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } -socket2 = { version = "0.5.5", default-features = false, features = ["all"] } +snafu = { version = "0.7.5", features = ["futures"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } -strum = { version = "0.25.0", features = ["derive"] } subtle = { version = "2.5.0" } syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } -syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.48", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.32", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } time-macros = { version = "0.2.13", default-features = false, features = ["formatting", "parsing"] } -tokio = { version = "1.36.0", features = ["full", "test-util"] } +tokio = { version = "1.35.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } tokio-stream = { version = "0.1.14", features = ["net"] } tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } toml = { version = "0.7.8" } -toml_edit-3c51e837cfc5589a = { package = 
"toml_edit", version = "0.22.6", features = ["serde"] } +toml_edit-647d43efb71741da = { package = "toml_edit", version = "0.21.0", features = ["serde"] } tracing = { version = "0.1.37", features = ["log"] } trust-dns-proto = { version = "0.22.0" } unicode-bidi = { version = "0.3.13" } unicode-normalization = { version = "0.1.22" } usdt = { version = "0.3.5" } -usdt-impl = { version = "0.5.0", default-features = false, features = ["asm", "des"] } -uuid = { version = "1.7.0", features = ["serde", "v4"] } +uuid = { version = "1.6.1", features = ["serde", "v4", "v5"] } yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } -zerocopy = { version = "0.7.32", features = ["derive", "simd"] } +zerocopy = { version = "0.7.31", features = ["derive", "simd"] } zeroize = { version = "1.7.0", features = ["std", "zeroize_derive"] } zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [target.x86_64-unknown-linux-gnu.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -dof = { version = "0.3.0", default-features = false, features = ["des"] } +hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } -once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-unknown-linux-gnu.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -dof = { version = "0.3.0", default-features = false, features = ["des"] } +hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } -once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } +once_cell = { version = "1.19.0", features = 
["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } -once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } -once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.aarch64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } -once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.aarch64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = 
["std"] } +hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } -once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-unknown-illumos.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -dof = { version = "0.3.0", default-features = false, features = ["des"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } -once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } [target.x86_64-unknown-illumos.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -dof = { version = "0.3.0", default-features = false, features = ["des"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } -once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] }