diff --git a/Cargo.lock b/Cargo.lock index 24aba98c6a..2e3406717a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1690,6 +1690,28 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +[[package]] +name = "httptest" +version = "0.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f25cfb6def593d43fae1ead24861f217e93bc70768a45cc149a69b5f049df4" +dependencies = [ + "bstr", + "bytes", + "crossbeam-channel", + "form_urlencoded", + "futures", + "http", + "hyper", + "log", + "once_cell", + "regex", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", +] + [[package]] name = "hubpack" version = "0.1.0" @@ -2427,6 +2449,7 @@ dependencies = [ "headers", "hex", "http", + "httptest", "hyper", "ipnetwork", "lazy_static", diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 8ecbf493db..a8dc64ba16 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -517,6 +517,7 @@ pub enum ResourceType { Silo, SiloUser, ConsoleSession, + GlobalImage, Organization, Project, Dataset, @@ -1691,6 +1692,56 @@ pub struct NetworkInterface { // V6 address, at least one of which must be specified. 
} +#[derive( + Clone, + Debug, + Deserialize, + Serialize, + JsonSchema, + Eq, + PartialEq, + Ord, + PartialOrd, +)] +pub enum Digest { + Sha256(String), +} + +impl FromStr for Digest { + type Err = anyhow::Error; + fn from_str(s: &str) -> Result<Self, Self::Err> { + if s.starts_with("sha256:") { + let parts: Vec<&str> = s.split(':').collect(); + if parts.len() != 2 { + anyhow::bail!("digest string {} should have two parts", s); + } + + if parts[1].len() != 64 { + anyhow::bail!("sha256 length must be 64"); + } + + return Ok(Digest::Sha256(parts[1].to_string())); + } + + anyhow::bail!("invalid digest string {}", s); + } +} + +impl std::fmt::Display for Digest { + fn fmt( + &self, + f: &mut std::fmt::Formatter<'_>, + ) -> Result<(), std::fmt::Error> { + write!( + f, + "{}", + match self { + Digest::Sha256(value) => format!("sha256:{}", value), + } + ) + } +} + #[cfg(test)] mod test { use super::RouteDestination; @@ -1698,8 +1749,8 @@ mod test { use super::VpcFirewallRuleHostFilter; use super::VpcFirewallRuleTarget; use super::{ - ByteCount, L4Port, L4PortRange, Name, RoleName, VpcFirewallRuleAction, - VpcFirewallRuleDirection, VpcFirewallRuleFilter, + ByteCount, Digest, L4Port, L4PortRange, Name, RoleName, + VpcFirewallRuleAction, VpcFirewallRuleDirection, VpcFirewallRuleFilter, VpcFirewallRulePriority, VpcFirewallRuleProtocol, VpcFirewallRuleStatus, VpcFirewallRuleUpdate, VpcFirewallRuleUpdateParams, @@ -2176,4 +2227,28 @@ mod test { assert!("foo:foo".parse::().is_err()); assert!("foo".parse::().is_err()); } + + #[test] + fn test_digest() { + // No prefix + assert!( + "5cc9d1620911c280b0b1dad1413603702baccf340a1e74ade9d0521bcd826acf" + .parse::<Digest>() + .is_err() + ); + + // Valid sha256 + let actual: Digest = + "sha256:5cc9d1620911c280b0b1dad1413603702baccf340a1e74ade9d0521bcd826acf".to_string().parse().unwrap(); + assert_eq!( + actual, + Digest::Sha256("5cc9d1620911c280b0b1dad1413603702baccf340a1e74ade9d0521bcd826acf".to_string()), + ); + + // Too short for sha256 + 
assert!("sha256:5cc9d1620911c280b".parse::().is_err()); + + // Bad prefix + assert!("hash:super_random".parse::().is_err()); + } } diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index 5d7b9f7e71..3bd2683542 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -439,12 +439,13 @@ CREATE TABLE omicron.public.image ( /* Indicates that the object has been deleted */ time_deleted TIMESTAMPTZ, - /* Optional project UUID: Images may or may not be global */ - project_id UUID, - /* Optional volume ID: Images may exist without backing volumes */ - volume_id UUID, - /* Optional URL: Images may be backed by either a URL or a volume */ + project_id UUID NOT NULL, + volume_id UUID NOT NULL, + url STRING(8192), + version STRING(64), + digest TEXT, + block_size omicron.public.block_size NOT NULL, size_bytes INT NOT NULL ); @@ -454,6 +455,30 @@ CREATE UNIQUE INDEX on omicron.public.image ( ) WHERE time_deleted is NULL; +CREATE TABLE omicron.public.global_image ( + /* Identity metadata (resource) */ + id UUID PRIMARY KEY, + name STRING(63) NOT NULL, + description STRING(512) NOT NULL, + time_created TIMESTAMPTZ NOT NULL, + time_modified TIMESTAMPTZ NOT NULL, + /* Indicates that the object has been deleted */ + time_deleted TIMESTAMPTZ, + + volume_id UUID NOT NULL, + + url STRING(8192), + version STRING(64), + digest TEXT, + block_size omicron.public.block_size NOT NULL, + size_bytes INT NOT NULL +); + +CREATE UNIQUE INDEX on omicron.public.global_image ( + name +) WHERE + time_deleted is NULL; + CREATE TABLE omicron.public.snapshot ( /* Identity metadata (resource) */ id UUID PRIMARY KEY, diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 4ff012bc8c..57c4cc2178 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -127,6 +127,7 @@ openapiv3 = "1.0" regex = "1.5.5" subprocess = "0.2.8" term = "0.7" +httptest = "0.15.4" [dev-dependencies.openapi-lint] git = "https://github.com/oxidecomputer/openapi-lint" diff --git 
a/nexus/src/authz/api_resources.rs b/nexus/src/authz/api_resources.rs index a1c6f1e114..c9017ec3c5 100644 --- a/nexus/src/authz/api_resources.rs +++ b/nexus/src/authz/api_resources.rs @@ -250,6 +250,67 @@ impl AuthorizedResource for ConsoleSessionList { } } +#[derive(Clone, Copy, Debug)] +pub struct GlobalImageList; +/// Singleton representing the [`GlobalImageList`] itself for authz purposes +pub const GLOBAL_IMAGE_LIST: GlobalImageList = GlobalImageList; + +impl Eq for GlobalImageList {} +impl PartialEq for GlobalImageList { + fn eq(&self, _: &Self) -> bool { + // There is only one GlobalImageList. + true + } +} + +impl oso::PolarClass for GlobalImageList { + fn get_polar_class_builder() -> oso::ClassBuilder { + oso::Class::builder() + .with_equality_check() + .add_attribute_getter("fleet", |_x: &GlobalImageList| FLEET) + } +} + +impl AuthorizedResource for GlobalImageList { + fn load_roles<'a, 'b, 'c, 'd, 'e, 'f>( + &'a self, + opctx: &'b OpContext, + datastore: &'c DataStore, + authn: &'d authn::Context, + roleset: &'e mut RoleSet, + ) -> futures::future::BoxFuture<'f, Result<(), Error>> + where + 'a: 'f, + 'b: 'f, + 'c: 'f, + 'd: 'f, + 'e: 'f, + { + // there's no roles related to GlobalImageList, just permissions but we + // still need to load the fleet related roles to find if the actor has + // the "admin" role on the fleet + load_roles_for_resource( + opctx, + datastore, + authn, + ResourceType::Fleet, + *FLEET_ID, + roleset, + ) + .boxed() + } + + fn on_unauthorized( + &self, + _: &Authz, + error: Error, + _: AnyActor, + _: Action, + ) -> Error { + error + } +} + // Main resource hierarchy: Organizations, Projects, and their resources authz_resource! { @@ -389,3 +450,11 @@ authz_resource! { roles_allowed = false, polar_snippet = FleetChild, } + +authz_resource! 
{ + name = "GlobalImage", + parent = "Fleet", + primary_key = Uuid, + roles_allowed = false, + polar_snippet = FleetChild, +} diff --git a/nexus/src/authz/omicron.polar b/nexus/src/authz/omicron.polar index 633cada063..ed3312d6ba 100644 --- a/nexus/src/authz/omicron.polar +++ b/nexus/src/authz/omicron.polar @@ -80,7 +80,7 @@ has_role(actor: AuthenticatedActor, role: String, resource: Resource) # # - fleet.admin (superuser for the whole system) # - fleet.collaborator (can create and own silos) -# - fleet.viewer (can read fleet-wide data) +# - fleet.viewer (can read fleet-wide data) # - silo.admin (superuser for the silo) # - silo.collaborator (can create and own orgs) # - silo.viewer (can read silo-wide data) @@ -91,7 +91,7 @@ has_role(actor: AuthenticatedActor, role: String, resource: Resource) # the project, but cannot modify or delete the project # itself) # - project.viewer (can see everything in the project, but cannot modify -# anything) +# anything) # # At the top level is the "Fleet" resource. @@ -215,6 +215,23 @@ resource Project { has_relation(organization: Organization, "parent_organization", project: Project) if project.organization = organization; +resource GlobalImageList { + permissions = [ + "list_children", + "modify", + "create_child", + ]; + + # Only admins can create or modify the global images list + relations = { parent_fleet: Fleet }; + "modify" if "admin" on "parent_fleet"; + "create_child" if "admin" on "parent_fleet"; + + # Anyone with viewer can list global images + "list_children" if "viewer" on "parent_fleet"; +} +has_relation(fleet: Fleet, "parent_fleet", global_image_list: GlobalImageList) + if global_image_list.fleet = fleet; # ConsoleSessionList is a synthetic resource used for modeling who has access # to create sessions. 
diff --git a/nexus/src/authz/oso_generic.rs b/nexus/src/authz/oso_generic.rs index 755f84e77b..7669786f84 100644 --- a/nexus/src/authz/oso_generic.rs +++ b/nexus/src/authz/oso_generic.rs @@ -43,6 +43,7 @@ pub fn make_omicron_oso(log: &slog::Logger) -> Result { AuthenticatedActor::get_polar_class(), Database::get_polar_class(), Fleet::get_polar_class(), + GlobalImageList::get_polar_class(), ConsoleSessionList::get_polar_class(), ]; for c in classes { @@ -70,6 +71,7 @@ pub fn make_omicron_oso(log: &slog::Logger) -> Result { Sled::init(), UpdateAvailableArtifact::init(), UserBuiltin::init(), + GlobalImage::init(), ]; let polar_config = std::iter::once(OMICRON_AUTHZ_CONFIG_BASE) diff --git a/nexus/src/db/datastore.rs b/nexus/src/db/datastore.rs index 706a56d3a5..dbdd2114b3 100644 --- a/nexus/src/db/datastore.rs +++ b/nexus/src/db/datastore.rs @@ -38,13 +38,13 @@ use crate::db::{ error::{public_error_from_diesel_pool, ErrorHandler, TransactionError}, model::{ ConsoleSession, Dataset, DatasetKind, Disk, DiskRuntimeState, - Generation, IncompleteNetworkInterface, Instance, InstanceRuntimeState, - Name, NetworkInterface, Organization, OrganizationUpdate, OximeterInfo, - ProducerEndpoint, Project, ProjectUpdate, Region, - RoleAssignmentBuiltin, RoleBuiltin, RouterRoute, RouterRouteUpdate, - Silo, SiloUser, Sled, UpdateAvailableArtifact, UserBuiltin, Volume, - Vpc, VpcFirewallRule, VpcRouter, VpcRouterUpdate, VpcSubnet, - VpcSubnetUpdate, VpcUpdate, Zpool, + Generation, GlobalImage, IncompleteNetworkInterface, Instance, + InstanceRuntimeState, Name, NetworkInterface, Organization, + OrganizationUpdate, OximeterInfo, ProducerEndpoint, Project, + ProjectUpdate, Region, RoleAssignmentBuiltin, RoleBuiltin, RouterRoute, + RouterRouteUpdate, Silo, SiloUser, Sled, UpdateAvailableArtifact, + UserBuiltin, Volume, Vpc, VpcFirewallRule, VpcRouter, VpcRouterUpdate, + VpcSubnet, VpcSubnetUpdate, VpcUpdate, Zpool, }, pagination::paginated, pagination::paginated_multicolumn, @@ -296,12 
+296,52 @@ impl DataStore { .limit(REGION_REDUNDANCY_THRESHOLD.try_into().unwrap()) } + async fn get_block_size_from_disk_create( + &self, + opctx: &OpContext, + disk_create: &params::DiskCreate, + ) -> Result<db::model::BlockSize, Error> { + match &disk_create.disk_source { + params::DiskSource::Blank { block_size } => { + Ok(db::model::BlockSize::try_from(*block_size) + .map_err(|e| Error::invalid_request(&e.to_string()))?) + } + params::DiskSource::Snapshot { snapshot_id: _ } => { + // Until we implement snapshots, do not allow disks to be + // created from a snapshot. + return Err(Error::InvalidValue { + label: String::from("snapshot"), + message: String::from("snapshots are not yet supported"), + }); + } + params::DiskSource::Image { image_id: _ } => { + // Until we implement project images, do not allow disks to be + // created from a project image. + return Err(Error::InvalidValue { + label: String::from("image"), + message: String::from( + "project image are not yet supported", + ), + }); + } + params::DiskSource::GlobalImage { image_id } => { + let (.., db_global_image) = LookupPath::new(opctx, &self) + .global_image_id(*image_id) + .fetch() + .await?; + + Ok(db_global_image.block_size) + } + } + } + /// Idempotently allocates enough regions to back a disk. /// /// Returns the allocated regions, as well as the datasets to which they /// belong. 
pub async fn region_allocate( &self, + opctx: &OpContext, volume_id: Uuid, params: &params::DiskCreate, ) -> Result<Vec<(Dataset, Region)>, Error> { @@ -333,7 +373,13 @@ impl DataStore { NotEnoughDatasets(usize), } type TxnError = TransactionError<RegionAllocateError>; + let params: params::DiskCreate = params.clone(); + let block_size = + self.get_block_size_from_disk_create(opctx, &params).await?; + let blocks_per_extent = + params.extent_size() / block_size.to_bytes() as i64; + self.pool() .transaction(move |conn| { // First, for idempotency, check if regions are already @@ -367,8 +413,8 @@ impl DataStore { Region::new( dataset.id(), volume_id, - params.block_size().into(), - params.blocks_per_extent(), + block_size.into(), + blocks_per_extent, params.extent_count(), ) }) @@ -380,8 +426,8 @@ impl DataStore { // Update the tallied sizes in the source datasets containing // those regions. - let region_size = i64::from(params.block_size()) - * params.blocks_per_extent() + let region_size = i64::from(block_size.to_bytes()) + * blocks_per_extent * params.extent_count(); for dataset in source_datasets.iter_mut() { dataset.size_used = @@ -2679,6 +2725,50 @@ impl DataStore { }), } } + + pub async fn global_image_list_images( + &self, + opctx: &OpContext, + pagparams: &DataPageParams<'_, Name>, + ) -> ListResultVec<GlobalImage> { + opctx + .authorize(authz::Action::ListChildren, &authz::GLOBAL_IMAGE_LIST) + .await?; + + use db::schema::global_image::dsl; + paginated(dsl::global_image, dsl::name, pagparams) + .filter(dsl::time_deleted.is_null()) + .select(GlobalImage::as_select()) + .load_async::<GlobalImage>(self.pool_authorized(opctx).await?) 
+ .await + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + } + + pub async fn global_image_create_image( + &self, + opctx: &OpContext, + image: GlobalImage, + ) -> CreateResult<GlobalImage> { + opctx + .authorize(authz::Action::CreateChild, &authz::GLOBAL_IMAGE_LIST) + .await?; + + use db::schema::global_image::dsl; + let name = image.name().clone(); + diesel::insert_into(dsl::global_image) + .values(image) + .on_conflict(dsl::id) + .do_nothing() + .returning(GlobalImage::as_returning()) + .get_result_async(self.pool()) + .await + .map_err(|e| { + public_error_from_diesel_pool( + e, + ErrorHandler::Conflict(ResourceType::Image, name.as_str()), + ) + }) + } } /// Constructs a DataStore for use in test suites that has preloaded the @@ -2916,10 +3006,10 @@ mod test { name: Name::try_from(name.to_string()).unwrap(), description: name.to_string(), }, - snapshot_id: None, - image_id: None, + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize::try_from(4096).unwrap(), + }, size, - block_size: params::BlockSize::try_from(4096).unwrap(), } } @@ -2929,7 +3019,9 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&cfg); - let datastore = DataStore::new(Arc::new(pool)); + let datastore = Arc::new(DataStore::new(Arc::new(pool))); + let opctx = + OpContext::for_tests(logctx.log.new(o!()), datastore.clone()); // Create a sled... let sled_id = create_test_sled(&datastore).await; @@ -2957,8 +3049,8 @@ mod test { let volume1_id = Uuid::new_v4(); // Currently, we only allocate one Region Set per volume. let expected_region_count = REGION_REDUNDANCY_THRESHOLD; - let dataset_and_regions = - datastore.region_allocate(volume1_id, &params).await.unwrap(); + let dataset_and_regions = datastore + .region_allocate(&opctx, volume1_id, &params) + .await + .unwrap(); // Verify the allocation. 
assert_eq!(expected_region_count, dataset_and_regions.len()); @@ -2966,8 +3060,8 @@ mod test { for (dataset, region) in dataset_and_regions { assert!(disk1_datasets.insert(dataset.id())); assert_eq!(volume1_id, region.volume_id()); - assert_eq!(params.block_size(), region.block_size()); - assert_eq!(params.blocks_per_extent(), region.blocks_per_extent()); + assert_eq!(ByteCount::from(4096), region.block_size()); + assert_eq!(params.extent_size() / 4096, region.blocks_per_extent()); assert_eq!(params.extent_count(), region.extent_count()); } @@ -2978,15 +3072,17 @@ mod test { ByteCount::from_mebibytes_u32(500), ); let volume2_id = Uuid::new_v4(); - let dataset_and_regions = - datastore.region_allocate(volume2_id, ¶ms).await.unwrap(); + let dataset_and_regions = datastore + .region_allocate(&opctx, volume2_id, ¶ms) + .await + .unwrap(); assert_eq!(expected_region_count, dataset_and_regions.len()); let mut disk2_datasets = HashSet::new(); for (dataset, region) in dataset_and_regions { assert!(disk2_datasets.insert(dataset.id())); assert_eq!(volume2_id, region.volume_id()); - assert_eq!(params.block_size(), region.block_size()); - assert_eq!(params.blocks_per_extent(), region.blocks_per_extent()); + assert_eq!(ByteCount::from(4096), region.block_size()); + assert_eq!(params.extent_size() / 4096, region.blocks_per_extent()); assert_eq!(params.extent_count(), region.extent_count()); } @@ -3004,7 +3100,9 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&cfg); - let datastore = DataStore::new(Arc::new(pool)); + let datastore = Arc::new(DataStore::new(Arc::new(pool))); + let opctx = + OpContext::for_tests(logctx.log.new(o!()), datastore.clone()); // Create a sled... 
let sled_id = create_test_sled(&datastore).await; @@ -3030,10 +3128,14 @@ mod test { ByteCount::from_mebibytes_u32(500), ); let volume_id = Uuid::new_v4(); - let mut dataset_and_regions1 = - datastore.region_allocate(volume_id, ¶ms).await.unwrap(); - let mut dataset_and_regions2 = - datastore.region_allocate(volume_id, ¶ms).await.unwrap(); + let mut dataset_and_regions1 = datastore + .region_allocate(&opctx, volume_id, ¶ms) + .await + .unwrap(); + let mut dataset_and_regions2 = datastore + .region_allocate(&opctx, volume_id, ¶ms) + .await + .unwrap(); // Give them a consistent order so we can easily compare them. let sort_vec = |v: &mut Vec<(Dataset, Region)>| { @@ -3064,7 +3166,9 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&cfg); - let datastore = DataStore::new(Arc::new(pool)); + let datastore = Arc::new(DataStore::new(Arc::new(pool))); + let opctx = + OpContext::for_tests(logctx.log.new(o!()), datastore.clone()); // Create a sled... let sled_id = create_test_sled(&datastore).await; @@ -3090,8 +3194,10 @@ mod test { ByteCount::from_mebibytes_u32(500), ); let volume1_id = Uuid::new_v4(); - let err = - datastore.region_allocate(volume1_id, ¶ms).await.unwrap_err(); + let err = datastore + .region_allocate(&opctx, volume1_id, ¶ms) + .await + .unwrap_err(); assert!(err .to_string() .contains("Not enough datasets to allocate disks")); @@ -3111,7 +3217,9 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&cfg); - let datastore = DataStore::new(Arc::new(pool)); + let datastore = Arc::new(DataStore::new(Arc::new(pool))); + let opctx = + OpContext::for_tests(logctx.log.new(o!()), datastore.clone()); // Create a sled... 
let sled_id = create_test_sled(&datastore).await; @@ -3140,7 +3248,7 @@ mod test { let volume1_id = Uuid::new_v4(); // NOTE: This *should* be an error, rather than succeeding. - datastore.region_allocate(volume1_id, ¶ms).await.unwrap(); + datastore.region_allocate(&opctx, volume1_id, ¶ms).await.unwrap(); let _ = db.cleanup().await; } diff --git a/nexus/src/db/lookup.rs b/nexus/src/db/lookup.rs index 5860c1a64e..210e67a8c9 100644 --- a/nexus/src/db/lookup.rs +++ b/nexus/src/db/lookup.rs @@ -353,6 +353,27 @@ impl<'a> LookupPath<'a> { key: UserBuiltinKey::Name(Root { lookup_root: self }, name), } } + + /// Select a resource of type GlobalImage, identified by its name + pub fn global_image_name<'b, 'c>(self, name: &'b Name) -> GlobalImage<'c> + where + 'a: 'c, + 'b: 'c, + { + GlobalImage { + key: GlobalImageKey::Name(Root { lookup_root: self }, name), + } + } + + /// Select a resource of type GlobalImage, identified by its id + pub fn global_image_id<'b>(self, id: Uuid) -> GlobalImage<'b> + where + 'a: 'b, + { + GlobalImage { + key: GlobalImageKey::PrimaryKey(Root { lookup_root: self }, id), + } + } } /// Represents the head of the selection path for a resource @@ -528,6 +549,15 @@ lookup_resource! { primary_key_columns = [ { column_name = "id", rust_type = Uuid } ] } +lookup_resource! 
{ + name = "GlobalImage", + ancestors = [], + children = [], + lookup_by_name = true, + soft_deletes = true, + primary_key_columns = [ { column_name = "id", rust_type = Uuid } ] +} + #[cfg(test)] mod test { use super::Instance; diff --git a/nexus/src/db/model.rs b/nexus/src/db/model.rs index d3aaa8be70..34470714c9 100644 --- a/nexus/src/db/model.rs +++ b/nexus/src/db/model.rs @@ -8,11 +8,11 @@ use crate::db::collection_insert::DatastoreCollection; use crate::db::identity::{Asset, Resource}; use crate::db::ipv6; use crate::db::schema::{ - console_session, dataset, disk, image, instance, metric_producer, - network_interface, organization, oximeter, project, rack, region, - role_assignment_builtin, role_builtin, router_route, silo, silo_user, sled, - snapshot, update_available_artifact, user_builtin, volume, vpc, - vpc_firewall_rule, vpc_router, vpc_subnet, zpool, + console_session, dataset, disk, global_image, image, instance, + metric_producer, network_interface, organization, oximeter, project, rack, + region, role_assignment_builtin, role_builtin, router_route, silo, + silo_user, sled, snapshot, update_available_artifact, user_builtin, volume, + vpc, vpc_firewall_rule, vpc_router, vpc_subnet, zpool, }; use crate::defaults; use crate::external_api::params; @@ -272,6 +272,12 @@ impl From for sled_agent_client::types::ByteCount { } } +impl From for ByteCount { + fn from(bs: BlockSize) -> Self { + Self(bs.to_bytes().into()) + } +} + #[derive( Copy, Clone, @@ -1398,9 +1404,23 @@ impl Disk { project_id: Uuid, volume_id: Uuid, params: params::DiskCreate, + block_size: BlockSize, runtime_initial: DiskRuntimeState, ) -> Result { let identity = DiskIdentity::new(disk_id, params.identity); + + let create_snapshot_id = match params.disk_source { + params::DiskSource::Snapshot { snapshot_id } => Some(snapshot_id), + _ => None, + }; + + // XXX further enum here for different image types? 
+ let create_image_id = match params.disk_source { + params::DiskSource::Image { image_id } => Some(image_id), + params::DiskSource::GlobalImage { image_id } => Some(image_id), + _ => None, + }; + Ok(Self { identity, rcgen: external::Generation::new().into(), @@ -1408,9 +1428,9 @@ impl Disk { volume_id, runtime_state: runtime_initial, size: params.size.into(), - block_size: params.block_size.try_into()?, - create_snapshot_id: params.snapshot_id, - create_image_id: params.image_id, + block_size: block_size, + create_snapshot_id, + create_image_id, }) } @@ -1582,6 +1602,57 @@ impl Into for DiskState { } } +/// Newtype wrapper around [external::Digest] +#[derive( + Clone, + Debug, + Display, + AsExpression, + FromSqlRow, + Eq, + PartialEq, + Ord, + PartialOrd, + RefCast, + JsonSchema, + Serialize, + Deserialize, +)] +#[sql_type = "sql_types::Text"] +#[serde(transparent)] +#[repr(transparent)] +#[display("{0}")] +pub struct Digest(pub external::Digest); + +NewtypeFrom! { () pub struct Digest(external::Digest); } +NewtypeDeref! 
{ () pub struct Digest(external::Digest); } + +impl ToSql for Digest +where + DB: Backend, + str: ToSql, +{ + fn to_sql( + &self, + out: &mut serialize::Output, + ) -> serialize::Result { + self.to_string().as_str().to_sql(out) + } +} + +impl FromSql for Digest +where + DB: Backend, + String: FromSql, +{ + fn from_sql(bytes: RawValue) -> deserialize::Result { + let digest: external::Digest = String::from_sql(bytes)?.parse()?; + Ok(Digest(digest)) + } +} + +// Project images + #[derive( Queryable, Insertable, @@ -1595,13 +1666,18 @@ impl Into for DiskState { #[table_name = "image"] pub struct Image { #[diesel(embed)] - identity: ImageIdentity, + pub identity: ImageIdentity, + + pub project_id: Uuid, + pub volume_id: Uuid, + pub url: Option, + pub version: Option, + pub digest: Option, + + pub block_size: BlockSize, - project_id: Option, - volume_id: Option, - url: Option, #[column_name = "size_bytes"] - size: ByteCount, + pub size: ByteCount, } impl From for views::Image { @@ -1610,6 +1686,50 @@ impl From for views::Image { identity: image.identity(), project_id: image.project_id, url: image.url, + version: image.version, + digest: image.digest.map(|x| x.into()), + block_size: image.block_size.into(), + size: image.size.into(), + } + } +} + +// Global images + +#[derive( + Queryable, + Insertable, + Selectable, + Clone, + Debug, + Resource, + Serialize, + Deserialize, +)] +#[table_name = "global_image"] +pub struct GlobalImage { + #[diesel(embed)] + pub identity: GlobalImageIdentity, + + pub volume_id: Uuid, + pub url: Option, + pub version: Option, + pub digest: Option, + + pub block_size: BlockSize, + + #[column_name = "size_bytes"] + pub size: ByteCount, +} + +impl From for views::GlobalImage { + fn from(image: GlobalImage) -> Self { + Self { + identity: image.identity(), + url: image.url, + version: image.version, + digest: image.digest.map(|x| x.into()), + block_size: image.block_size.into(), size: image.size.into(), } } diff --git a/nexus/src/db/schema.rs 
b/nexus/src/db/schema.rs index b0f7860bc9..2b3fa000c1 100644 --- a/nexus/src/db/schema.rs +++ b/nexus/src/db/schema.rs @@ -36,9 +36,29 @@ table! { time_created -> Timestamptz, time_modified -> Timestamptz, time_deleted -> Nullable, - project_id -> Nullable, + project_id -> Uuid, volume_id -> Uuid, - url -> Text, + url -> Nullable, + version -> Nullable, + digest -> Nullable, + block_size -> crate::db::model::BlockSizeEnum, + size_bytes -> Int8, + } +} + +table! { + global_image (id) { + id -> Uuid, + name -> Text, + description -> Text, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable, + volume_id -> Uuid, + url -> Nullable, + version -> Nullable, + digest -> Nullable, + block_size -> crate::db::model::BlockSizeEnum, size_bytes -> Int8, } } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 90046ff4c8..369f7c7674 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -11,8 +11,8 @@ use crate::ServerContext; use super::{ console_api, params, views::{ - Image, Organization, Project, Rack, Role, Silo, Sled, Snapshot, User, - Vpc, VpcRouter, VpcSubnet, + GlobalImage, Image, Organization, Project, Rack, Role, Silo, Sled, + Snapshot, User, Vpc, VpcRouter, VpcSubnet, }, }; use crate::context::OpContext; @@ -1192,14 +1192,14 @@ async fn instance_disks_detach( async fn images_get( rqctx: Arc>>, query_params: Query, -) -> Result>, HttpError> { +) -> Result>, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.nexus; let query = query_params.into_inner(); let handler = async { let opctx = OpContext::for_external_api(&rqctx).await?; let images = nexus - .images_list( + .global_images_list( &opctx, &data_page_params_for(&rqctx, &query)? 
.map_name(|n| Name::ref_cast(n)), @@ -1225,13 +1225,14 @@ async fn images_get( async fn images_post( rqctx: Arc>>, new_image: TypedBody, -) -> Result, HttpError> { +) -> Result, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.nexus; let new_image_params = &new_image.into_inner(); let handler = async { let opctx = OpContext::for_external_api(&rqctx).await?; - let image = nexus.image_create(&opctx, &new_image_params).await?; + let image = + nexus.global_image_create(&opctx, &new_image_params).await?; Ok(HttpResponseCreated(image.into())) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await @@ -1254,14 +1255,14 @@ struct GlobalImagePathParam { async fn images_get_image( rqctx: Arc>>, path_params: Path, -) -> Result, HttpError> { +) -> Result, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.nexus; let path = path_params.into_inner(); let image_name = &path.image_name; let handler = async { let opctx = OpContext::for_external_api(&rqctx).await?; - let image = nexus.image_fetch(&opctx, &image_name).await?; + let image = nexus.global_image_fetch(&opctx, &image_name).await?; Ok(HttpResponseOk(image.into())) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await @@ -1287,7 +1288,7 @@ async fn images_delete_image( let image_name = &path.image_name; let handler = async { let opctx = OpContext::for_external_api(&rqctx).await?; - nexus.image_delete(&opctx, &image_name).await?; + nexus.global_image_delete(&opctx, &image_name).await?; Ok(HttpResponseDeleted()) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await diff --git a/nexus/src/external_api/params.rs b/nexus/src/external_api/params.rs index 9a83bab95c..d9c93629b5 100644 --- a/nexus/src/external_api/params.rs +++ b/nexus/src/external_api/params.rs @@ -277,6 +277,7 @@ pub struct VpcRouterUpdate { // DISKS #[derive(Copy, Clone, Debug, Deserialize, Serialize)] +#[serde(try_from = "u32")] // invoke the try_from 
validation routine below pub struct BlockSize(pub u32); impl TryFrom for BlockSize { @@ -296,6 +297,12 @@ impl Into for BlockSize { } } +impl From for u64 { + fn from(bs: BlockSize) -> u64 { + bs.0 as u64 + } +} + impl JsonSchema for BlockSize { fn schema_name() -> String { "BlockSize".to_string() @@ -336,31 +343,40 @@ impl JsonSchema for BlockSize { } } +/// Different sources for a disk +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +#[serde(tag = "type")] +pub enum DiskSource { + /// Create a blank disk + Blank { + /// size of blocks for this Disk. valid values are: 512, 2048, or 4096 + block_size: BlockSize, + }, + /// Create a disk from a disk snapshot + Snapshot { snapshot_id: Uuid }, + /// Create a disk from a project image + Image { image_id: Uuid }, + /// Create a disk from a global image + GlobalImage { image_id: Uuid }, +} + /// Create-time parameters for a [`Disk`](omicron_common::api::external::Disk) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct DiskCreate { /// common identifying metadata #[serde(flatten)] pub identity: IdentityMetadataCreateParams, - /// id for snapshot from which the Disk should be created, if any - pub snapshot_id: Option, - /// id for image from which the Disk should be created, if any - pub image_id: Option, + /// initial source for this disk + pub disk_source: DiskSource, /// total size of the Disk in bytes pub size: ByteCount, - /// size of blocks for this Disk. 
valid values are: 512, 2048, or 4096 - pub block_size: BlockSize, } const EXTENT_SIZE: u32 = 1_u32 << 20; impl DiskCreate { - pub fn block_size(&self) -> ByteCount { - ByteCount::from(self.block_size.0) - } - - pub fn blocks_per_extent(&self) -> i64 { - EXTENT_SIZE as i64 / i64::from(self.block_size.0) + pub fn extent_size(&self) -> i64 { + EXTENT_SIZE as i64 } pub fn extent_count(&self) -> i64 { @@ -403,6 +419,9 @@ pub struct ImageCreate { #[serde(flatten)] pub identity: IdentityMetadataCreateParams, + /// block size in bytes + pub block_size: BlockSize, + /// The source of the image's contents. pub source: ImageSource, } @@ -444,10 +463,10 @@ mod test { name: Name::try_from("myobject".to_string()).unwrap(), description: "desc".to_string(), }, - snapshot_id: None, - image_id: None, + disk_source: DiskSource::Blank { + block_size: BlockSize::try_from(4096).unwrap(), + }, size, - block_size: BlockSize::try_from(4096).unwrap(), } } @@ -472,13 +491,24 @@ mod test { assert_eq!(2, params.extent_count()); // Mostly just checking we don't blow up on an unwrap here. - let params = + let _params = new_disk_create_params(ByteCount::try_from(i64::MAX).unwrap()); + + // Note that i64::MAX bytes is an invalid disk size as it's not + // divisible by 4096. 
+ let max_disk_size = i64::MAX - (i64::MAX % 4096); + let params = + new_disk_create_params(ByteCount::try_from(max_disk_size).unwrap()); + let block_size: u64 = 4096; + let blocks_per_extent: u64 = params.extent_size() as u64 / block_size; + assert_eq!(params.extent_count() as u64, 8796093022208_u64); + + // Assert that the regions allocated will fit this disk assert!( - params.size.to_bytes() + params.size.to_bytes() as u64 <= (params.extent_count() as u64) - * (params.blocks_per_extent() as u64) - * params.block_size().to_bytes() + * blocks_per_extent + * block_size ); } } diff --git a/nexus/src/external_api/views.rs b/nexus/src/external_api/views.rs index 5fc4b22a39..5891974b94 100644 --- a/nexus/src/external_api/views.rs +++ b/nexus/src/external_api/views.rs @@ -9,8 +9,8 @@ use crate::db::identity::{Asset, Resource}; use crate::db::model; use api_identity::ObjectIdentity; use omicron_common::api::external::{ - ByteCount, IdentityMetadata, Ipv4Net, Ipv6Net, Name, ObjectIdentity, - RoleName, + ByteCount, Digest, IdentityMetadata, Ipv4Net, Ipv6Net, Name, + ObjectIdentity, RoleName, }; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -75,14 +75,50 @@ impl From for Project { // IMAGES -/// Client view of Images +/// Client view of global Images +#[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct GlobalImage { + #[serde(flatten)] + pub identity: IdentityMetadata, + + /// URL source of this image, if any + pub url: Option, + + /// Version of this, if any + pub version: Option, + + /// Hash of the image contents, if applicable + pub digest: Option, + + /// size of blocks in bytes + pub block_size: ByteCount, + + /// total size in bytes + pub size: ByteCount, +} + +/// Client view of project Images #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct Image { #[serde(flatten)] pub identity: IdentityMetadata, - pub project_id: Option, + /// The project the disk belongs to + pub 
project_id: Uuid, + + /// URL source of this image, if any pub url: Option, + + /// Version of this, if any + pub version: Option, + + /// Hash of the image contents, if applicable + pub digest: Option, + + /// size of blocks in bytes + pub block_size: ByteCount, + + /// total size in bytes pub size: ByteCount, } diff --git a/nexus/src/nexus.rs b/nexus/src/nexus.rs index 98caab8a5f..365d3c08e3 100644 --- a/nexus/src/nexus.rs +++ b/nexus/src/nexus.rs @@ -76,6 +76,7 @@ use std::net::SocketAddr; use std::net::SocketAddrV6; use std::num::NonZeroU32; use std::path::Path; +use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use steno::SagaId; @@ -977,33 +978,70 @@ impl Nexus { .lookup_for(authz::Action::CreateChild) .await?; - // Reject disks where the block size doesn't evenly divide the total - // size - if (params.size.to_bytes() % params.block_size().to_bytes()) != 0 { - return Err(Error::InvalidValue { - label: String::from("size and block_size"), - message: String::from( - "total size must be a multiple of block size", - ), - }); - } - - // Until we implement snapshots, do not allow disks to be created from a - // snapshot. - if params.snapshot_id.is_some() { - return Err(Error::InvalidValue { - label: String::from("snapshot_id"), - message: String::from("snapshots are not yet supported"), - }); - } + match ¶ms.disk_source { + params::DiskSource::Blank { block_size } => { + // Reject disks where the block size doesn't evenly divide the + // total size + if (params.size.to_bytes() % block_size.0 as u64) != 0 { + return Err(Error::InvalidValue { + label: String::from("size and block_size"), + message: String::from( + "total size must be a multiple of block size", + ), + }); + } + } + params::DiskSource::Snapshot { snapshot_id: _ } => { + // Until we implement snapshots, do not allow disks to be + // created from a snapshot. 
+ return Err(Error::InvalidValue { + label: String::from("snapshot"), + message: String::from("snapshots are not yet supported"), + }); + } + params::DiskSource::Image { image_id: _ } => { + // Until we implement project images, do not allow disks to be + // created from a project image. + return Err(Error::InvalidValue { + label: String::from("image"), + message: String::from( + "project image are not yet supported", + ), + }); + } + params::DiskSource::GlobalImage { image_id } => { + let (.., db_global_image) = + LookupPath::new(opctx, &self.db_datastore) + .global_image_id(*image_id) + .fetch() + .await?; + + // Reject disks where the block size doesn't evenly divide the + // total size + if (params.size.to_bytes() + % db_global_image.block_size.to_bytes() as u64) + != 0 + { + return Err(Error::InvalidValue { + label: String::from("size and block_size"), + message: String::from( + "total size must be a multiple of global image's block size", + ), + }); + } - // Until we implement images, do not allow disks to be created from an - // image. - if params.image_id.is_some() { - return Err(Error::InvalidValue { - label: String::from("image_id"), - message: String::from("images are not yet supported"), - }); + // If the size of the image is greater than the size of the + // disk, return an error. 
+ if db_global_image.size.to_bytes() > params.size.to_bytes() { + return Err(Error::invalid_request( + &format!( + "disk size {} must be greater than or equal to image size {}", + params.size.to_bytes(), + db_global_image.size.to_bytes(), + ), + )); + } + } } let saga_params = Arc::new(sagas::ParamsDiskCreate { @@ -1068,35 +1106,156 @@ impl Nexus { Ok(()) } - pub async fn images_list( + pub async fn global_images_list( &self, opctx: &OpContext, - _pagparams: &DataPageParams<'_, Name>, - ) -> ListResultVec { - Err(self.unimplemented_todo(opctx, Unimpl::Public).await) + pagparams: &DataPageParams<'_, Name>, + ) -> ListResultVec { + self.db_datastore.global_image_list_images(opctx, pagparams).await } - pub async fn image_create( + pub async fn global_image_create( self: &Arc, opctx: &OpContext, - _params: ¶ms::ImageCreate, - ) -> CreateResult { - Err(self.unimplemented_todo(opctx, Unimpl::Public).await) + params: ¶ms::ImageCreate, + ) -> CreateResult { + let new_image = match ¶ms.source { + params::ImageSource::Url(url) => { + let db_block_size = db::model::BlockSize::try_from( + params.block_size, + ) + .map_err(|e| Error::InvalidValue { + label: String::from("block_size"), + message: format!("block_size is invalid: {}", e), + })?; + + let volume_construction_request = sled_agent_client::types::VolumeConstructionRequest::Volume { + block_size: db_block_size.to_bytes().into(), + sub_volumes: vec![ + sled_agent_client::types::VolumeConstructionRequest::Url { + block_size: db_block_size.to_bytes().into(), + url: url.clone(), + } + ], + read_only_parent: None, + }; + + let volume_data = + serde_json::to_string(&volume_construction_request)?; + + // use reqwest to query url for size + let response = + reqwest::Client::new().head(url).send().await.map_err( + |e| Error::InvalidValue { + label: String::from("url"), + message: format!("error querying url: {}", e), + }, + )?; + + if !response.status().is_success() { + return Err(Error::InvalidValue { + label: 
String::from("url"), + message: format!( + "querying url returned: {}", + response.status() + ), + }); + } + + // grab total size from content length + let content_length = response + .headers() + .get(reqwest::header::CONTENT_LENGTH) + .ok_or("no content length!") + .map_err(|e| Error::InvalidValue { + label: String::from("url"), + message: format!("error querying url: {}", e), + })?; + + let total_size = + u64::from_str(content_length.to_str().map_err(|e| { + Error::InvalidValue { + label: String::from("url"), + message: format!("content length invalid: {}", e), + } + })?) + .map_err(|e| { + Error::InvalidValue { + label: String::from("url"), + message: format!("content length invalid: {}", e), + } + })?; + + let size: external::ByteCount = total_size.try_into().map_err( + |e: external::ByteCountRangeError| Error::InvalidValue { + label: String::from("size"), + message: format!("total size is invalid: {}", e), + }, + )?; + + // validate total size is divisible by block size + let block_size: u64 = params.block_size.into(); + if (size.to_bytes() % block_size) != 0 { + return Err(Error::InvalidValue { + label: String::from("size"), + message: format!( + "total size {} must be divisible by block size {}", + size.to_bytes(), + block_size + ), + }); + } + + // for images backed by a url, store the ETag as the version + let etag = response + .headers() + .get(reqwest::header::ETAG) + .map(|x| x.to_str().ok()) + .flatten() + .map(|x| x.to_string()); + + let new_image_volume = + db::model::Volume::new(Uuid::new_v4(), volume_data); + let volume = + self.db_datastore.volume_create(new_image_volume).await?; + + db::model::GlobalImage { + identity: db::model::GlobalImageIdentity::new( + Uuid::new_v4(), + params.identity.clone(), + ), + volume_id: volume.id(), + url: Some(url.clone()), + version: etag, + digest: None, // not computed for URL type + block_size: db_block_size, + size: size.into(), + } + } + + params::ImageSource::Snapshot(_id) => { + return Err(Error::unavail( 
+ &"creating images from snapshots not supported", + )); + } + }; + + self.db_datastore.global_image_create_image(opctx, new_image).await } - pub async fn image_fetch( + pub async fn global_image_fetch( &self, opctx: &OpContext, image_name: &Name, - ) -> LookupResult { - let lookup_type = LookupType::ByName(image_name.to_string()); - let error = lookup_type.into_not_found(ResourceType::Image); - Err(self - .unimplemented_todo(opctx, Unimpl::ProtectedLookup(error)) - .await) + ) -> LookupResult { + let (.., db_disk) = LookupPath::new(opctx, &self.db_datastore) + .global_image_name(image_name) + .fetch() + .await?; + Ok(db_disk) } - pub async fn image_delete( + pub async fn global_image_delete( self: &Arc, opctx: &OpContext, image_name: &Name, diff --git a/nexus/src/sagas.rs b/nexus/src/sagas.rs index 8f46710815..5e2447dd37 100644 --- a/nexus/src/sagas.rs +++ b/nexus/src/sagas.rs @@ -1109,12 +1109,53 @@ async fn sdc_create_disk_record( // but this should be acceptable because the disk remains in a "Creating" // state until the saga has completed. let volume_id = sagactx.lookup::("volume_id")?; + let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); + + let block_size: db::model::BlockSize = match ¶ms + .create_params + .disk_source + { + params::DiskSource::Blank { block_size } => { + db::model::BlockSize::try_from(*block_size).map_err(|e| { + ActionError::action_failed(Error::internal_error( + &e.to_string(), + )) + })? + } + params::DiskSource::Snapshot { snapshot_id: _ } => { + // Until we implement snapshots, do not allow disks to be + // created from a snapshot. + return Err(ActionError::action_failed(Error::InvalidValue { + label: String::from("snapshot"), + message: String::from("snapshots are not yet supported"), + })); + } + params::DiskSource::Image { image_id: _ } => { + // Until we implement project images, do not allow disks to be + // created from a project image. 
+ return Err(ActionError::action_failed(Error::InvalidValue { + label: String::from("image"), + message: String::from("project image are not yet supported"), + })); + } + params::DiskSource::GlobalImage { image_id } => { + let (.., global_image) = + LookupPath::new(&opctx, &osagactx.datastore()) + .global_image_id(*image_id) + .fetch() + .await + .map_err(ActionError::action_failed)?; + + global_image.block_size + } + }; let disk = db::model::Disk::new( disk_id, params.project_id, volume_id, params.create_params.clone(), + block_size, db::model::DiskRuntimeState::new(), ) .map_err(|e| { @@ -1157,9 +1198,10 @@ async fn sdc_alloc_regions( // https://github.com/oxidecomputer/omicron/issues/613 , we // should consider using a paginated API to access regions, rather than // returning all of them at once. + let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); let datasets_and_regions = osagactx .datastore() - .region_allocate(volume_id, ¶ms.create_params) + .region_allocate(&opctx, volume_id, ¶ms.create_params) .await .map_err(ActionError::action_failed)?; Ok(datasets_and_regions) @@ -1291,6 +1333,75 @@ async fn sdc_regions_ensure( let block_size = datasets_and_regions[0].1.block_size; + // If requested, back disk by image + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params(); + let log = osagactx.log(); + let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); + + let read_only_parent: Option< + Box, + > = match ¶ms.create_params.disk_source { + params::DiskSource::Blank { block_size: _ } => None, + params::DiskSource::Snapshot { snapshot_id: _ } => { + // Until we implement snapshots, do not allow disks to be + // created from a snapshot. 
+ return Err(ActionError::action_failed(Error::InvalidValue { + label: String::from("snapshot"), + message: String::from("snapshots are not yet supported"), + })); + } + params::DiskSource::Image { image_id: _ } => { + // Until we implement project images, do not allow disks to be + // created from a project image. + return Err(ActionError::action_failed(Error::InvalidValue { + label: String::from("image"), + message: String::from("project image are not yet supported"), + })); + } + params::DiskSource::GlobalImage { image_id } => { + warn!(log, "grabbing image {}", image_id); + + let (.., global_image) = + LookupPath::new(&opctx, &osagactx.datastore()) + .global_image_id(*image_id) + .fetch() + .await + .map_err(ActionError::action_failed)?; + + debug!(log, "retrieved global image {}", global_image.id()); + + debug!( + log, + "grabbing global image {} volume {}", + global_image.id(), + global_image.volume_id + ); + + let volume = osagactx + .datastore() + .volume_get(global_image.volume_id) + .await + .map_err(ActionError::action_failed)?; + + debug!( + log, + "grabbed volume {}, with data {}", + volume.id(), + volume.data() + ); + + Some(Box::new(serde_json::from_str(volume.data()).map_err( + |e| { + ActionError::action_failed(Error::internal_error(&format!( + "failed to deserialize volume data: {}", + e, + ))) + }, + )?)) + } + }; + // Store volume details in db let mut rng = StdRng::from_entropy(); let volume_construction_request = @@ -1335,7 +1446,7 @@ async fn sdc_regions_ensure( }, }, ], - read_only_parent: None, + read_only_parent, }; let volume_data = serde_json::to_string(&volume_construction_request) diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 015883a29d..6df0329a19 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -132,10 +132,10 @@ pub async fn create_disk( name: disk_name.parse().unwrap(), description: String::from("sells rainsticks"), }, - 
snapshot_id: None, - image_id: None, + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize::try_from(512).unwrap(), + }, size: ByteCount::from_gibibytes_u32(1), - block_size: params::BlockSize::try_from(512).unwrap(), }, ) .await diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index f9a0979012..a68504681b 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -267,10 +267,10 @@ async fn test_disk_create_disk_that_already_exists_fails( name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), }, - snapshot_id: None, - image_id: None, + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize::try_from(512).unwrap(), + }, size: ByteCount::from_gibibytes_u32(1), - block_size: params::BlockSize::try_from(512).unwrap(), }; let _ = create_disk(&client, ORG_NAME, PROJECT_NAME, DISK_NAME).await; let disk_url = format!("{}/{}", disks_url, DISK_NAME); @@ -648,10 +648,10 @@ async fn test_disk_region_creation_failure( name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), }, - snapshot_id: None, - image_id: None, + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize::try_from(512).unwrap(), + }, size: disk_size, - block_size: params::BlockSize::try_from(512).unwrap(), }; // Unfortunately, the error message is only posted internally to the @@ -720,10 +720,10 @@ async fn test_disk_invalid_block_size_rejected( name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), }, - snapshot_id: None, - image_id: None, + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize(1024), + }, size: disk_size, - block_size: params::BlockSize(1024), }; NexusRequest::new( @@ -764,10 +764,10 @@ async fn test_disk_reject_total_size_not_divisible_by_block_size( name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), }, - snapshot_id: None, - 
image_id: None, + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize::try_from(512).unwrap(), + }, size: disk_size, - block_size: params::BlockSize::try_from(512).unwrap(), }; NexusRequest::new( diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 0cac4b1ca9..d79af1e519 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -159,10 +159,8 @@ lazy_static! { name: DEMO_DISK_NAME.clone(), description: "".parse().unwrap(), }, - snapshot_id: None, - image_id: None, + disk_source: params::DiskSource::Blank { block_size: params::BlockSize::try_from(4096).unwrap() }, size: ByteCount::from_gibibytes_u32(16), - block_size: params::BlockSize::try_from(4096).unwrap(), }; // Instance used for testing @@ -215,10 +213,8 @@ lazy_static! { ip: None, }; - // Images + // Project Images pub static ref DEMO_IMAGE_NAME: Name = "demo-image".parse().unwrap(); - pub static ref DEMO_IMAGE_URL: String = - format!("/images/{}", *DEMO_IMAGE_NAME); pub static ref DEMO_PROJECT_IMAGE_URL: String = format!("{}/{}", *DEMO_PROJECT_URL_IMAGES, *DEMO_IMAGE_NAME); pub static ref DEMO_IMAGE_CREATE: params::ImageCreate = @@ -227,9 +223,14 @@ lazy_static! { name: DEMO_IMAGE_NAME.clone(), description: String::from(""), }, - source: params::ImageSource::Url(String::from("dummy")) + source: params::ImageSource::Url(String::from("http://127.0.0.1:5555/image.raw")), + block_size: params::BlockSize::try_from(4096).unwrap(), }; + // Global Images + pub static ref DEMO_GLOBAL_IMAGE_URL: String = + format!("/images/{}", *DEMO_IMAGE_NAME); + // Snapshots pub static ref DEMO_SNAPSHOT_NAME: Name = "demo-snapshot".parse().unwrap(); pub static ref DEMO_SNAPSHOT_URL: String = @@ -848,23 +849,24 @@ lazy_static! 
{ )], }, - /* Images */ + /* Global Images */ VerifyEndpoint { url: "/images", visibility: Visibility::Public, allowed_methods: vec![ - AllowedMethod::GetUnimplemented, + AllowedMethod::Get, AllowedMethod::Post( serde_json::to_value(&*DEMO_IMAGE_CREATE).unwrap() ), ], }, + VerifyEndpoint { - url: &*DEMO_IMAGE_URL, + url: &*DEMO_GLOBAL_IMAGE_URL, visibility: Visibility::Protected, allowed_methods: vec![ - AllowedMethod::GetUnimplemented, + AllowedMethod::Get, AllowedMethod::Delete, ], }, diff --git a/nexus/tests/integration_tests/images.rs b/nexus/tests/integration_tests/images.rs new file mode 100644 index 0000000000..cbb8b02479 --- /dev/null +++ b/nexus/tests/integration_tests/images.rs @@ -0,0 +1,383 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Tests images support in the API + +use http::method::Method; +use http::StatusCode; +use nexus_test_utils::http_testing::AuthnMode; +use nexus_test_utils::http_testing::NexusRequest; +use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_organization; +use nexus_test_utils::resource_helpers::create_project; +use nexus_test_utils::resource_helpers::DiskTest; +use nexus_test_utils::ControlPlaneTestContext; +use nexus_test_utils_macros::nexus_test; + +use omicron_common::api::external::{ByteCount, IdentityMetadataCreateParams}; +use omicron_nexus::external_api::params; +use omicron_nexus::external_api::views::GlobalImage; + +use httptest::{matchers::*, responders::*, Expectation, ServerBuilder}; + +#[nexus_test] +async fn test_global_image_create(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + DiskTest::new(&cptestctx).await; + + let server = ServerBuilder::new().run().unwrap(); + server.expect( + Expectation::matching(request::method_path("HEAD", "/image.raw")) + .times(1..) 
+ .respond_with( + status_code(200).append_header( + "Content-Length", + format!("{}", 4096 * 1000), + ), + ), + ); + + // No global images yet + let global_images: Vec = + NexusRequest::iter_collection_authn(client, "/images", "", None) + .await + .expect("failed to list images") + .all_items; + + assert_eq!(global_images.len(), 0); + + // Create one! + let image_create_params = params::ImageCreate { + identity: IdentityMetadataCreateParams { + name: "alpine-edge".parse().unwrap(), + description: String::from( + "you can boot any image, as long as it's alpine", + ), + }, + source: params::ImageSource::Url(server.url("/image.raw").to_string()), + block_size: params::BlockSize::try_from(512).unwrap(), + }; + + NexusRequest::objects_post(client, "/images", &image_create_params) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); + + // Verify one global image + let global_images: Vec = + NexusRequest::iter_collection_authn(client, "/images", "", None) + .await + .expect("failed to list images") + .all_items; + + assert_eq!(global_images.len(), 1); + assert_eq!(global_images[0].identity.name, "alpine-edge"); +} + +#[nexus_test] +async fn test_global_image_create_url_404(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + DiskTest::new(&cptestctx).await; + + let server = ServerBuilder::new().run().unwrap(); + server.expect( + Expectation::matching(request::method_path("HEAD", "/image.raw")) + .times(1..) 
+ .respond_with(status_code(404)), + ); + + let image_create_params = params::ImageCreate { + identity: IdentityMetadataCreateParams { + name: "alpine-edge".parse().unwrap(), + description: String::from( + "you can boot any image, as long as it's alpine", + ), + }, + source: params::ImageSource::Url(server.url("/image.raw").to_string()), + block_size: params::BlockSize::try_from(512).unwrap(), + }; + + let error = NexusRequest::new( + RequestBuilder::new(client, Method::POST, &"/images") + .body(Some(&image_create_params)) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("unexpected success") + .parsed_body::() + .unwrap(); + assert_eq!( + error.message, + format!("unsupported value for \"url\": querying url returned: 404 Not Found") + ); +} + +#[nexus_test] +async fn test_global_image_create_bad_url(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + DiskTest::new(&cptestctx).await; + + let image_create_params = params::ImageCreate { + identity: IdentityMetadataCreateParams { + name: "alpine-edge".parse().unwrap(), + description: String::from( + "you can boot any image, as long as it's alpine", + ), + }, + source: params::ImageSource::Url("not_a_url".to_string()), + block_size: params::BlockSize::try_from(512).unwrap(), + }; + + let error = NexusRequest::new( + RequestBuilder::new(client, Method::POST, &"/images") + .body(Some(&image_create_params)) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("unexpected success") + .parsed_body::() + .unwrap(); + assert_eq!( + error.message, + format!("unsupported value for \"url\": error querying url: builder error: relative URL without a base") + ); +} + +#[nexus_test] +async fn test_global_image_create_bad_content_length( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + DiskTest::new(&cptestctx).await; + 
+ let server = ServerBuilder::new().run().unwrap(); + server.expect( + Expectation::matching(request::method_path("HEAD", "/image.raw")) + .times(1..) + .respond_with( + status_code(200).append_header("Content-Length", "bad"), + ), + ); + + let image_create_params = params::ImageCreate { + identity: IdentityMetadataCreateParams { + name: "alpine-edge".parse().unwrap(), + description: String::from( + "you can boot any image, as long as it's alpine", + ), + }, + source: params::ImageSource::Url(server.url("/image.raw").to_string()), + block_size: params::BlockSize::try_from(512).unwrap(), + }; + + let error = NexusRequest::new( + RequestBuilder::new(client, Method::POST, &"/images") + .body(Some(&image_create_params)) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("unexpected success") + .parsed_body::() + .unwrap(); + assert_eq!( + error.message, + format!("unsupported value for \"url\": content length invalid: invalid digit found in string") + ); +} + +#[nexus_test] +async fn test_global_image_create_bad_image_size( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + DiskTest::new(&cptestctx).await; + + let server = ServerBuilder::new().run().unwrap(); + server.expect( + Expectation::matching(request::method_path("HEAD", "/image.raw")) + .times(1..) 
+ .respond_with(status_code(200).append_header( + "Content-Length", + format!("{}", 4096 * 1000 + 100), + )), + ); + + let image_create_params = params::ImageCreate { + identity: IdentityMetadataCreateParams { + name: "alpine-edge".parse().unwrap(), + description: String::from( + "you can boot any image, as long as it's alpine", + ), + }, + source: params::ImageSource::Url(server.url("/image.raw").to_string()), + block_size: params::BlockSize::try_from(512).unwrap(), + }; + + let error = NexusRequest::new( + RequestBuilder::new(client, Method::POST, &"/images") + .body(Some(&image_create_params)) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("unexpected success") + .parsed_body::() + .unwrap(); + assert_eq!( + error.message, + format!("unsupported value for \"size\": total size {} must be divisible by block size {}", 4096*1000 + 100, 512) + ); +} + +#[nexus_test] +async fn test_make_disk_from_global_image(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + DiskTest::new(&cptestctx).await; + + let server = ServerBuilder::new().run().unwrap(); + server.expect( + Expectation::matching(request::method_path("HEAD", "/alpine/edge.raw")) + .times(1..) 
+ .respond_with( + status_code(200).append_header( + "Content-Length", + format!("{}", 4096 * 1000), + ), + ), + ); + + let image_create_params = params::ImageCreate { + identity: IdentityMetadataCreateParams { + name: "alpine-edge".parse().unwrap(), + description: String::from( + "you can boot any image, as long as it's alpine", + ), + }, + source: params::ImageSource::Url( + server.url("/alpine/edge.raw").to_string(), + ), + block_size: params::BlockSize::try_from(512).unwrap(), + }; + + let alpine_image: GlobalImage = + NexusRequest::objects_post(client, "/images", &image_create_params) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + create_organization(&client, "myorg").await; + create_project(client, "myorg", "myproj").await; + + let new_disk = params::DiskCreate { + identity: IdentityMetadataCreateParams { + name: "disk".parse().unwrap(), + description: String::from("sells rainsticks"), + }, + disk_source: params::DiskSource::GlobalImage { + image_id: alpine_image.identity.id, + }, + size: ByteCount::from_gibibytes_u32(1), + }; + + NexusRequest::objects_post( + client, + "/organizations/myorg/projects/myproj/disks", + &new_disk, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); +} + +#[nexus_test] +async fn test_make_disk_from_global_image_too_small( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + DiskTest::new(&cptestctx).await; + + let server = ServerBuilder::new().run().unwrap(); + server.expect( + Expectation::matching(request::method_path("HEAD", "/alpine/edge.raw")) + .times(1..) 
+ .respond_with( + status_code(200).append_header( + "Content-Length", + format!("{}", 4096 * 1000), + ), + ), + ); + + let image_create_params = params::ImageCreate { + identity: IdentityMetadataCreateParams { + name: "alpine-edge".parse().unwrap(), + description: String::from( + "you can boot any image, as long as it's alpine", + ), + }, + source: params::ImageSource::Url( + server.url("/alpine/edge.raw").to_string(), + ), + block_size: params::BlockSize::try_from(512).unwrap(), + }; + + let alpine_image: GlobalImage = + NexusRequest::objects_post(client, "/images", &image_create_params) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + create_organization(&client, "myorg").await; + create_project(client, "myorg", "myproj").await; + + let new_disk = params::DiskCreate { + identity: IdentityMetadataCreateParams { + name: "disk".parse().unwrap(), + description: String::from("sells rainsticks"), + }, + disk_source: params::DiskSource::GlobalImage { + image_id: alpine_image.identity.id, + }, + size: ByteCount::from(4096 * 500), + }; + + let error = NexusRequest::new( + RequestBuilder::new( + client, + Method::POST, + &"/organizations/myorg/projects/myproj/disks", + ) + .body(Some(&new_disk)) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("unexpected success") + .parsed_body::() + .unwrap(); + assert_eq!( + error.message, + format!( + "disk size {} must be greater than or equal to image size {}", + 4096 * 500, + 4096 * 1000, + ) + ); +} diff --git a/nexus/tests/integration_tests/mod.rs b/nexus/tests/integration_tests/mod.rs index 6c9ba5a904..0697b343a3 100644 --- a/nexus/tests/integration_tests/mod.rs +++ b/nexus/tests/integration_tests/mod.rs @@ -9,6 +9,7 @@ mod commands; mod console_api; mod datasets; mod disks; +mod images; mod instances; mod organizations; mod oximeter; diff --git a/nexus/tests/integration_tests/unauthorized.rs 
b/nexus/tests/integration_tests/unauthorized.rs index ff5fc64787..80da08f1c5 100644 --- a/nexus/tests/integration_tests/unauthorized.rs +++ b/nexus/tests/integration_tests/unauthorized.rs @@ -11,6 +11,7 @@ use dropshot::HttpErrorResponseBody; use headers::authorization::Credentials; use http::method::Method; use http::StatusCode; +use httptest::{matchers::*, responders::*, Expectation, ServerBuilder}; use lazy_static::lazy_static; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; @@ -54,6 +55,24 @@ async fn test_unauthorized(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; let log = &cptestctx.logctx.log; + // Run a httptest server + let server = ServerBuilder::new() + .bind_addr("127.0.0.1:5555".parse().unwrap()) + .run() + .unwrap(); + + // Fake some data + server.expect( + Expectation::matching(request::method_path("HEAD", "/image.raw")) + .times(1..) + .respond_with( + status_code(200).append_header( + "Content-Length", + format!("{}", 4096 * 1000), + ), + ), + ); + // Create test data. info!(log, "setting up resource hierarchy"); for request in &*SETUP_REQUESTS { @@ -169,6 +188,11 @@ lazy_static! 
{ url: &*DEMO_PROJECT_URL_INSTANCES, body: serde_json::to_value(&*DEMO_INSTANCE_CREATE).unwrap(), }, + // Create a GlobalImage + SetupReq { + url: "/images", + body: serde_json::to_value(&*DEMO_IMAGE_CREATE).unwrap(), + } ]; } diff --git a/openapi/nexus.json b/openapi/nexus.json index 4fd7f6787a..c5a0966fd1 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -256,7 +256,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ImageResultsPage" + "$ref": "#/components/schemas/GlobalImageResultsPage" } } } @@ -293,7 +293,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Image" + "$ref": "#/components/schemas/GlobalImage" } } } @@ -332,7 +332,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Image" + "$ref": "#/components/schemas/GlobalImage" } } } @@ -4779,6 +4779,22 @@ "HistogramF64" ] }, + "Digest": { + "oneOf": [ + { + "type": "object", + "properties": { + "Sha256": { + "type": "string" + } + }, + "required": [ + "Sha256" + ], + "additionalProperties": false + } + ] + }, "Disk": { "description": "Client view of an [`Disk`]", "type": "object", @@ -4854,23 +4870,17 @@ "description": "Create-time parameters for a [`Disk`](omicron_common::api::external::Disk)", "type": "object", "properties": { - "block_size": { - "description": "size of blocks for this Disk. 
valid values are: 512, 2048, or 4096", + "description": { + "type": "string" + }, + "disk_source": { + "description": "initial source for this disk", "allOf": [ { - "$ref": "#/components/schemas/BlockSize" + "$ref": "#/components/schemas/DiskSource" } ] }, - "description": { - "type": "string" - }, - "image_id": { - "nullable": true, - "description": "id for image from which the Disk should be created, if any", - "type": "string", - "format": "uuid" - }, "name": { "$ref": "#/components/schemas/Name" }, @@ -4881,17 +4891,11 @@ "$ref": "#/components/schemas/ByteCount" } ] - }, - "snapshot_id": { - "nullable": true, - "description": "id for snapshot from which the Disk should be created, if any", - "type": "string", - "format": "uuid" } }, "required": [ - "block_size", "description", + "disk_source", "name", "size" ] @@ -4929,6 +4933,95 @@ "items" ] }, + "DiskSource": { + "description": "Different sources for a disk", + "oneOf": [ + { + "description": "Create a blank disk", + "type": "object", + "properties": { + "block_size": { + "description": "size of blocks for this Disk. 
valid values are: 512, 2048, or 4096", + "allOf": [ + { + "$ref": "#/components/schemas/BlockSize" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "Blank" + ] + } + }, + "required": [ + "block_size", + "type" + ] + }, + { + "description": "Create a disk from a disk snapshot", + "type": "object", + "properties": { + "snapshot_id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "Snapshot" + ] + } + }, + "required": [ + "snapshot_id", + "type" + ] + }, + { + "description": "Create a disk from a project image", + "type": "object", + "properties": { + "image_id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "Image" + ] + } + }, + "required": [ + "image_id", + "type" + ] + }, + { + "description": "Create a disk from a global image", + "type": "object", + "properties": { + "image_id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "GlobalImage" + ] + } + }, + "required": [ + "image_id", + "type" + ] + } + ] + }, "DiskState": { "description": "State of a Disk (primarily: attached or not)", "oneOf": [ @@ -5112,14 +5205,129 @@ "Bool" ] }, + "GlobalImage": { + "description": "Client view of global Images", + "type": "object", + "properties": { + "block_size": { + "description": "size of blocks in bytes", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "digest": { + "nullable": true, + "description": "Hash of the image contents, if applicable", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" 
+ } + ] + }, + "size": { + "description": "total size in bytes", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + }, + "url": { + "nullable": true, + "description": "URL source of this image, if any", + "type": "string" + }, + "version": { + "nullable": true, + "description": "Version of this, if any", + "type": "string" + } + }, + "required": [ + "block_size", + "description", + "id", + "name", + "size", + "time_created", + "time_modified" + ] + }, + "GlobalImageResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/GlobalImage" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, "Image": { - "description": "Client view of Images", + "description": "Client view of project Images", "type": "object", "properties": { + "block_size": { + "description": "size of blocks in bytes", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, "description": { "description": "human-readable free-form text about a resource", "type": "string" }, + "digest": { + "nullable": true, + "description": "Hash of the image contents, if applicable", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, "id": { "description": "unique, immutable, system-controlled identifier for each resource", "type": "string", @@ -5134,12 +5342,17 @@ ] }, "project_id": { - "nullable": true, + "description": "The project the disk belongs to", "type": "string", "format": "uuid" }, "size": 
{ - "$ref": "#/components/schemas/ByteCount" + "description": "total size in bytes", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] }, "time_created": { "description": "timestamp when this resource was created", @@ -5153,13 +5366,21 @@ }, "url": { "nullable": true, + "description": "URL source of this image, if any", + "type": "string" + }, + "version": { + "nullable": true, + "description": "Version of this, if any", "type": "string" } }, "required": [ + "block_size", "description", "id", "name", + "project_id", "size", "time_created", "time_modified" @@ -5169,6 +5390,14 @@ "description": "Create-time parameters for an [`Image`](omicron_common::api::external::Image)", "type": "object", "properties": { + "block_size": { + "description": "block size in bytes", + "allOf": [ + { + "$ref": "#/components/schemas/BlockSize" + } + ] + }, "description": { "type": "string" }, @@ -5185,6 +5414,7 @@ } }, "required": [ + "block_size", "description", "name", "source" @@ -5379,23 +5609,17 @@ "description": "During instance creation, create and attach disks", "type": "object", "properties": { - "block_size": { - "description": "size of blocks for this Disk. 
valid values are: 512, 2048, or 4096", + "description": { + "type": "string" + }, + "disk_source": { + "description": "initial source for this disk", "allOf": [ { - "$ref": "#/components/schemas/BlockSize" + "$ref": "#/components/schemas/DiskSource" } ] }, - "description": { - "type": "string" - }, - "image_id": { - "nullable": true, - "description": "id for image from which the Disk should be created, if any", - "type": "string", - "format": "uuid" - }, "name": { "$ref": "#/components/schemas/Name" }, @@ -5407,12 +5631,6 @@ } ] }, - "snapshot_id": { - "nullable": true, - "description": "id for snapshot from which the Disk should be created, if any", - "type": "string", - "format": "uuid" - }, "type": { "type": "string", "enum": [ @@ -5421,8 +5639,8 @@ } }, "required": [ - "block_size", "description", + "disk_source", "name", "size", "type"