diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 3b05c58df3..312d400d2f 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -739,6 +739,7 @@ pub enum ResourceType { LoopbackAddress, SwitchPortSettings, IpPool, + IpPoolResource, InstanceNetworkInterface, PhysicalDisk, Rack, diff --git a/end-to-end-tests/src/bin/bootstrap.rs b/end-to-end-tests/src/bin/bootstrap.rs index 9ddd872bc2..21e59647ae 100644 --- a/end-to-end-tests/src/bin/bootstrap.rs +++ b/end-to-end-tests/src/bin/bootstrap.rs @@ -4,7 +4,8 @@ use end_to_end_tests::helpers::{generate_name, get_system_ip_pool}; use omicron_test_utils::dev::poll::{wait_for_condition, CondCheckError}; use oxide_client::types::{ ByteCount, DeviceAccessTokenRequest, DeviceAuthRequest, DeviceAuthVerify, - DiskCreate, DiskSource, IpRange, Ipv4Range, SiloQuotasUpdate, + DiskCreate, DiskSource, IpPoolCreate, IpPoolSiloLink, IpRange, Ipv4Range, + NameOrId, SiloQuotasUpdate, }; use oxide_client::{ ClientDisksExt, ClientHiddenExt, ClientProjectsExt, @@ -38,9 +39,27 @@ async fn main() -> Result<()> { // ===== CREATE IP POOL ===== // eprintln!("creating IP pool... {:?} - {:?}", first, last); + let pool_name = "default"; + client + .ip_pool_create() + .body(IpPoolCreate { + name: pool_name.parse().unwrap(), + description: "Default IP pool".to_string(), + }) + .send() + .await?; + client + .ip_pool_silo_link() + .pool(pool_name) + .body(IpPoolSiloLink { + silo: NameOrId::Name(params.silo_name().parse().unwrap()), + is_default: true, + }) + .send() + .await?; client .ip_pool_range_add() - .pool("default") + .pool(pool_name) .body(IpRange::V4(Ipv4Range { first, last })) .send() .await?; diff --git a/end-to-end-tests/src/helpers/ctx.rs b/end-to-end-tests/src/helpers/ctx.rs index 0132feafeb..e4bf61356c 100644 --- a/end-to-end-tests/src/helpers/ctx.rs +++ b/end-to-end-tests/src/helpers/ctx.rs @@ -287,6 +287,10 @@ impl ClientParams { .build()?; Ok(Client::new_with_client(&base_url, reqwest_client)) } + + pub fn silo_name(&self) -> String { + self.rss_config.recovery_silo.silo_name.to_string() + } } async fn wait_for_records( diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 8ad78af07b..bec1113151 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -5,8 +5,10 @@ //! Model types for IP Pools and the CIDR blocks therein. use crate::collection::DatastoreCollectionConfig; +use crate::impl_enum_type; use crate::schema::ip_pool; use crate::schema::ip_pool_range; +use crate::schema::ip_pool_resource; use crate::Name; use chrono::DateTime; use chrono::Utc; @@ -35,42 +37,23 @@ pub struct IpPool { /// Child resource generation number, for optimistic concurrency control of /// the contained ranges. pub rcgen: i64, - - /// Silo, if IP pool is associated with a particular silo. One special use - /// for this is associating a pool with the internal silo oxide-internal, - /// which is used for internal services. If there is no silo ID, the - /// pool is considered a fleet-wide pool and will be used for allocating - /// instance IPs in silos that don't have their own pool. 
- pub silo_id: Option<Uuid>, - - pub is_default: bool, } impl IpPool { - pub fn new( - pool_identity: &external::IdentityMetadataCreateParams, - silo_id: Option<Uuid>, - is_default: bool, - ) -> Self { + pub fn new(pool_identity: &external::IdentityMetadataCreateParams) -> Self { Self { identity: IpPoolIdentity::new( Uuid::new_v4(), pool_identity.clone(), ), rcgen: 0, - silo_id, - is_default, } } } impl From<IpPool> for views::IpPool { fn from(pool: IpPool) -> Self { - Self { - identity: pool.identity(), - silo_id: pool.silo_id, - is_default: pool.is_default, - } + Self { identity: pool.identity() } } } @@ -93,6 +76,37 @@ impl From<params::IpPoolUpdate> for IpPoolUpdate { } } +impl_enum_type!( + #[derive(SqlType, Debug, Clone, Copy, QueryId)] + #[diesel(postgres_type(name = "ip_pool_resource_type"))] + pub struct IpPoolResourceTypeEnum; + + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq)] + #[diesel(sql_type = IpPoolResourceTypeEnum)] + pub enum IpPoolResourceType; + + Silo => b"silo" +); + +#[derive(Queryable, Insertable, Selectable, Clone, Debug)] +#[diesel(table_name = ip_pool_resource)] +pub struct IpPoolResource { + pub ip_pool_id: Uuid, + pub resource_type: IpPoolResourceType, + pub resource_id: Uuid, + pub is_default: bool, +} + +impl From<IpPoolResource> for views::IpPoolSilo { + fn from(assoc: IpPoolResource) -> Self { + Self { + ip_pool_id: assoc.ip_pool_id, + silo_id: assoc.resource_id, + is_default: assoc.is_default, + } + } +} + /// A range of IP addresses for an IP Pool. #[derive(Queryable, Insertable, Selectable, Clone, Debug)] #[diesel(table_name = ip_pool_range)] diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 791afa6de4..02bdd2c349 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -13,7 +13,7 @@ use omicron_common::api::external::SemverVersion; /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(22, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(23, 0, 1); table! { disk (id) { @@ -504,7 +504,14 @@ table! { time_modified -> Timestamptz, time_deleted -> Nullable<Timestamptz>, rcgen -> Int8, - silo_id -> Nullable<Uuid>, + } } + +table!
{ + ip_pool_resource (ip_pool_id, resource_type, resource_id) { + ip_pool_id -> Uuid, + resource_type -> crate::IpPoolResourceTypeEnum, + resource_id -> Uuid, is_default -> Bool, } } @@ -1426,8 +1433,9 @@ allow_tables_to_appear_in_same_query!( ); joinable!(system_update_component_update -> component_update (component_update_id)); -allow_tables_to_appear_in_same_query!(ip_pool_range, ip_pool); +allow_tables_to_appear_in_same_query!(ip_pool_range, ip_pool, ip_pool_resource); joinable!(ip_pool_range -> ip_pool (ip_pool_id)); +joinable!(ip_pool_resource -> ip_pool (ip_pool_id)); allow_tables_to_appear_in_same_query!(inv_collection, inv_collection_error); joinable!(inv_collection_error -> inv_collection (inv_collection_id)); @@ -1478,6 +1486,11 @@ allow_tables_to_appear_in_same_query!( allow_tables_to_appear_in_same_query!(dns_zone, dns_version, dns_name); allow_tables_to_appear_in_same_query!(external_ip, service); +// used for query to check whether an IP pool association has any allocated IPs before deleting +allow_tables_to_appear_in_same_query!(external_ip, instance); +allow_tables_to_appear_in_same_query!(external_ip, project); +allow_tables_to_appear_in_same_query!(external_ip, ip_pool_resource); + allow_tables_to_appear_in_same_query!( switch_port, switch_port_settings_route_config diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 2adeebd819..02ce950118 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -76,22 +76,18 @@ impl DataStore { .fetch_for(authz::Action::CreateChild) .await?; - // If the named pool conflicts with user's current scope, i.e., - // if it has a silo and it's different from the current silo, - // then as far as IP allocation is concerned, that pool doesn't - // exist. If the pool has no silo, it's fleet-scoped and can - // always be used. - let authz_silo_id = opctx.authn.silo_required()?.id(); - if let Some(pool_silo_id) = pool.silo_id { - if pool_silo_id != authz_silo_id { - return Err(authz_pool.not_found()); - } + // If this pool is not linked to the current silo, 404 + if self.ip_pool_fetch_link(opctx, pool.id()).await.is_err() { + return Err(authz_pool.not_found()); } pool } // If no name given, use the default logic - None => self.ip_pools_fetch_default(&opctx).await?, + None => { + let (.., pool) = self.ip_pools_fetch_default(&opctx).await?; + pool + } }; let pool_id = pool.identity.id; @@ -147,36 +143,29 @@ impl DataStore { ) -> CreateResult { let ip_id = Uuid::new_v4(); - // See `allocate_instance_ephemeral_ip`: we're replicating - // its strucutre to prevent cross-silo pool access. - let pool_id = if let Some(name_or_id) = params.pool { - let (.., authz_pool, pool) = match name_or_id { - NameOrId::Name(name) => { - LookupPath::new(opctx, self) - .ip_pool_name(&Name(name)) - .fetch_for(authz::Action::CreateChild) - .await? - } - NameOrId::Id(id) => { - LookupPath::new(opctx, self) - .ip_pool_id(id) - .fetch_for(authz::Action::CreateChild) - .await? - } - }; - - let authz_silo_id = opctx.authn.silo_required()?.id(); - if let Some(pool_silo_id) = pool.silo_id { - if pool_silo_id != authz_silo_id { - return Err(authz_pool.not_found()); - } + // TODO: NameOrId resolution should happen a level higher, in the nexus function + let (.., authz_pool, pool) = match params.pool { + Some(NameOrId::Name(name)) => { + LookupPath::new(opctx, self) + .ip_pool_name(&Name(name)) + .fetch_for(authz::Action::Read) + .await? 
+ } + Some(NameOrId::Id(id)) => { + LookupPath::new(opctx, self) + .ip_pool_id(id) + .fetch_for(authz::Action::Read) + .await? } + None => self.ip_pools_fetch_default(opctx).await?, + }; - pool - } else { - self.ip_pools_fetch_default(opctx).await? + let pool_id = pool.id(); + + // If this pool is not linked to the current silo, 404 + if self.ip_pool_fetch_link(opctx, pool_id).await.is_err() { + return Err(authz_pool.not_found()); } - .id(); let data = if let Some(ip) = params.address { IncompleteExternalIp::for_floating_explicit( diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 4497e3f2b4..f51f54d592 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -11,19 +11,27 @@ use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel; +use crate::db::error::public_error_from_diesel_lookup; use crate::db::error::ErrorHandler; use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::identity::Resource; +use crate::db::model::ExternalIp; +use crate::db::model::IpKind; use crate::db::model::IpPool; use crate::db::model::IpPoolRange; +use crate::db::model::IpPoolResource; +use crate::db::model::IpPoolResourceType; use crate::db::model::IpPoolUpdate; use crate::db::model::Name; use crate::db::pagination::paginated; use crate::db::pool::DbConnection; use crate::db::queries::ip_pool::FilterOverlappingIpRanges; +use crate::db::TransactionError; +use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; +use diesel::result::Error as DieselError; use ipnetwork::IpNetwork; use nexus_types::external_api::shared::IpRange; use omicron_common::api::external::http_pagination::PaginatedBy; @@ -31,6 +39,7 @@ use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; +use omicron_common::api::external::InternalContext; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; @@ -46,29 +55,110 @@ impl DataStore { opctx: &OpContext, pagparams: &PaginatedBy<'_>, ) -> ListResultVec { - use db::schema::ip_pool::dsl; + use db::schema::ip_pool; + use db::schema::ip_pool_resource; + opctx .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) .await?; match pagparams { PaginatedBy::Id(pagparams) => { - paginated(dsl::ip_pool, dsl::id, pagparams) + paginated(ip_pool::table, ip_pool::id, pagparams) + } + PaginatedBy::Name(pagparams) => paginated( + ip_pool::table, + ip_pool::name, + &pagparams.map_name(|n| Name::ref_cast(n)), + ), + } + .left_outer_join(ip_pool_resource::table) + .filter( + ip_pool_resource::resource_id + .ne(*INTERNAL_SILO_ID) + // resource_id is not nullable -- null here means the + // pool has no entry in the join table + .or(ip_pool_resource::resource_id.is_null()), + ) + .filter(ip_pool::time_deleted.is_null()) + .select(IpPool::as_select()) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
+ .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// List IP pools linked to the current silo + pub async fn silo_ip_pools_list( + &self, + opctx: &OpContext, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec<IpPool> { + use db::schema::ip_pool; + use db::schema::ip_pool_resource; + + // From the developer user's point of view, we treat IP pools linked to + // their silo as silo resources, so they can list them if they can list + // silo children + let authz_silo = + opctx.authn.silo_required().internal_context("listing IP pools")?; + opctx.authorize(authz::Action::ListChildren, &authz_silo).await?; + + let silo_id = authz_silo.id(); + + match pagparams { + PaginatedBy::Id(pagparams) => { + paginated(ip_pool::table, ip_pool::id, pagparams) } PaginatedBy::Name(pagparams) => paginated( - dsl::ip_pool, - dsl::name, + ip_pool::table, + ip_pool::name, &pagparams.map_name(|n| Name::ref_cast(n)), ), } - // != excludes nulls so we explicitly include them - .filter(dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null())) - .filter(dsl::time_deleted.is_null()) + .inner_join(ip_pool_resource::table) + .filter( + ip_pool_resource::resource_type + .eq(IpPoolResourceType::Silo) + .and(ip_pool_resource::resource_id.eq(silo_id)), + ) + .filter(ip_pool::time_deleted.is_null()) .select(db::model::IpPool::as_select()) .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + /// Look up whether the given pool is available to users in the current + /// silo, i.e., whether there is an entry in the association table linking + /// the pool with that silo + pub async fn ip_pool_fetch_link( + &self, + opctx: &OpContext, + ip_pool_id: Uuid, + ) -> LookupResult<IpPoolResource> { + use db::schema::ip_pool; + use db::schema::ip_pool_resource; + + let authz_silo = opctx.authn.silo_required().internal_context( + "fetching link from an IP pool to current silo", + )?; + + ip_pool::table + .inner_join(ip_pool_resource::table) + .filter( + ip_pool_resource::resource_type + .eq(IpPoolResourceType::Silo) + .and(ip_pool_resource::resource_id.eq(authz_silo.id())), + ) + .filter(ip_pool::id.eq(ip_pool_id)) + .filter(ip_pool::time_deleted.is_null()) + .select(IpPoolResource::as_select()) + .first_async::<IpPoolResource>( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + /// Look up the default IP pool for the current silo. If there is no default + /// at silo scope, fall back to the next level up, namely the fleet default.
/// There should always be a default pool at the fleet level, though this @@ -77,8 +167,9 @@ impl DataStore { pub async fn ip_pools_fetch_default( &self, opctx: &OpContext, - ) -> LookupResult { - use db::schema::ip_pool::dsl; + ) -> LookupResult<(authz::IpPool, IpPool)> { + use db::schema::ip_pool; + use db::schema::ip_pool_resource; let authz_silo_id = opctx.authn.silo_required()?.id(); @@ -91,23 +182,47 @@ impl DataStore { // .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) // .await?; - dsl::ip_pool - .filter(dsl::silo_id.eq(authz_silo_id).or(dsl::silo_id.is_null())) - .filter(dsl::is_default.eq(true)) - .filter(dsl::time_deleted.is_null()) - // this will sort by most specific first, i.e., - // - // (silo) - // (null) - // - // then by only taking the first result, we get the most specific one - .order(dsl::silo_id.asc().nulls_last()) + // join ip_pool to ip_pool_resource and filter + + // used in both success and error outcomes + let lookup_type = LookupType::ByCompositeId( + "Default pool for current silo".to_string(), + ); + + ip_pool::table + .inner_join(ip_pool_resource::table) + .filter( + ip_pool_resource::resource_type.eq(IpPoolResourceType::Silo), + ) + .filter(ip_pool_resource::resource_id.eq(authz_silo_id)) + .filter(ip_pool_resource::is_default.eq(true)) + .filter(ip_pool::time_deleted.is_null()) + // Order by most specific first so we get the most specific. + // resource_type is an enum in the DB and therefore gets its order + // from the definition; it's not lexicographic. So correctness here + // relies on the types being most-specific-first in the definition. + // There are tests for this. + .order(ip_pool_resource::resource_type.asc()) .select(IpPool::as_select()) .first_async::( &*self.pool_connection_authorized(opctx).await?, ) .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + .map_err(|e| { + // janky to do this manually, but this is an unusual kind of + // lookup in that it is by (silo_id, is_default=true), which is + // arguably a composite ID. + public_error_from_diesel_lookup( + e, + ResourceType::IpPool, + &lookup_type, + ) + }) + .map(|ip_pool| { + let authz_pool = + authz::IpPool::new(authz::FLEET, ip_pool.id(), lookup_type); + (authz_pool, ip_pool) + }) } /// Looks up an IP pool intended for internal services. @@ -117,16 +232,24 @@ impl DataStore { &self, opctx: &OpContext, ) -> LookupResult<(authz::IpPool, IpPool)> { - use db::schema::ip_pool::dsl; + use db::schema::ip_pool; + use db::schema::ip_pool_resource; opctx .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) .await?; - // Look up this IP pool by rack ID. - let (authz_pool, pool) = dsl::ip_pool - .filter(dsl::silo_id.eq(*INTERNAL_SILO_ID)) - .filter(dsl::time_deleted.is_null()) + // Look up IP pool by its association with the internal silo. + // We assume there is only one pool for that silo, or at least, + // if there is more than one, it doesn't matter which one we pick. + let (authz_pool, pool) = ip_pool::table + .inner_join(ip_pool_resource::table) + .filter(ip_pool::time_deleted.is_null()) + .filter( + ip_pool_resource::resource_type + .eq(IpPoolResourceType::Silo) + .and(ip_pool_resource::resource_id.eq(*INTERNAL_SILO_ID)), + ) .select(IpPool::as_select()) .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await @@ -179,6 +302,7 @@ impl DataStore { ) -> DeleteResult { use db::schema::ip_pool::dsl; use db::schema::ip_pool_range; + use db::schema::ip_pool_resource; opctx.authorize(authz::Action::Delete, authz_pool).await?; // Verify there are no IP ranges still in this pool @@ -199,15 +323,28 @@ impl DataStore { )); } + // Verify there are no linked silos + let silo_link = ip_pool_resource::table + .filter(ip_pool_resource::ip_pool_id.eq(authz_pool.id())) + .select(ip_pool_resource::resource_id) + .limit(1) + .first_async::<Uuid>( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .optional() + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + if silo_link.is_some() { + return Err(Error::invalid_request( + "IP Pool cannot be deleted while it is linked to a silo", + )); + } + // Delete the pool, conditional on the rcgen not having changed. This // protects the delete from occuring if clients created a new IP range // in between the above check for children and this query. let now = Utc::now(); let updated_rows = diesel::update(dsl::ip_pool) - // != excludes nulls so we explicitly include them - .filter( - dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null()), - ) .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_pool.id())) .filter(dsl::rcgen.eq(db_pool.rcgen)) @@ -229,6 +366,36 @@ impl DataStore { Ok(()) } + /// Check whether the pool is internal by checking that it exists and is + /// associated with the internal silo + pub async fn ip_pool_is_internal( + &self, + opctx: &OpContext, + authz_pool: &authz::IpPool, + ) -> LookupResult<bool> { + use db::schema::ip_pool; + use db::schema::ip_pool_resource; + + ip_pool::table + .inner_join(ip_pool_resource::table) + .filter(ip_pool::id.eq(authz_pool.id())) + .filter( + ip_pool_resource::resource_type.eq(IpPoolResourceType::Silo), + ) + .filter(ip_pool_resource::resource_id.eq(*INTERNAL_SILO_ID)) + .filter(ip_pool::time_deleted.is_null()) + .select(ip_pool::id) + .first_async::<Uuid>( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .optional() + // if there is a result, the pool is associated with the internal silo, + // which makes it the internal pool + .map(|result| Ok(result.is_some())) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?
+ } + pub async fn ip_pool_update( &self, opctx: &OpContext, @@ -237,11 +404,8 @@ impl DataStore { ) -> UpdateResult<IpPool> { use db::schema::ip_pool::dsl; opctx.authorize(authz::Action::Modify, authz_pool).await?; + diesel::update(dsl::ip_pool) - // != excludes nulls so we explicitly include them - .filter( - dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null()), - ) .filter(dsl::id.eq(authz_pool.id())) .filter(dsl::time_deleted.is_null()) .set(updates) @@ -256,6 +420,296 @@ impl DataStore { }) } + pub async fn ip_pool_silo_list( + &self, + opctx: &OpContext, + authz_pool: &authz::IpPool, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec<IpPoolResource> { + use db::schema::ip_pool; + use db::schema::ip_pool_resource; + + paginated( + ip_pool_resource::table, + ip_pool_resource::ip_pool_id, + pagparams, + ) + .inner_join(ip_pool::table) + .filter(ip_pool::id.eq(authz_pool.id())) + .filter(ip_pool::time_deleted.is_null()) + .select(IpPoolResource::as_select()) + .load_async::<IpPoolResource>( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + pub async fn ip_pool_link_silo( + &self, + opctx: &OpContext, + ip_pool_resource: IpPoolResource, + ) -> CreateResult<IpPoolResource> { + use db::schema::ip_pool_resource::dsl; + opctx + .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) + .await?; + + diesel::insert_into(dsl::ip_pool_resource) + .values(ip_pool_resource.clone()) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::IpPoolResource, + &format!( + "ip_pool_id: {:?}, resource_id: {:?}, resource_type: {:?}", + ip_pool_resource.ip_pool_id, + ip_pool_resource.resource_id, + ip_pool_resource.resource_type, + ) + ), + ) + }) + } + + pub async fn ip_pool_set_default( + &self, + opctx: &OpContext, + authz_ip_pool: &authz::IpPool, + authz_silo: &authz::Silo, + is_default: bool, + ) -> UpdateResult<IpPoolResource> { + use db::schema::ip_pool_resource::dsl; + + opctx.authorize(authz::Action::Modify, authz_ip_pool).await?; + opctx.authorize(authz::Action::Modify, authz_silo).await?; + + let ip_pool_id = authz_ip_pool.id(); + let silo_id = authz_silo.id(); + + let conn = self.pool_connection_authorized(opctx).await?; + + // if we're making is_default false, we can just do that without + // checking any other stuff + if !is_default { + let updated_link = diesel::update(dsl::ip_pool_resource) + .filter(dsl::resource_id.eq(silo_id)) + .filter(dsl::ip_pool_id.eq(ip_pool_id)) + .filter(dsl::resource_type.eq(IpPoolResourceType::Silo)) + .set(dsl::is_default.eq(false)) + .returning(IpPoolResource::as_returning()) + .get_result_async(&*conn) + .await + .map_err(|e| { + Error::internal_error(&format!( + "Transaction error: {:?}", + e + )) + })?; + return Ok(updated_link); + } + + // Errors returned from the below transactions.
+ #[derive(Debug)] + enum IpPoolResourceUpdateError { + FailedToUnsetDefault(DieselError), + } + type TxnError = TransactionError<IpPoolResourceUpdateError>; + + conn.transaction_async(|conn| async move { + // note this is matching the specified silo, but could be any pool + let existing_default_for_silo = dsl::ip_pool_resource + .filter(dsl::resource_type.eq(IpPoolResourceType::Silo)) + .filter(dsl::resource_id.eq(silo_id)) + .filter(dsl::is_default.eq(true)) + .select(IpPoolResource::as_select()) + .get_result_async(&conn) + .await; + + // if there is an existing default, we need to unset it before we can + // set the new default + if let Ok(existing_default) = existing_default_for_silo { + // if the pool we're making default is already default for this + // silo, don't error: just noop + if existing_default.ip_pool_id == ip_pool_id { + return Ok(existing_default); + } + + let unset_default = diesel::update(dsl::ip_pool_resource) + .filter(dsl::resource_id.eq(existing_default.resource_id)) + .filter(dsl::ip_pool_id.eq(existing_default.ip_pool_id)) + .filter( + dsl::resource_type.eq(existing_default.resource_type), + ) + .set(dsl::is_default.eq(false)) + .execute_async(&conn) + .await; + if let Err(e) = unset_default { + return Err(TxnError::CustomError( + IpPoolResourceUpdateError::FailedToUnsetDefault(e), + )); + } + } + + let updated_link = diesel::update(dsl::ip_pool_resource) + .filter(dsl::resource_id.eq(silo_id)) + .filter(dsl::ip_pool_id.eq(ip_pool_id)) + .filter(dsl::resource_type.eq(IpPoolResourceType::Silo)) + .set(dsl::is_default.eq(true)) + .returning(IpPoolResource::as_returning()) + .get_result_async(&conn) + .await?; + Ok(updated_link) + }) + .await + .map_err(|e| match e { + TransactionError::CustomError( + IpPoolResourceUpdateError::FailedToUnsetDefault(e), + ) => public_error_from_diesel(e, ErrorHandler::Server), + TransactionError::Database(e) => public_error_from_diesel( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::IpPoolResource, + // TODO: would be nice to put the actual names and/or ids in + // here but LookupType on each of the two silos doesn't have + // a nice to_string yet or a way of composing them + LookupType::ByCompositeId("(pool, silo)".to_string()), + ), + ), + }) + } + + /// Ephemeral and snat IPs are associated with a silo through an instance, + /// so in order to see if there are any such IPs outstanding in the given + /// silo, we have to join IP -> Instance -> Project -> Silo + async fn ensure_no_instance_ips_outstanding( + &self, + opctx: &OpContext, + authz_pool: &authz::IpPool, + authz_silo: &authz::Silo, + ) -> Result<(), Error> { + use db::schema::external_ip; + use db::schema::instance; + use db::schema::project; + + let existing_ips = external_ip::table + .inner_join( + instance::table + .on(external_ip::parent_id.eq(instance::id.nullable())), + ) + .inner_join(project::table.on(instance::project_id.eq(project::id))) + .filter(external_ip::is_service.eq(false)) + .filter(external_ip::parent_id.is_not_null()) + .filter(external_ip::time_deleted.is_null()) + .filter(external_ip::ip_pool_id.eq(authz_pool.id())) + // important, floating IPs are handled separately + .filter(external_ip::kind.eq(IpKind::Ephemeral).or(external_ip::kind.eq(IpKind::SNat))) + .filter(instance::time_deleted.is_null()) + // we have to join through IPs to instances to projects to get the silo ID + .filter(project::silo_id.eq(authz_silo.id())) + .select(ExternalIp::as_select()) + .limit(1) + .load_async::<ExternalIp>( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| {
Error::internal_error(&format!( + "error checking for outstanding IPs before deleting IP pool association to resource: {:?}", + e + )) + })?; + + if !existing_ips.is_empty() { + return Err(Error::invalid_request( + "IP addresses from this pool are in use in the linked silo", + )); + } + + Ok(()) + } + + /// Floating IPs are associated with a silo through a project, so this one + /// is a little simpler than ephemeral. We join IP -> Project -> Silo. + async fn ensure_no_floating_ips_outstanding( + &self, + opctx: &OpContext, + authz_pool: &authz::IpPool, + authz_silo: &authz::Silo, + ) -> Result<(), Error> { + use db::schema::external_ip; + use db::schema::project; + + let existing_ips = external_ip::table + .inner_join(project::table.on(external_ip::project_id.eq(project::id.nullable()))) + .filter(external_ip::is_service.eq(false)) + .filter(external_ip::time_deleted.is_null()) + // all floating IPs have a project + .filter(external_ip::project_id.is_not_null()) + .filter(external_ip::ip_pool_id.eq(authz_pool.id())) + .filter(external_ip::kind.eq(IpKind::Floating)) + // we have to join through IPs to projects to get the silo ID + .filter(project::silo_id.eq(authz_silo.id())) + .filter(project::time_deleted.is_null()) + .select(ExternalIp::as_select()) + .limit(1) + .load_async::<ExternalIp>( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| { + Error::internal_error(&format!( + "error checking for outstanding IPs before deleting IP pool association to resource: {:?}", + e + )) + })?; + + if !existing_ips.is_empty() { + return Err(Error::invalid_request( + "IP addresses from this pool are in use in the linked silo", + )); + } + + Ok(()) + } + + /// Delete IP pool association with resource unless there are outstanding + /// IPs allocated from the pool in the associated silo + pub async fn ip_pool_unlink_silo( + &self, + opctx: &OpContext, + authz_pool: &authz::IpPool, + authz_silo: &authz::Silo, + ) -> DeleteResult { + use db::schema::ip_pool_resource; + + opctx.authorize(authz::Action::Modify, authz_pool).await?; + opctx.authorize(authz::Action::Modify, authz_silo).await?; + + // We can only delete the association if there are no IPs allocated + // from this pool in the associated resource. + self.ensure_no_instance_ips_outstanding(opctx, authz_pool, authz_silo) + .await?; + self.ensure_no_floating_ips_outstanding(opctx, authz_pool, authz_silo) + .await?; + + diesel::delete(ip_pool_resource::table) + .filter(ip_pool_resource::ip_pool_id.eq(authz_pool.id())) + .filter(ip_pool_resource::resource_id.eq(authz_silo.id())) + .execute_async(&*self.pool_connection_authorized(opctx).await?)
+ .await + .map(|_rows_deleted| ()) + .map_err(|e| { + Error::internal_error(&format!( + "error deleting IP pool association to resource: {:?}", + e + )) + }) + } + pub async fn ip_pool_list_ranges( &self, opctx: &OpContext, @@ -422,12 +876,18 @@ impl DataStore { #[cfg(test)] mod test { + use std::num::NonZeroU32; + + use crate::authz; use crate::db::datastore::datastore_test; - use crate::db::model::IpPool; + use crate::db::model::{IpPool, IpPoolResource, IpPoolResourceType}; use assert_matches::assert_matches; use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Resource; - use omicron_common::api::external::{Error, IdentityMetadataCreateParams}; + use omicron_common::api::external::http_pagination::PaginatedBy; + use omicron_common::api::external::{ + DataPageParams, Error, IdentityMetadataCreateParams, LookupType, + }; use omicron_test_utils::dev; #[tokio::test] @@ -436,83 +896,212 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - // we start out with the default fleet-level pool already created, - // so when we ask for a default silo, we get it back - let fleet_default_pool = - datastore.ip_pools_fetch_default(&opctx).await.unwrap(); + // we start out with no default pool, so we expect not found + let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); + assert_matches!(error, Error::ObjectNotFound { .. }); - assert_eq!(fleet_default_pool.identity.name.as_str(), "default"); - assert!(fleet_default_pool.is_default); - assert_eq!(fleet_default_pool.silo_id, None); + let pagparams_id = DataPageParams { + marker: None, + limit: NonZeroU32::new(100).unwrap(), + direction: dropshot::PaginationOrder::Ascending, + }; + let pagbyid = PaginatedBy::Id(pagparams_id); + + let all_pools = datastore + .ip_pools_list(&opctx, &pagbyid) + .await + .expect("Should list IP pools"); + assert_eq!(all_pools.len(), 0); + let silo_pools = datastore + .silo_ip_pools_list(&opctx, &pagbyid) + .await + .expect("Should list silo IP pools"); + assert_eq!(silo_pools.len(), 0); - // unique index prevents second fleet-level default + let authz_silo = opctx.authn.silo_required().unwrap(); + let silo_id = authz_silo.id(); + + // create a non-default pool for the silo let identity = IdentityMetadataCreateParams { - name: "another-fleet-default".parse().unwrap(), + name: "pool1-for-silo".parse().unwrap(), description: "".to_string(), }; - let err = datastore - .ip_pool_create( - &opctx, - IpPool::new(&identity, None, /*default= */ true), - ) + let pool1_for_silo = datastore + .ip_pool_create(&opctx, IpPool::new(&identity)) .await - .expect_err("Failed to fail to create a second default fleet pool"); - assert_matches!(err, Error::ObjectAlreadyExists { .. 
}); + .expect("Failed to create IP pool"); - // when we fetch the default pool for a silo, if those scopes do not - // have a default IP pool, we will still get back the fleet default + // shows up in full list but not silo list + let all_pools = datastore + .ip_pools_list(&opctx, &pagbyid) + .await + .expect("Should list IP pools"); + assert_eq!(all_pools.len(), 1); + let silo_pools = datastore + .silo_ip_pools_list(&opctx, &pagbyid) + .await + .expect("Should list silo IP pools"); + assert_eq!(silo_pools.len(), 0); - let silo_id = opctx.authn.silo_required().unwrap().id(); + // make default should fail when there is no link yet + let authz_pool = authz::IpPool::new( + authz::FLEET, + pool1_for_silo.id(), + LookupType::ById(pool1_for_silo.id()), + ); + let error = datastore + .ip_pool_set_default(&opctx, &authz_pool, &authz_silo, true) + .await + .expect_err("Should not be able to make non-existent link default"); + assert_matches!(error, Error::ObjectNotFound { .. }); - // create a non-default pool for the silo - let identity = IdentityMetadataCreateParams { - name: "non-default-for-silo".parse().unwrap(), - description: "".to_string(), + // now link to silo + let link_body = IpPoolResource { + ip_pool_id: pool1_for_silo.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: silo_id, + is_default: false, }; datastore - .ip_pool_create( - &opctx, - IpPool::new(&identity, Some(silo_id), /*default= */ false), - ) + .ip_pool_link_silo(&opctx, link_body.clone()) .await - .expect("Failed to create silo non-default IP pool"); + .expect("Failed to associate IP pool with silo"); // because that one was not a default, when we ask for the silo default - // pool, we still get the fleet default - let ip_pool = datastore - .ip_pools_fetch_default(&opctx) + // pool, we still get nothing + let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); + assert_matches!(error, Error::ObjectNotFound { .. }); + + // now it shows up in the silo list + let silo_pools = datastore + .silo_ip_pools_list(&opctx, &pagbyid) .await - .expect("Failed to get silo default IP pool"); - assert_eq!(ip_pool.id(), fleet_default_pool.id()); + .expect("Should list silo IP pools"); + assert_eq!(silo_pools.len(), 1); + assert_eq!(silo_pools[0].id(), pool1_for_silo.id()); - // now create a default pool for the silo - let identity = IdentityMetadataCreateParams { - name: "default-for-silo".parse().unwrap(), - description: "".to_string(), - }; + // linking an already linked silo errors due to PK conflict + let err = datastore + .ip_pool_link_silo(&opctx, link_body) + .await + .expect_err("Creating the same link again should conflict"); + assert_matches!(err, Error::ObjectAlreadyExists { .. 
}); + + // now make it default + datastore + .ip_pool_set_default(&opctx, &authz_pool, &authz_silo, true) + .await + .expect("Should be able to make pool default"); + + // setting default if already default is allowed datastore - .ip_pool_create(&opctx, IpPool::new(&identity, Some(silo_id), true)) + .ip_pool_set_default(&opctx, &authz_pool, &authz_silo, true) .await - .expect("Failed to create silo default IP pool"); + .expect("Should be able to make pool default again"); - // now when we ask for the default pool, we get the one we just made - let ip_pool = datastore + // now when we ask for the default pool again, we get that one + let (authz_pool1_for_silo, ip_pool) = datastore .ip_pools_fetch_default(&opctx) .await .expect("Failed to get silo's default IP pool"); - assert_eq!(ip_pool.name().as_str(), "default-for-silo"); + assert_eq!(ip_pool.name().as_str(), "pool1-for-silo"); // and we can't create a second default pool for the silo let identity = IdentityMetadataCreateParams { name: "second-default-for-silo".parse().unwrap(), description: "".to_string(), }; + let second_silo_default = datastore + .ip_pool_create(&opctx, IpPool::new(&identity)) + .await + .expect("Failed to create pool"); let err = datastore - .ip_pool_create(&opctx, IpPool::new(&identity, Some(silo_id), true)) + .ip_pool_link_silo( + &opctx, + IpPoolResource { + ip_pool_id: second_silo_default.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: silo_id, + is_default: true, + }, + ) .await - .expect_err("Failed to fail to create second default pool"); + .expect_err("Failed to fail to set a second default pool for silo"); assert_matches!(err, Error::ObjectAlreadyExists { .. }); + // now remove the association and we should get nothing again + let authz_silo = + authz::Silo::new(authz::Fleet, silo_id, LookupType::ById(silo_id)); + datastore + .ip_pool_unlink_silo(&opctx, &authz_pool1_for_silo, &authz_silo) + .await + .expect("Failed to unlink IP pool from silo"); + + // no default + let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); + assert_matches!(error, Error::ObjectNotFound { .. 
}); + + // and silo pools list is empty again + let silo_pools = datastore + .silo_ip_pools_list(&opctx, &pagbyid) + .await + .expect("Should list silo IP pools"); + assert_eq!(silo_pools.len(), 0); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_internal_ip_pool() { + let logctx = dev::test_setup_log("test_internal_ip_pool"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // confirm internal pool appears as internal + let (authz_pool, _pool) = + datastore.ip_pools_service_lookup(&opctx).await.unwrap(); + + let is_internal = + datastore.ip_pool_is_internal(&opctx, &authz_pool).await; + assert_eq!(is_internal, Ok(true)); + + // another random pool should not be considered internal + let identity = IdentityMetadataCreateParams { + name: "other-pool".parse().unwrap(), + description: "".to_string(), + }; + let other_pool = datastore + .ip_pool_create(&opctx, IpPool::new(&identity)) + .await + .expect("Failed to create IP pool"); + + let authz_other_pool = authz::IpPool::new( + authz::FLEET, + other_pool.id(), + LookupType::ById(other_pool.id()), + ); + let is_internal = + datastore.ip_pool_is_internal(&opctx, &authz_other_pool).await; + assert_eq!(is_internal, Ok(false)); + + // now link it to the current silo, and it is still not internal + let silo_id = opctx.authn.silo_required().unwrap().id(); + let link = IpPoolResource { + ip_pool_id: other_pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: silo_id, + is_default: true, + }; + datastore + .ip_pool_link_silo(&opctx, link) + .await + .expect("Failed to make IP pool default for silo"); + + let is_internal = + datastore.ip_pool_is_internal(&opctx, &authz_other_pool).await; + assert_eq!(is_internal, Ok(false)); + db.cleanup().await.unwrap(); logctx.cleanup_successful(); } diff --git a/nexus/db-queries/src/db/datastore/project.rs b/nexus/db-queries/src/db/datastore/project.rs index a9015ea943..e3927fdfc1 100644 --- a/nexus/db-queries/src/db/datastore/project.rs +++ b/nexus/db-queries/src/db/datastore/project.rs @@ -347,34 +347,4 @@ impl DataStore { ) }) } - - /// List IP Pools accessible to a project - pub async fn project_ip_pools_list( - &self, - opctx: &OpContext, - authz_project: &authz::Project, - pagparams: &PaginatedBy<'_>, - ) -> ListResultVec { - use db::schema::ip_pool::dsl; - opctx.authorize(authz::Action::ListChildren, authz_project).await?; - match pagparams { - PaginatedBy::Id(pagparams) => { - paginated(dsl::ip_pool, dsl::id, pagparams) - } - PaginatedBy::Name(pagparams) => paginated( - dsl::ip_pool, - dsl::name, - &pagparams.map_name(|n| Name::ref_cast(n)), - ), - } - // TODO(2148, 2056): filter only pools accessible by the given - // project, once specific projects for pools are implemented - // != excludes nulls so we explicitly include them - .filter(dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null())) - .filter(dsl::time_deleted.is_null()) - .select(db::model::IpPool::as_select()) - .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
- .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) - } } diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 728da0b0d1..50bae03c2d 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -753,36 +753,37 @@ impl DataStore { self.rack_insert(opctx, &db::model::Rack::new(rack_id)).await?; - let internal_pool = db::model::IpPool::new( - &IdentityMetadataCreateParams { + let internal_pool = + db::model::IpPool::new(&IdentityMetadataCreateParams { name: SERVICE_IP_POOL_NAME.parse::().unwrap(), description: String::from("IP Pool for Oxide Services"), - }, - Some(*INTERNAL_SILO_ID), - true, // default for internal silo - ); + }); - self.ip_pool_create(opctx, internal_pool).await.map(|_| ()).or_else( - |e| match e { - Error::ObjectAlreadyExists { .. } => Ok(()), - _ => Err(e), - }, - )?; + let internal_pool_id = internal_pool.id(); - let default_pool = db::model::IpPool::new( - &IdentityMetadataCreateParams { - name: "default".parse::().unwrap(), - description: String::from("default IP pool"), - }, - None, // no silo ID, fleet scoped - true, // default for fleet - ); - self.ip_pool_create(opctx, default_pool).await.map(|_| ()).or_else( - |e| match e { - Error::ObjectAlreadyExists { .. } => Ok(()), + let internal_created = self + .ip_pool_create(opctx, internal_pool) + .await + .map(|_| true) + .or_else(|e| match e { + Error::ObjectAlreadyExists { .. } => Ok(false), _ => Err(e), - }, - )?; + })?; + + // make default for the internal silo. only need to do this if + // the create went through, i.e., if it wasn't already there + if internal_created { + self.ip_pool_link_silo( + opctx, + db::model::IpPoolResource { + ip_pool_id: internal_pool_id, + resource_type: db::model::IpPoolResourceType::Silo, + resource_id: *INTERNAL_SILO_ID, + is_default: true, + }, + ) + .await?; + } Ok(()) } @@ -1329,7 +1330,7 @@ mod test { // been allocated as a part of the service IP pool. let (.., svc_pool) = datastore.ip_pools_service_lookup(&opctx).await.unwrap(); - assert_eq!(svc_pool.silo_id, Some(*INTERNAL_SILO_ID)); + assert_eq!(svc_pool.name().as_str(), "oxide-service-pool"); let observed_ip_pool_ranges = get_all_ip_pool_ranges(&datastore).await; assert_eq!(observed_ip_pool_ranges.len(), 1); @@ -1531,7 +1532,7 @@ mod test { // allocated as a part of the service IP pool. 
let (.., svc_pool) = datastore.ip_pools_service_lookup(&opctx).await.unwrap(); - assert_eq!(svc_pool.silo_id, Some(*INTERNAL_SILO_ID)); + assert_eq!(svc_pool.name().as_str(), "oxide-service-pool"); let observed_ip_pool_ranges = get_all_ip_pool_ranges(&datastore).await; assert_eq!(observed_ip_pool_ranges.len(), 1); diff --git a/nexus/db-queries/src/db/pool_connection.rs b/nexus/db-queries/src/db/pool_connection.rs index 6fb951de84..090c6865b7 100644 --- a/nexus/db-queries/src/db/pool_connection.rs +++ b/nexus/db-queries/src/db/pool_connection.rs @@ -48,6 +48,7 @@ static CUSTOM_TYPE_KEYS: &'static [&'static str] = &[ "identity_type", "instance_state", "ip_kind", + "ip_pool_resource_type", "network_interface_kind", "physical_disk_kind", "producer_kind", diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index 2a76ea7408..49403aac61 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -833,6 +833,8 @@ mod tests { use async_bb8_diesel::AsyncRunQueryDsl; use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use dropshot::test_util::LogContext; + use nexus_db_model::IpPoolResource; + use nexus_db_model::IpPoolResourceType; use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::shared::IpRange; use omicron_common::address::NUM_SOURCE_NAT_PORTS; @@ -870,34 +872,34 @@ mod tests { Self { logctx, opctx, db, db_datastore } } + /// Create pool, associate with current silo async fn create_ip_pool( &self, name: &str, range: IpRange, is_default: bool, ) { - let silo_id = self.opctx.authn.silo_required().unwrap().id(); - let pool = IpPool::new( - &IdentityMetadataCreateParams { - name: String::from(name).parse().unwrap(), - description: format!("ip pool {}", name), - }, - Some(silo_id), - is_default, - ); + let pool = IpPool::new(&IdentityMetadataCreateParams { + name: String::from(name).parse().unwrap(), + description: format!("ip pool {}", name), + }); - let conn = self - .db_datastore - .pool_connection_authorized(&self.opctx) + self.db_datastore + .ip_pool_create(&self.opctx, pool.clone()) .await - .unwrap(); + .expect("Failed to create IP pool"); - use crate::db::schema::ip_pool::dsl as ip_pool_dsl; - diesel::insert_into(ip_pool_dsl::ip_pool) - .values(pool.clone()) - .execute_async(&*conn) + let silo_id = self.opctx.authn.silo_required().unwrap().id(); + let association = IpPoolResource { + resource_id: silo_id, + resource_type: IpPoolResourceType::Silo, + ip_pool_id: pool.id(), + is_default, + }; + self.db_datastore + .ip_pool_link_silo(&self.opctx, association) .await - .expect("Failed to create IP Pool"); + .expect("Failed to associate IP pool with silo"); self.initialize_ip_pool(name, range).await; } @@ -936,7 +938,7 @@ mod tests { } async fn default_pool_id(&self) -> Uuid { - let pool = self + let (.., pool) = self .db_datastore .ip_pools_fetch_default(&self.opctx) .await @@ -960,7 +962,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 1), )) .unwrap(); - context.initialize_ip_pool("default", range).await; + context.create_ip_pool("default", range, true).await; for first_port in (0..super::MAX_PORT).step_by(NUM_SOURCE_NAT_PORTS.into()) { @@ -1015,7 +1017,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 1), )) .unwrap(); - context.initialize_ip_pool("default", range).await; + context.create_ip_pool("default", range, true).await; // Allocate an Ephemeral IP, which should take the entire port range of // the only address in the pool. 
@@ -1098,7 +1100,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.initialize_ip_pool("default", range).await; + context.create_ip_pool("default", range, true).await; // TODO-completeness: Implementing Iterator for IpRange would be nice. let addresses = [ @@ -1199,7 +1201,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.initialize_ip_pool("default", range).await; + context.create_ip_pool("default", range, true).await; let instance_id = Uuid::new_v4(); let id = Uuid::new_v4(); @@ -1659,7 +1661,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.initialize_ip_pool("default", range).await; + context.create_ip_pool("default", range, true).await; // Create one SNAT IP address. let instance_id = Uuid::new_v4(); @@ -1721,13 +1723,13 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.initialize_ip_pool("default", first_range).await; + context.create_ip_pool("default", first_range, true).await; let second_range = IpRange::try_from(( Ipv4Addr::new(10, 0, 0, 4), Ipv4Addr::new(10, 0, 0, 6), )) .unwrap(); - context.create_ip_pool("p1", second_range, /*default*/ false).await; + context.create_ip_pool("p1", second_range, false).await; // Allocating an address on an instance in the second pool should be // respected, even though there are IPs available in the first. @@ -1765,12 +1767,12 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.initialize_ip_pool("default", first_range).await; + context.create_ip_pool("default", first_range, true).await; let first_address = Ipv4Addr::new(10, 0, 0, 4); let last_address = Ipv4Addr::new(10, 0, 0, 6); let second_range = IpRange::try_from((first_address, last_address)).unwrap(); - context.create_ip_pool("p1", second_range, /* default */ false).await; + context.create_ip_pool("p1", second_range, false).await; // Allocate all available addresses in the second pool. 
let instance_id = Uuid::new_v4(); diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 5efdaf7b6f..1d9b3e515e 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -7,14 +7,14 @@ use crate::external_api::params; use crate::external_api::shared::IpRange; use ipnetwork::IpNetwork; -use nexus_db_model::IpPool; use nexus_db_queries::authz; +use nexus_db_queries::authz::ApiResource; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; -use nexus_db_queries::db::fixed_data::silo::INTERNAL_SILO_ID; use nexus_db_queries::db::lookup; use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::model::Name; +use nexus_types::identity::Resource; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; @@ -26,9 +26,22 @@ use omicron_common::api::external::NameOrId; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use ref_cast::RefCast; +use uuid::Uuid; -fn is_internal(pool: &IpPool) -> bool { - pool.silo_id == Some(*INTERNAL_SILO_ID) +/// Helper to make it easier to 404 on attempts to manipulate internal pools +fn not_found_from_lookup(pool_lookup: &lookup::IpPool<'_>) -> Error { + match pool_lookup { + lookup::IpPool::Name(_, name) => { + Error::not_found_by_name(ResourceType::IpPool, &name) + } + lookup::IpPool::OwnedName(_, name) => { + Error::not_found_by_name(ResourceType::IpPool, &name) + } + lookup::IpPool::PrimaryKey(_, id) => { + Error::not_found_by_id(ResourceType::IpPool, &id) + } + lookup::IpPool::Error(_, error) => error.to_owned(), + } } impl super::Nexus { @@ -56,24 +69,112 @@ impl super::Nexus { opctx: &OpContext, pool_params: ¶ms::IpPoolCreate, ) -> CreateResult { - let silo_id = match pool_params.clone().silo { - Some(silo) => { - let (.., authz_silo) = self - .silo_lookup(&opctx, silo)? 
- .lookup_for(authz::Action::Read) - .await?; - Some(authz_silo.id()) - } - _ => None, - }; - let pool = db::model::IpPool::new( - &pool_params.identity, - silo_id, - pool_params.is_default, - ); + let pool = db::model::IpPool::new(&pool_params.identity); self.db_datastore.ip_pool_create(opctx, pool).await } + /// List IP pools in current silo + pub(crate) async fn silo_ip_pools_list( + &self, + opctx: &OpContext, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + self.db_datastore.silo_ip_pools_list(opctx, pagparams).await + } + + // Look up pool by name or ID, but only return it if it's linked to the + // current silo + pub async fn silo_ip_pool_fetch<'a>( + &'a self, + opctx: &'a OpContext, + pool: &'a NameOrId, + ) -> LookupResult { + let (authz_pool, pool) = + self.ip_pool_lookup(opctx, pool)?.fetch().await?; + + // 404 if no link is found in the current silo + let link = self.db_datastore.ip_pool_fetch_link(opctx, pool.id()).await; + if link.is_err() { + return Err(authz_pool.not_found()); + } + + Ok(pool) + } + + pub(crate) async fn ip_pool_silo_list( + &self, + opctx: &OpContext, + pool_lookup: &lookup::IpPool<'_>, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec { + let (.., authz_pool) = + pool_lookup.lookup_for(authz::Action::ListChildren).await?; + self.db_datastore.ip_pool_silo_list(opctx, &authz_pool, pagparams).await + } + + pub(crate) async fn ip_pool_link_silo( + &self, + opctx: &OpContext, + pool_lookup: &lookup::IpPool<'_>, + silo_link: ¶ms::IpPoolSiloLink, + ) -> CreateResult { + let (authz_pool,) = + pool_lookup.lookup_for(authz::Action::Modify).await?; + let (authz_silo,) = self + .silo_lookup(&opctx, silo_link.silo.clone())? + .lookup_for(authz::Action::Modify) + .await?; + self.db_datastore + .ip_pool_link_silo( + opctx, + db::model::IpPoolResource { + ip_pool_id: authz_pool.id(), + resource_type: db::model::IpPoolResourceType::Silo, + resource_id: authz_silo.id(), + is_default: silo_link.is_default, + }, + ) + .await + } + + pub(crate) async fn ip_pool_unlink_silo( + &self, + opctx: &OpContext, + pool_lookup: &lookup::IpPool<'_>, + silo_lookup: &lookup::Silo<'_>, + ) -> DeleteResult { + let (.., authz_pool) = + pool_lookup.lookup_for(authz::Action::Modify).await?; + let (.., authz_silo) = + silo_lookup.lookup_for(authz::Action::Modify).await?; + + self.db_datastore + .ip_pool_unlink_silo(opctx, &authz_pool, &authz_silo) + .await + } + + pub(crate) async fn ip_pool_silo_update( + &self, + opctx: &OpContext, + pool_lookup: &lookup::IpPool<'_>, + silo_lookup: &lookup::Silo<'_>, + update: ¶ms::IpPoolSiloUpdate, + ) -> CreateResult { + let (.., authz_pool) = + pool_lookup.lookup_for(authz::Action::Modify).await?; + let (.., authz_silo) = + silo_lookup.lookup_for(authz::Action::Modify).await?; + + self.db_datastore + .ip_pool_set_default( + opctx, + &authz_pool, + &authz_silo, + update.is_default, + ) + .await + } + pub(crate) async fn ip_pools_list( &self, opctx: &OpContext, @@ -89,6 +190,13 @@ impl super::Nexus { ) -> DeleteResult { let (.., authz_pool, db_pool) = pool_lookup.fetch_for(authz::Action::Delete).await?; + + let is_internal = + self.db_datastore.ip_pool_is_internal(opctx, &authz_pool).await?; + if is_internal { + return Err(not_found_from_lookup(pool_lookup)); + } + self.db_datastore.ip_pool_delete(opctx, &authz_pool, &db_pool).await } @@ -100,6 +208,13 @@ impl super::Nexus { ) -> UpdateResult { let (.., authz_pool) = pool_lookup.lookup_for(authz::Action::Modify).await?; + + let is_internal = + self.db_datastore.ip_pool_is_internal(opctx, 
&authz_pool).await?; + if is_internal { + return Err(not_found_from_lookup(pool_lookup)); + } + self.db_datastore .ip_pool_update(opctx, &authz_pool, updates.clone().into()) .await @@ -111,13 +226,13 @@ impl super::Nexus { pool_lookup: &lookup::IpPool<'_>, pagparams: &DataPageParams<'_, IpNetwork>, ) -> ListResultVec { - let (.., authz_pool, db_pool) = - pool_lookup.fetch_for(authz::Action::ListChildren).await?; - if is_internal(&db_pool) { - return Err(Error::not_found_by_name( - ResourceType::IpPool, - &db_pool.identity.name, - )); + let (.., authz_pool) = + pool_lookup.lookup_for(authz::Action::ListChildren).await?; + + let is_internal = + self.db_datastore.ip_pool_is_internal(opctx, &authz_pool).await?; + if is_internal { + return Err(not_found_from_lookup(pool_lookup)); } self.db_datastore @@ -131,13 +246,13 @@ impl super::Nexus { pool_lookup: &lookup::IpPool<'_>, range: &IpRange, ) -> UpdateResult { - let (.., authz_pool, db_pool) = + let (.., authz_pool, _db_pool) = pool_lookup.fetch_for(authz::Action::Modify).await?; - if is_internal(&db_pool) { - return Err(Error::not_found_by_name( - ResourceType::IpPool, - &db_pool.identity.name, - )); + + let is_internal = + self.db_datastore.ip_pool_is_internal(opctx, &authz_pool).await?; + if is_internal { + return Err(not_found_from_lookup(pool_lookup)); } self.db_datastore.ip_pool_add_range(opctx, &authz_pool, range).await } @@ -148,14 +263,16 @@ impl super::Nexus { pool_lookup: &lookup::IpPool<'_>, range: &IpRange, ) -> DeleteResult { - let (.., authz_pool, db_pool) = + let (.., authz_pool, _db_pool) = pool_lookup.fetch_for(authz::Action::Modify).await?; - if is_internal(&db_pool) { - return Err(Error::not_found_by_name( - ResourceType::IpPool, - &db_pool.identity.name, - )); + + let is_internal = + self.db_datastore.ip_pool_is_internal(opctx, &authz_pool).await?; + + if is_internal { + return Err(not_found_from_lookup(pool_lookup)); } + self.db_datastore.ip_pool_delete_range(opctx, &authz_pool, range).await } diff --git a/nexus/src/app/project.rs b/nexus/src/app/project.rs index 6e8727a889..2e852ba2d3 100644 --- a/nexus/src/app/project.rs +++ b/nexus/src/app/project.rs @@ -8,7 +8,6 @@ use crate::app::sagas; use crate::external_api::params; use crate::external_api::shared; use anyhow::Context; -use nexus_db_model::Name; use nexus_db_queries::authn; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; @@ -24,7 +23,6 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::NameOrId; use omicron_common::api::external::UpdateResult; -use ref_cast::RefCast; use std::sync::Arc; impl super::Nexus { @@ -149,40 +147,4 @@ impl super::Nexus { .collect::, _>>()?; Ok(shared::Policy { role_assignments }) } - - pub(crate) async fn project_ip_pools_list( - &self, - opctx: &OpContext, - project_lookup: &lookup::Project<'_>, - pagparams: &PaginatedBy<'_>, - ) -> ListResultVec { - let (.., authz_project) = - project_lookup.lookup_for(authz::Action::ListChildren).await?; - - self.db_datastore - .project_ip_pools_list(opctx, &authz_project, pagparams) - .await - } - - pub fn project_ip_pool_lookup<'a>( - &'a self, - opctx: &'a OpContext, - pool: &'a NameOrId, - _project_lookup: &Option>, - ) -> LookupResult> { - // TODO(2148, 2056): check that the given project has access (if one - // is provided to the call) once that relation is implemented - match pool { - NameOrId::Name(name) => { - let pool = LookupPath::new(opctx, &self.db_datastore) - 
.ip_pool_name(Name::ref_cast(name)); - Ok(pool) - } - NameOrId::Id(id) => { - let pool = - LookupPath::new(opctx, &self.db_datastore).ip_pool_id(*id); - Ok(pool) - } - } - } } diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index ab62977746..9d52ec1501 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -834,10 +834,8 @@ pub(crate) mod test { use diesel::{ ExpressionMethods, OptionalExtension, QueryDsl, SelectableHelper, }; - use dropshot::test_util::ClientTestContext; use nexus_db_queries::context::OpContext; use nexus_db_queries::{authn::saga::Serialized, db::datastore::DataStore}; - use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; @@ -853,12 +851,6 @@ pub(crate) mod test { const DISK_NAME: &str = "my-disk"; const PROJECT_NAME: &str = "springfield-squidport"; - async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None, None).await; - let project = create_project(client, PROJECT_NAME).await; - project.identity.id - } - pub fn new_disk_create_params() -> params::DiskCreate { params::DiskCreate { identity: IdentityMetadataCreateParams { @@ -896,7 +888,8 @@ pub(crate) mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx().nexus; - let project_id = create_org_and_project(&client).await; + let project_id = + create_project(&client, PROJECT_NAME).await.identity.id; // Build the saga DAG with the provided test parameters let opctx = test_opctx(cptestctx); @@ -1065,7 +1058,8 @@ pub(crate) mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx().nexus; - let project_id = create_org_and_project(&client).await; + let project_id = + create_project(&client, PROJECT_NAME).await.identity.id; let opctx = test_opctx(cptestctx); crate::app::sagas::test_helpers::action_failure_can_unwind::< @@ -1094,7 +1088,8 @@ pub(crate) mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx.nexus; - let project_id = create_org_and_project(&client).await; + let project_id = + create_project(&client, PROJECT_NAME).await.identity.id; let opctx = test_opctx(&cptestctx); crate::app::sagas::test_helpers::action_failure_can_unwind_idempotently::< @@ -1134,7 +1129,8 @@ pub(crate) mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx.nexus; - let project_id = create_org_and_project(&client).await; + let project_id = + create_project(&client, PROJECT_NAME).await.identity.id; // Build the saga DAG with the provided test parameters let opctx = test_opctx(&cptestctx); diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index f791d289db..333e6c1672 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -184,29 +184,20 @@ pub(crate) mod test { app::saga::create_saga_dag, app::sagas::disk_delete::Params, app::sagas::disk_delete::SagaDiskDelete, }; - use dropshot::test_util::ClientTestContext; use nexus_db_model::Disk; use nexus_db_queries::authn::saga::Serialized; use nexus_db_queries::context::OpContext; - use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use 
omicron_common::api::external::Name; - use uuid::Uuid; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; const PROJECT_NAME: &str = "springfield-squidport"; - async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None, None).await; - let project = create_project(client, PROJECT_NAME).await; - project.identity.id - } - pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), @@ -242,7 +233,7 @@ pub(crate) mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx.nexus; - let project_id = create_org_and_project(&client).await; + let project_id = create_project(client, PROJECT_NAME).await.identity.id; let disk = create_disk(&cptestctx).await; // Build the saga DAG with the provided test parameters @@ -268,7 +259,7 @@ pub(crate) mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx.nexus; - let project_id = create_org_and_project(&client).await; + let project_id = create_project(client, PROJECT_NAME).await.identity.id; let disk = create_disk(&cptestctx).await; // Build the saga DAG with the provided test parameters diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index fd86e2052a..c4c9c4e083 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -564,7 +564,7 @@ async fn sic_allocate_instance_snat_ip( let instance_id = sagactx.lookup::("instance_id")?; let ip_id = sagactx.lookup::("snat_ip_id")?; - let pool = datastore + let (.., pool) = datastore .ip_pools_fetch_default(&opctx) .await .map_err(ActionError::action_failed)?; @@ -909,9 +909,9 @@ pub mod test { use nexus_db_queries::authn::saga::Serialized; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::datastore::DataStore; + use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_disk; use nexus_test_utils::resource_helpers::create_project; - use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::{ @@ -930,7 +930,7 @@ pub mod test { const DISK_NAME: &str = "my-disk"; async fn create_org_project_and_disk(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; create_disk(&client, PROJECT_NAME, DISK_NAME).await; project.identity.id diff --git a/nexus/src/app/sagas/instance_delete.rs b/nexus/src/app/sagas/instance_delete.rs index 7802312b10..013bececee 100644 --- a/nexus/src/app/sagas/instance_delete.rs +++ b/nexus/src/app/sagas/instance_delete.rs @@ -178,9 +178,9 @@ mod test { use nexus_db_queries::{ authn::saga::Serialized, context::OpContext, db, db::lookup::LookupPath, }; + use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_disk; use nexus_test_utils::resource_helpers::create_project; - use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::identity::Resource; @@ -199,7 +199,7 @@ mod test { const DISK_NAME: &str = "my-disk"; async fn create_org_project_and_disk(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; + 
create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; create_disk(&client, PROJECT_NAME, DISK_NAME).await; project.identity.id diff --git a/nexus/src/app/sagas/instance_migrate.rs b/nexus/src/app/sagas/instance_migrate.rs index 7a417a5781..29c189efb4 100644 --- a/nexus/src/app/sagas/instance_migrate.rs +++ b/nexus/src/app/sagas/instance_migrate.rs @@ -501,10 +501,10 @@ mod tests { use camino::Utf8Path; use dropshot::test_util::ClientTestContext; use nexus_test_interface::NexusServer; - use nexus_test_utils::{ - resource_helpers::{create_project, object_create, populate_ip_pool}, - start_sled_agent, + use nexus_test_utils::resource_helpers::{ + create_default_ip_pool, create_project, object_create, }; + use nexus_test_utils::start_sled_agent; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::{ ByteCount, IdentityMetadataCreateParams, InstanceCpuCount, @@ -520,7 +520,7 @@ mod tests { const INSTANCE_NAME: &str = "test-instance"; async fn setup_test_project(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(&client, PROJECT_NAME).await; project.identity.id } diff --git a/nexus/src/app/sagas/instance_start.rs b/nexus/src/app/sagas/instance_start.rs index e6717b0164..8957a838e7 100644 --- a/nexus/src/app/sagas/instance_start.rs +++ b/nexus/src/app/sagas/instance_start.rs @@ -734,7 +734,7 @@ mod test { use dropshot::test_util::ClientTestContext; use nexus_db_queries::authn; use nexus_test_utils::resource_helpers::{ - create_project, object_create, populate_ip_pool, + create_default_ip_pool, create_project, object_create, }; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::{ @@ -751,7 +751,7 @@ mod test { const INSTANCE_NAME: &str = "test-instance"; async fn setup_test_project(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(&client, PROJECT_NAME).await; project.identity.id } diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index c3fe6fc327..ed8c8ccebf 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -1554,7 +1554,6 @@ mod test { use crate::app::saga::create_saga_dag; use crate::app::sagas::test_helpers; - use crate::external_api::shared::IpRange; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::{ ExpressionMethods, OptionalExtension, QueryDsl, SelectableHelper, @@ -1563,12 +1562,11 @@ mod test { use nexus_db_queries::context::OpContext; use nexus_db_queries::db::datastore::InstanceAndActiveVmm; use nexus_db_queries::db::DataStore; + use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_disk; - use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::delete_disk; use nexus_test_utils::resource_helpers::object_create; - use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params::InstanceDiskAttachment; @@ -1580,7 +1578,6 @@ mod test { use omicron_common::api::external::NameOrId; use sled_agent_client::types::CrucibleOpts; use sled_agent_client::TestInterfaces as SledAgentTestInterfaces; - use std::net::Ipv4Addr; use 
std::str::FromStr; #[test] @@ -1785,8 +1782,10 @@ mod test { const DISK_NAME: &str = "disky-mcdiskface"; const INSTANCE_NAME: &str = "base-instance"; - async fn create_org_project_and_disk(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None, None).await; + async fn create_project_and_disk_and_pool( + client: &ClientTestContext, + ) -> Uuid { + create_default_ip_pool(&client).await; create_project(client, PROJECT_NAME).await; create_disk(client, PROJECT_NAME, DISK_NAME).await.identity.id } @@ -1833,7 +1832,7 @@ mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx().nexus; - let disk_id = create_org_project_and_disk(&client).await; + let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters let opctx = test_opctx(cptestctx); @@ -2022,7 +2021,7 @@ mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx().nexus; - let disk_id = create_org_project_and_disk(&client).await; + let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters let opctx = test_opctx(&cptestctx); @@ -2040,24 +2039,6 @@ mod test { // before the first attempt to run the saga recreates it. delete_disk(client, PROJECT_NAME, DISK_NAME).await; - // The no-pantry variant of the test needs to see the disk attached to - // an instance. Set up an IP pool so that instances can be created - // against it. - if !use_the_pantry { - populate_ip_pool( - &client, - "default", - Some( - IpRange::try_from(( - Ipv4Addr::new(10, 1, 0, 0), - Ipv4Addr::new(10, 1, 255, 255), - )) - .unwrap(), - ), - ) - .await; - } - crate::app::sagas::test_helpers::action_failure_can_unwind::< SagaSnapshotCreate, _, @@ -2182,7 +2163,7 @@ mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx().nexus; - let disk_id = create_org_project_and_disk(&client).await; + let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters let opctx = test_opctx(cptestctx); @@ -2291,7 +2272,7 @@ mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx().nexus; - let disk_id = create_org_project_and_disk(&client).await; + let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters let opctx = test_opctx(cptestctx); @@ -2352,19 +2333,6 @@ mod test { assert!(output.is_err()); // Attach the disk to an instance, then rerun the saga - populate_ip_pool( - &client, - "default", - Some( - IpRange::try_from(( - Ipv4Addr::new(10, 1, 0, 0), - Ipv4Addr::new(10, 1, 255, 255), - )) - .unwrap(), - ), - ) - .await; - let instance_state = setup_test_instance( cptestctx, client, diff --git a/nexus/src/app/sagas/vpc_create.rs b/nexus/src/app/sagas/vpc_create.rs index 4b5bedf41e..6b48e4087a 100644 --- a/nexus/src/app/sagas/vpc_create.rs +++ b/nexus/src/app/sagas/vpc_create.rs @@ -455,8 +455,8 @@ pub(crate) mod test { db::datastore::DataStore, db::fixed_data::vpc::SERVICES_VPC_ID, db::lookup::LookupPath, }; + use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_project; - use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Name; @@ -469,7 +469,7 @@ pub(crate) mod test { const PROJECT_NAME: &str = 
"springfield-squidport"; async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; project.identity.id } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 5ac782aee6..21acb45ed3 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -6,13 +6,10 @@ use super::{ console_api, device_auth, params, - params::{ProjectSelector, UninitializedSledId}, - shared::UninitializedSled, views::{ self, Certificate, Group, IdentityProvider, Image, IpPool, IpPoolRange, - PhysicalDisk, Project, Rack, Role, Silo, SiloQuotas, SiloUtilization, - Sled, SledInstance, Snapshot, SshKey, Switch, User, UserBuiltin, Vpc, - VpcRouter, VpcSubnet, + PhysicalDisk, Project, Rack, Role, Silo, SiloUtilization, Sled, + Snapshot, SshKey, User, UserBuiltin, Vpc, VpcRouter, VpcSubnet, }, }; use crate::external_api::shared; @@ -40,15 +37,13 @@ use dropshot::{ use ipnetwork::IpNetwork; use nexus_db_queries::authz; use nexus_db_queries::db; -use nexus_db_queries::db::identity::AssetIdentityMetadata; use nexus_db_queries::db::identity::Resource; use nexus_db_queries::db::lookup::ImageLookup; use nexus_db_queries::db::lookup::ImageParentLookup; use nexus_db_queries::db::model::Name; -use nexus_db_queries::{ - authz::ApiResource, db::fixed_data::silo::INTERNAL_SILO_ID, -}; +use nexus_types::external_api::views::SiloQuotas; use nexus_types::external_api::views::Utilization; +use nexus_types::identity::AssetIdentityMetadata; use omicron_common::api::external::http_pagination::data_page_params_for; use omicron_common::api::external::http_pagination::marker_for_name; use omicron_common::api::external::http_pagination::marker_for_name_or_id; @@ -124,6 +119,10 @@ pub(crate) fn external_api() -> NexusApiDescription { // Operator-Accessible IP Pools API api.register(ip_pool_list)?; api.register(ip_pool_create)?; + api.register(ip_pool_silo_list)?; + api.register(ip_pool_silo_link)?; + api.register(ip_pool_silo_unlink)?; + api.register(ip_pool_silo_update)?; api.register(ip_pool_view)?; api.register(ip_pool_delete)?; api.register(ip_pool_update)?; @@ -1294,7 +1293,7 @@ async fn project_policy_update( // IP Pools -/// List all IP pools that can be used by a given project +/// List all IP pools #[endpoint { method = GET, path = "/v1/ip-pools", @@ -1302,14 +1301,8 @@ async fn project_policy_update( }] async fn project_ip_pool_list( rqctx: RequestContext>, - query_params: Query>, + query_params: Query, ) -> Result>, HttpError> { - // Per https://github.com/oxidecomputer/omicron/issues/2148 - // This is currently the same list as /v1/system/ip-pools, that is to say, - // IP pools that are *available to* a given project, those being ones that - // are not the internal pools for Oxide service usage. 
This may change - // in the future as the scoping of pools is further developed, but for now, - // this is literally a near-duplicate of `ip_pool_list`: let apictx = rqctx.context(); let handler = async { let nexus = &apictx.nexus; @@ -1318,10 +1311,8 @@ async fn project_ip_pool_list( let scan_params = ScanByNameOrId::from_query(&query)?; let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let project_lookup = - nexus.project_lookup(&opctx, scan_params.selector.clone())?; let pools = nexus - .project_ip_pools_list(&opctx, &project_lookup, &paginated_by) + .silo_ip_pools_list(&opctx, &paginated_by) .await? .into_iter() .map(IpPool::from) @@ -1344,28 +1335,13 @@ async fn project_ip_pool_list( async fn project_ip_pool_view( rqctx: RequestContext>, path_params: Path, - project: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let nexus = &apictx.nexus; let pool_selector = path_params.into_inner().pool; - let project_lookup = if let Some(project) = project.into_inner().project - { - Some(nexus.project_lookup(&opctx, ProjectSelector { project })?) - } else { - None - }; - let (authz_pool, pool) = nexus - .project_ip_pool_lookup(&opctx, &pool_selector, &project_lookup)? - .fetch() - .await?; - // TODO(2148): once we've actualy implemented filtering to pools belonging to - // the specified project, we can remove this internal check. - if pool.silo_id == Some(*INTERNAL_SILO_ID) { - return Err(authz_pool.not_found().into()); - } + let pool = nexus.silo_ip_pool_fetch(&opctx, &pool_selector).await?; Ok(HttpResponseOk(IpPool::from(pool))) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await @@ -1445,6 +1421,8 @@ async fn ip_pool_view( let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let nexus = &apictx.nexus; let pool_selector = path_params.into_inner().pool; + // We do not prevent the service pool from being fetched by name or ID + // like we do for update, delete, associate. let (.., pool) = nexus.ip_pool_lookup(&opctx, &pool_selector)?.fetch().await?; Ok(HttpResponseOk(IpPool::from(pool))) @@ -1498,6 +1476,128 @@ async fn ip_pool_update( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } +/// List an IP pool's linked silos +#[endpoint { + method = GET, + path = "/v1/system/ip-pools/{pool}/silos", + tags = ["system/networking"], +}] +async fn ip_pool_silo_list( + rqctx: RequestContext>, + path_params: Path, + // paginating by resource_id because they're unique per pool. most robust + // option would be to paginate by a composite key representing the (pool, + // resource_type, resource) + query_params: Query, +) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.nexus; + + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + + let path = path_params.into_inner(); + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + + let assocs = nexus + .ip_pool_silo_list(&opctx, &pool_lookup, &pag_params) + .await? 
+ .into_iter() + .map(|assoc| assoc.into()) + .collect(); + + Ok(HttpResponseOk(ScanById::results_page( + &query, + assocs, + &|_, x: &views::IpPoolSilo| x.silo_id, + )?)) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Make an IP pool available within a silo +#[endpoint { + method = POST, + path = "/v1/system/ip-pools/{pool}/silos", + tags = ["system/networking"], +}] +async fn ip_pool_silo_link( + rqctx: RequestContext>, + path_params: Path, + resource_assoc: TypedBody, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let resource_assoc = resource_assoc.into_inner(); + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + let assoc = nexus + .ip_pool_link_silo(&opctx, &pool_lookup, &resource_assoc) + .await?; + Ok(HttpResponseCreated(assoc.into())) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Unlink an IP pool from a silo +/// +/// Will fail if there are any outstanding IPs allocated in the silo. +#[endpoint { + method = DELETE, + path = "/v1/system/ip-pools/{pool}/silos/{silo}", + tags = ["system/networking"], +}] +async fn ip_pool_silo_unlink( + rqctx: RequestContext>, + path_params: Path, +) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; + nexus.ip_pool_unlink_silo(&opctx, &pool_lookup, &silo_lookup).await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Make an IP pool default or not-default for a silo +/// +/// When a pool is made default for a silo, any existing default will remain +/// linked to the silo, but will no longer be the default. 
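+///
+/// For example, if pool "a" is currently a silo's default and this endpoint
+/// is called on pool "b" with `is_default: true`, then "b" becomes the
+/// silo's default while "a" remains linked as a non-default pool (the pool
+/// names here are purely illustrative).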
+#[endpoint { + method = PUT, + path = "/v1/system/ip-pools/{pool}/silos/{silo}", + tags = ["system/networking"], +}] +async fn ip_pool_silo_update( + rqctx: RequestContext>, + path_params: Path, + update: TypedBody, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let update = update.into_inner(); + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; + let assoc = nexus + .ip_pool_silo_update(&opctx, &pool_lookup, &silo_lookup, &update) + .await?; + Ok(HttpResponseOk(assoc.into())) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + /// Fetch the IP pool used for Oxide services #[endpoint { method = GET, @@ -4660,7 +4760,7 @@ async fn rack_view( async fn sled_list_uninitialized( rqctx: RequestContext>, query: Query>, -) -> Result>, HttpError> { +) -> Result>, HttpError> { let apictx = rqctx.context(); // We don't actually support real pagination let pag_params = query.into_inner(); @@ -4691,7 +4791,7 @@ async fn sled_list_uninitialized( }] async fn sled_add( rqctx: RequestContext>, - sled: TypedBody, + sled: TypedBody, ) -> Result { let apictx = rqctx.context(); let nexus = &apictx.nexus; @@ -4805,7 +4905,7 @@ async fn sled_instance_list( rqctx: RequestContext>, path_params: Path, query_params: Query, -) -> Result>, HttpError> { +) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let nexus = &apictx.nexus; @@ -4826,7 +4926,7 @@ async fn sled_instance_list( Ok(HttpResponseOk(ScanById::results_page( &query, sled_instances, - &|_, sled_instance: &SledInstance| sled_instance.identity.id, + &|_, sled_instance: &views::SledInstance| sled_instance.identity.id, )?)) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await @@ -4875,7 +4975,7 @@ async fn physical_disk_list( async fn switch_list( rqctx: RequestContext>, query_params: Query, -) -> Result>, HttpError> { +) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let nexus = &apictx.nexus; @@ -4890,7 +4990,7 @@ async fn switch_list( Ok(HttpResponseOk(ScanById::results_page( &query, switches, - &|_, switch: &Switch| switch.identity.id, + &|_, switch: &views::Switch| switch.identity.id, )?)) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await @@ -4905,7 +5005,7 @@ async fn switch_list( async fn switch_view( rqctx: RequestContext>, path_params: Path, -) -> Result, HttpError> { +) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let nexus = &apictx.nexus; diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index c72c7ad780..c2516a1509 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -12,6 +12,7 @@ use dropshot::test_util::ClientTestContext; use dropshot::HttpErrorResponseBody; use dropshot::Method; use http::StatusCode; +use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_test_interface::NexusServer; use nexus_types::external_api::params; use nexus_types::external_api::params::PhysicalDiskKind; @@ -26,6 +27,7 @@ use nexus_types::external_api::views::IpPool; use nexus_types::external_api::views::IpPoolRange; use nexus_types::external_api::views::User; use nexus_types::external_api::views::{Project, Silo, Vpc, VpcRouter}; +use 
nexus_types::identity::Resource; use nexus_types::internal_api::params as internal_params; use nexus_types::internal_api::params::Baseboard; use omicron_common::api::external::ByteCount; @@ -55,6 +57,41 @@ where .unwrap() } +pub async fn object_get( + client: &ClientTestContext, + path: &str, +) -> OutputType +where + OutputType: serde::de::DeserializeOwned, +{ + NexusRequest::object_get(client, path) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap_or_else(|e| { + panic!("failed to make \"GET\" request to {path}: {e}") + }) + .parsed_body() + .unwrap() +} + +pub async fn object_get_error( + client: &ClientTestContext, + path: &str, + status: StatusCode, +) -> HttpErrorResponseBody { + NexusRequest::new( + RequestBuilder::new(client, Method::GET, path) + .expect_status(Some(status)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body::() + .unwrap() +} + pub async fn object_create( client: &ClientTestContext, path: &str, @@ -68,13 +105,36 @@ where .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .unwrap_or_else(|_| { - panic!("failed to make \"create\" request to {path}") + .unwrap_or_else(|e| { + panic!("failed to make \"POST\" request to {path}: {e}") }) .parsed_body() .unwrap() } +/// Make a POST, assert status code, return error response body +pub async fn object_create_error( + client: &ClientTestContext, + path: &str, + input: &InputType, + status: StatusCode, +) -> HttpErrorResponseBody +where + InputType: serde::Serialize, +{ + NexusRequest::new( + RequestBuilder::new(client, Method::POST, path) + .body(Some(&input)) + .expect_status(Some(status)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body::() + .unwrap() +} + pub async fn object_put( client: &ClientTestContext, path: &str, @@ -88,41 +148,60 @@ where .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .unwrap_or_else(|_| panic!("failed to make \"PUT\" request to {path}")) + .unwrap_or_else(|e| { + panic!("failed to make \"PUT\" request to {path}: {e}") + }) .parsed_body() .unwrap() } +pub async fn object_put_error( + client: &ClientTestContext, + path: &str, + input: &InputType, + status: StatusCode, +) -> HttpErrorResponseBody +where + InputType: serde::Serialize, +{ + NexusRequest::new( + RequestBuilder::new(client, Method::PUT, path) + .body(Some(&input)) + .expect_status(Some(status)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body::() + .unwrap() +} + pub async fn object_delete(client: &ClientTestContext, path: &str) { NexusRequest::object_delete(client, path) .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .unwrap_or_else(|_| { - panic!("failed to make \"delete\" request to {path}") + .unwrap_or_else(|e| { + panic!("failed to make \"DELETE\" request to {path}: {e}") }); } -pub async fn populate_ip_pool( +pub async fn object_delete_error( client: &ClientTestContext, - pool_name: &str, - ip_range: Option, -) -> IpPoolRange { - let ip_range = ip_range.unwrap_or_else(|| { - use std::net::Ipv4Addr; - IpRange::try_from(( - Ipv4Addr::new(10, 0, 0, 0), - Ipv4Addr::new(10, 0, 255, 255), - )) - .unwrap() - }); - let range = object_create( - client, - format!("/v1/system/ip-pools/{}/ranges/add", pool_name).as_str(), - &ip_range, + path: &str, + status: StatusCode, +) -> HttpErrorResponseBody { + NexusRequest::new( + RequestBuilder::new(client, Method::DELETE, path) + .expect_status(Some(status)), ) - .await; - range + .authn_as(AuthnMode::PrivilegedUser) + 
.execute() + .await + .unwrap() + .parsed_body::() + .unwrap() } /// Create an IP pool with a single range for testing. @@ -134,7 +213,6 @@ pub async fn create_ip_pool( client: &ClientTestContext, pool_name: &str, ip_range: Option, - silo: Option, ) -> (IpPool, IpPoolRange) { let pool = object_create( client, @@ -144,15 +222,47 @@ pub async fn create_ip_pool( name: pool_name.parse().unwrap(), description: String::from("an ip pool"), }, - silo: silo.map(|id| NameOrId::Id(id)), - is_default: false, }, ) .await; - let range = populate_ip_pool(client, pool_name, ip_range).await; + + let ip_range = ip_range.unwrap_or_else(|| { + use std::net::Ipv4Addr; + IpRange::try_from(( + Ipv4Addr::new(10, 0, 0, 0), + Ipv4Addr::new(10, 0, 255, 255), + )) + .unwrap() + }); + let url = format!("/v1/system/ip-pools/{}/ranges/add", pool_name); + let range = object_create(client, &url, &ip_range).await; (pool, range) } +pub async fn link_ip_pool( + client: &ClientTestContext, + pool_name: &str, + silo_id: &Uuid, + is_default: bool, +) { + let link = + params::IpPoolSiloLink { silo: NameOrId::Id(*silo_id), is_default }; + let url = format!("/v1/system/ip-pools/{pool_name}/silos"); + object_create::( + client, &url, &link, + ) + .await; +} + +/// What you want for any test that is not testing IP logic specifically +pub async fn create_default_ip_pool( + client: &ClientTestContext, +) -> views::IpPool { + let (pool, ..) = create_ip_pool(&client, "default", None).await; + link_ip_pool(&client, "default", &DEFAULT_SILO.id(), true).await; + pool +} + pub async fn create_floating_ip( client: &ClientTestContext, fip_name: &str, diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index a7c9c99509..b9023a8212 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -18,12 +18,12 @@ use nexus_test_utils::http_testing::Collection; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::identity_eq; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_disk; use nexus_test_utils::resource_helpers::create_instance; use nexus_test_utils::resource_helpers::create_instance_with; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::objects_list_page_authz; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; @@ -95,8 +95,8 @@ fn get_disk_detach_url(instance: &NameOrId) -> String { } } -async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; +async fn create_project_and_pool(client: &ClientTestContext) -> Uuid { + create_default_ip_pool(client).await; let project = create_project(client, PROJECT_NAME).await; project.identity.id } @@ -107,7 +107,7 @@ async fn test_disk_not_found_before_creation( ) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // List disks. There aren't any yet. 
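Taken together, the reworked helpers let a test compose a non-default, silo-scoped pool in two explicit steps instead of passing silo/is_default arguments to create_ip_pool. A minimal sketch, assuming a hypothetical pool name and an already-created silo view:

    // Hypothetical pool name; any silo view with an id works here.
    let (pool, _range) = create_ip_pool(&client, "secondary-pool", None).await;
    // Link it without making it the default, so instances only draw from it
    // when the pool is named explicitly.
    link_ip_pool(&client, "secondary-pool", &silo.identity.id, false).await;
    assert_eq!(pool.identity.name.as_str(), "secondary-pool");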
@@ -186,7 +186,7 @@ async fn test_disk_create_attach_detach_delete( ) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - let project_id = create_org_and_project(client).await; + let project_id = create_project_and_pool(client).await; let nexus = &cptestctx.server.apictx().nexus; let disks_url = get_disks_url(); @@ -315,7 +315,7 @@ async fn test_disk_create_disk_that_already_exists_fails( ) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // Create a disk. @@ -360,7 +360,7 @@ async fn test_disk_create_disk_that_already_exists_fails( async fn test_disk_slot_assignment(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let nexus = &cptestctx.server.apictx().nexus; let disk_names = ["a", "b", "c", "d"]; @@ -467,7 +467,7 @@ async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; let disks_url = get_disks_url(); // Create a disk. @@ -670,7 +670,7 @@ async fn test_disk_creation_region_requested_then_started( ) { let client = &cptestctx.external_client; let test = DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Before we create a disk, set the response from the Crucible Agent: // no matter what regions get requested, they'll always *start* as @@ -689,7 +689,7 @@ async fn test_disk_region_creation_failure( ) { let client = &cptestctx.external_client; let test = DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Before we create a disk, set the response from the Crucible Agent: // no matter what regions get requested, they'll always fail. @@ -745,7 +745,7 @@ async fn test_disk_invalid_block_size_rejected( ) { let client = &cptestctx.external_client; let _test = DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Attempt to allocate the disk, observe a server error. let disk_size = ByteCount::from_gibibytes_u32(3); @@ -788,7 +788,7 @@ async fn test_disk_reject_total_size_not_divisible_by_block_size( ) { let client = &cptestctx.external_client; let _test = DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Attempt to allocate the disk, observe a server error. 
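    // (The size below is 3 GiB plus 256 bytes, deliberately not a multiple of
    // any supported block size, so the create request is expected to be
    // rejected.)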
let disk_size = ByteCount::from(3 * 1024 * 1024 * 1024 + 256); @@ -829,7 +829,7 @@ async fn test_disk_reject_total_size_less_than_min_disk_size_bytes( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disk_size = ByteCount::from(MIN_DISK_SIZE_BYTES / 2); @@ -871,7 +871,7 @@ async fn test_disk_reject_total_size_greater_than_max_disk_size_bytes( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disk_size = ByteCount::try_from(MAX_DISK_SIZE_BYTES + (1 << 30)).unwrap(); @@ -916,7 +916,7 @@ async fn test_disk_reject_total_size_not_divisible_by_min_disk_size( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disk_size = ByteCount::from(1024 * 1024 * 1024 + 512); @@ -971,7 +971,7 @@ async fn test_disk_backed_by_multiple_region_sets( test.add_zpool_with_dataset(cptestctx, 10).await; test.add_zpool_with_dataset(cptestctx, 10).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Ask for a 20 gibibyte disk. let disk_size = ByteCount::from_gibibytes_u32(20); @@ -1004,7 +1004,7 @@ async fn test_disk_backed_by_multiple_region_sets( async fn test_disk_too_big(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Assert default is still 10 GiB assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB); @@ -1044,7 +1044,7 @@ async fn test_disk_virtual_provisioning_collection( let _test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(client).await; let project_id1 = create_project(client, PROJECT_NAME).await.identity.id; let project_id2 = create_project(client, PROJECT_NAME_2).await.identity.id; @@ -1252,8 +1252,7 @@ async fn test_disk_virtual_provisioning_collection_failed_delete( let disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - let project_id1 = create_project(client, PROJECT_NAME).await.identity.id; + let project_id1 = create_project_and_pool(client).await; let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -1393,7 +1392,6 @@ async fn test_phantom_disk_rename(cptestctx: &ControlPlaneTestContext) { let _disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; let _project_id1 = create_project(client, PROJECT_NAME).await.identity.id; // Create a 1 GB disk @@ -1519,7 +1517,7 @@ async fn test_disk_size_accounting(cptestctx: &ControlPlaneTestContext) { // Assert default is still 10 GiB assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB); - create_org_and_project(client).await; + create_project_and_pool(client).await; // Total occupied size should start at 0 for zpool in &test.zpools { @@ -1688,7 +1686,7 @@ async fn test_multiple_disks_multiple_zpools( test.add_zpool_with_dataset(cptestctx, 10).await; test.add_zpool_with_dataset(cptestctx, 10).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Ask for a 10 gibibyte disk, this should succeed let disk_size = ByteCount::from_gibibytes_u32(10); @@ -1765,7 +1763,7 @@ async fn test_disk_metrics(cptestctx: 
&ControlPlaneTestContext) { let oximeter = &cptestctx.oximeter; let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - let project_id = create_org_and_project(client).await; + let project_id = create_project_and_pool(client).await; let disk = create_disk(&client, PROJECT_NAME, DISK_NAME).await; oximeter.force_collect().await; @@ -1838,7 +1836,7 @@ async fn test_disk_metrics_paginated(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk(&client, PROJECT_NAME, DISK_NAME).await; create_instance_with_disk(client).await; @@ -1900,7 +1898,7 @@ async fn test_disk_metrics_paginated(cptestctx: &ControlPlaneTestContext) { async fn test_disk_create_for_importing(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); let new_disk = params::DiskCreate { @@ -1943,7 +1941,7 @@ async fn test_project_delete_disk_no_auth_idempotent( ) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Create a disk let disks_url = get_disks_url(); diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 8708083124..b7b838ca50 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -599,7 +599,7 @@ pub static DEMO_IMAGE_CREATE: Lazy = // IP Pools pub static DEMO_IP_POOLS_PROJ_URL: Lazy = - Lazy::new(|| format!("/v1/ip-pools?project={}", *DEMO_PROJECT_NAME)); + Lazy::new(|| "/v1/ip-pools".to_string()); pub const DEMO_IP_POOLS_URL: &'static str = "/v1/system/ip-pools"; pub static DEMO_IP_POOL_NAME: Lazy = Lazy::new(|| "default".parse().unwrap()); @@ -609,8 +609,6 @@ pub static DEMO_IP_POOL_CREATE: Lazy = name: DEMO_IP_POOL_NAME.clone(), description: String::from("an IP pool"), }, - silo: None, - is_default: true, }); pub static DEMO_IP_POOL_PROJ_URL: Lazy = Lazy::new(|| { format!( @@ -627,6 +625,19 @@ pub static DEMO_IP_POOL_UPDATE: Lazy = description: Some(String::from("a new IP pool")), }, }); +pub static DEMO_IP_POOL_SILOS_URL: Lazy = + Lazy::new(|| format!("{}/silos", *DEMO_IP_POOL_URL)); +pub static DEMO_IP_POOL_SILOS_BODY: Lazy = + Lazy::new(|| params::IpPoolSiloLink { + silo: NameOrId::Id(DEFAULT_SILO.identity().id), + is_default: true, // necessary for demo instance create to go through + }); + +pub static DEMO_IP_POOL_SILO_URL: Lazy = + Lazy::new(|| format!("{}/silos/{}", *DEMO_IP_POOL_URL, *DEMO_SILO_NAME)); +pub static DEMO_IP_POOL_SILO_UPDATE_BODY: Lazy = + Lazy::new(|| params::IpPoolSiloUpdate { is_default: false }); + pub static DEMO_IP_POOL_RANGE: Lazy = Lazy::new(|| { IpRange::V4( Ipv4Range::new( @@ -980,6 +991,26 @@ pub static VERIFY_ENDPOINTS: Lazy> = Lazy::new(|| { ], }, + // IP pool silos endpoint + VerifyEndpoint { + url: &DEMO_IP_POOL_SILOS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post(serde_json::to_value(&*DEMO_IP_POOL_SILOS_BODY).unwrap()), + ], + }, + VerifyEndpoint { + url: &DEMO_IP_POOL_SILO_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Delete, + 
AllowedMethod::Put(serde_json::to_value(&*DEMO_IP_POOL_SILO_UPDATE_BODY).unwrap()), + ], + }, + // IP Pool ranges endpoint VerifyEndpoint { url: &DEMO_IP_POOL_RANGES_URL, diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs index daec8e2064..3b6127ceb1 100644 --- a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -12,19 +12,26 @@ use dropshot::test_util::ClientTestContext; use dropshot::HttpErrorResponseBody; use http::Method; use http::StatusCode; +use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_floating_ip; use nexus_test_utils::resource_helpers::create_instance_with; use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::create_silo; -use nexus_test_utils::resource_helpers::populate_ip_pool; +use nexus_test_utils::resource_helpers::link_ip_pool; +use nexus_test_utils::resource_helpers::object_create; +use nexus_test_utils::resource_helpers::object_create_error; +use nexus_test_utils::resource_helpers::object_delete; +use nexus_test_utils::resource_helpers::object_delete_error; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use nexus_types::external_api::shared; use nexus_types::external_api::views::FloatingIp; +use nexus_types::identity::Resource; use omicron_common::address::IpRange; use omicron_common::address::Ipv4Range; use omicron_common::api::external::IdentityMetadataCreateParams; @@ -59,7 +66,7 @@ pub fn get_floating_ip_by_id_url(fip_id: &Uuid) -> String { async fn test_floating_ip_access(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; // Create a floating IP from the default pool. @@ -106,12 +113,15 @@ async fn test_floating_ip_access(cptestctx: &ControlPlaneTestContext) { async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; + // automatically linked to current silo + create_default_ip_pool(&client).await; + let other_pool_range = IpRange::V4( Ipv4Range::new(Ipv4Addr::new(10, 1, 0, 1), Ipv4Addr::new(10, 1, 0, 5)) .unwrap(), ); - create_ip_pool(&client, "other-pool", Some(other_pool_range), None).await; + // not automatically linked to currently silo. see below + create_ip_pool(&client, "other-pool", Some(other_pool_range)).await; let project = create_project(client, PROJECT_NAME).await; @@ -146,16 +156,27 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(fip.instance_id, None); assert_eq!(fip.ip, ip_addr); - // Create with no chosen IP from fleet-scoped named pool. 
+ // Creating with other-pool fails with 404 until it is linked to the current silo let fip_name = FIP_NAMES[2]; - let fip = create_floating_ip( - client, - fip_name, - project.identity.name.as_str(), - None, - Some("other-pool"), - ) - .await; + let params = params::FloatingIpCreate { + identity: IdentityMetadataCreateParams { + name: fip_name.parse().unwrap(), + description: String::from("a floating ip"), + }, + address: None, + pool: Some(NameOrId::Name("other-pool".parse().unwrap())), + }; + let url = format!("/v1/floating-ips?project={}", project.identity.name); + let error = + object_create_error(client, &url, ¶ms, StatusCode::NOT_FOUND).await; + assert_eq!(error.message, "not found: ip-pool with name \"other-pool\""); + + // now link the pool and everything should work with the exact same params + let silo_id = DEFAULT_SILO.id(); + link_ip_pool(&client, "other-pool", &silo_id, false).await; + + // Create with no chosen IP from named pool. + let fip: FloatingIp = object_create(client, &url, ¶ms).await; assert_eq!(fip.identity.name.as_str(), fip_name); assert_eq!(fip.project_id, project.identity.id); assert_eq!(fip.instance_id, None); @@ -184,8 +205,6 @@ async fn test_floating_ip_create_fails_in_other_silo_pool( ) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; - let project = create_project(client, PROJECT_NAME).await; // Create other silo and pool linked to that silo @@ -200,13 +219,8 @@ async fn test_floating_ip_create_fails_in_other_silo_pool( Ipv4Range::new(Ipv4Addr::new(10, 2, 0, 1), Ipv4Addr::new(10, 2, 0, 5)) .unwrap(), ); - create_ip_pool( - &client, - "external-silo-pool", - Some(other_pool_range), - Some(other_silo.identity.id), - ) - .await; + create_ip_pool(&client, "external-silo-pool", Some(other_pool_range)).await; + // don't link pool to silo yet let fip_name = FIP_NAMES[4]; @@ -223,14 +237,19 @@ async fn test_floating_ip_create_fails_in_other_silo_pool( pool: Some(NameOrId::Name("external-silo-pool".parse().unwrap())), }; - let error = NexusRequest::new( - RequestBuilder::new(client, Method::POST, &url) - .body(Some(&body)) - .expect_status(Some(StatusCode::NOT_FOUND)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::() - .await; + let error = + object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await; + assert_eq!( + error.message, + "not found: ip-pool with name \"external-silo-pool\"" + ); + + // error is the same after linking the pool to the other silo + link_ip_pool(&client, "external-silo-pool", &other_silo.identity.id, false) + .await; + + let error = + object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await; assert_eq!( error.message, "not found: ip-pool with name \"external-silo-pool\"" @@ -243,7 +262,7 @@ async fn test_floating_ip_create_ip_in_use( ) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; let contested_ip = "10.0.0.0".parse().unwrap(); @@ -291,7 +310,7 @@ async fn test_floating_ip_create_name_in_use( ) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; let contested_name = FIP_NAMES[0]; @@ -340,7 +359,7 @@ async fn test_floating_ip_create_name_in_use( async fn test_floating_ip_delete(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - 
populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; let fip = create_floating_ip( @@ -352,15 +371,24 @@ async fn test_floating_ip_delete(cptestctx: &ControlPlaneTestContext) { ) .await; + // unlink fails because there are outstanding IPs + let silo_id = DEFAULT_SILO.id(); + let silo_link_url = + format!("/v1/system/ip-pools/default/silos/{}", silo_id); + let error = + object_delete_error(client, &silo_link_url, StatusCode::BAD_REQUEST) + .await; + assert_eq!( + error.message, + "IP addresses from this pool are in use in the linked silo" + ); + // Delete the floating IP. - NexusRequest::object_delete( - client, - &get_floating_ip_by_id_url(&fip.identity.id), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap(); + let floating_ip_url = get_floating_ip_by_id_url(&fip.identity.id); + object_delete(client, &floating_ip_url).await; + + // now unlink works + object_delete(client, &silo_link_url).await; } #[nexus_test] @@ -369,7 +397,7 @@ async fn test_floating_ip_attachment(cptestctx: &ControlPlaneTestContext) { let apictx = &cptestctx.server.apictx(); let nexus = &apictx.nexus; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; let fip = create_floating_ip( diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 44b65fa67b..99ef165188 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -20,15 +20,20 @@ use nexus_test_interface::NexusServer; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_disk; use nexus_test_utils::resource_helpers::create_floating_ip; use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_local_user; use nexus_test_utils::resource_helpers::create_silo; use nexus_test_utils::resource_helpers::grant_iam; +use nexus_test_utils::resource_helpers::link_ip_pool; use nexus_test_utils::resource_helpers::object_create; +use nexus_test_utils::resource_helpers::object_create_error; +use nexus_test_utils::resource_helpers::object_delete; +use nexus_test_utils::resource_helpers::object_delete_error; +use nexus_test_utils::resource_helpers::object_put; use nexus_test_utils::resource_helpers::objects_list_page_authz; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils::start_sled_agent; use nexus_types::external_api::shared::IpKind; @@ -102,10 +107,9 @@ fn default_vpc_subnets_url() -> String { format!("/v1/vpc-subnets?{}&vpc=default", get_project_selector()) } -async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; - let project = create_project(client, PROJECT_NAME).await; - project.identity.id +async fn create_project_and_pool(client: &ClientTestContext) -> views::Project { + create_default_ip_pool(client).await; + create_project(client, PROJECT_NAME).await } #[nexus_test] @@ -163,8 +167,7 @@ async fn test_instances_access_before_create_returns_not_found( async fn test_instance_access(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - 
populate_ip_pool(&client, "default", None).await; - let project = create_project(client, PROJECT_NAME).await; + let project = create_project_and_pool(client).await; // Create an instance. let instance_name = "test-instance"; @@ -212,7 +215,7 @@ async fn test_instances_create_reboot_halt( let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create an instance. let instance_url = get_instance_url(instance_name); @@ -538,7 +541,7 @@ async fn test_instance_start_creates_networking_state( ); } - create_org_and_project(&client).await; + create_project_and_pool(&client).await; let instance_url = get_instance_url(instance_name); let instance = create_instance(client, PROJECT_NAME, instance_name).await; @@ -639,7 +642,7 @@ async fn test_instance_migrate(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - create_org_and_project(&client).await; + create_project_and_pool(&client).await; let instance_url = get_instance_url(instance_name); // Explicitly create an instance with no disks. Simulated sled agent assumes @@ -743,8 +746,8 @@ async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { } // Set up the project and test instance. - populate_ip_pool(&client, "default", None).await; - create_project(client, PROJECT_NAME).await; + create_project_and_pool(client).await; + let instance = nexus_test_utils::resource_helpers::create_instance_with( client, PROJECT_NAME, @@ -879,7 +882,7 @@ async fn test_instance_failed_after_sled_agent_error( let instance_name = "losing-is-fun"; // Create and start the test instance. - create_org_and_project(&client).await; + create_project_and_pool(&client).await; let instance_url = get_instance_url(instance_name); let instance = create_instance(client, PROJECT_NAME, instance_name).await; instance_simulate(nexus, &instance.identity.id).await; @@ -1010,8 +1013,8 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { let datastore = nexus.datastore(); // Create an IP pool and project that we'll use for testing. - populate_ip_pool(&client, "default", None).await; - let project_id = create_project(&client, PROJECT_NAME).await.identity.id; + let project = create_project_and_pool(&client).await; + let project_id = project.identity.id; // Query the view of these metrics stored within CRDB let opctx = @@ -1101,7 +1104,8 @@ async fn test_instance_metrics_with_migration( .await .unwrap(); - let project_id = create_org_and_project(&client).await; + let project = create_project_and_pool(&client).await; + let project_id = project.identity.id; let instance_url = get_instance_url(instance_name); // Explicitly create an instance with no disks. Simulated sled agent assumes @@ -1210,7 +1214,7 @@ async fn test_instances_create_stopped_start( let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create an instance in a stopped state. let instance: Instance = object_create( @@ -1260,7 +1264,7 @@ async fn test_instances_delete_fails_when_running_succeeds_when_stopped( let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create an instance. 
let instance_url = get_instance_url(instance_name); @@ -1356,7 +1360,7 @@ async fn test_instance_using_image_from_other_project_fails( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create an image in springfield-squidport. let images_url = format!("/v1/images?project={}", PROJECT_NAME); @@ -1437,7 +1441,7 @@ async fn test_instance_create_saga_removes_instance_database_record( let client = &cptestctx.external_client; // Create test IP pool, organization and project - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // The network interface parameters. let default_name = "default".parse::().unwrap(); @@ -1552,7 +1556,7 @@ async fn test_instance_with_single_explicit_ip_address( ) { let client = &cptestctx.external_client; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create the parameters for the interface. let default_name = "default".parse::().unwrap(); @@ -1626,7 +1630,7 @@ async fn test_instance_with_new_custom_network_interfaces( ) { let client = &cptestctx.external_client; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create a VPC Subnet other than the default. // // We'll create one interface in the default VPC Subnet and one in this new @@ -1776,7 +1780,7 @@ async fn test_instance_create_delete_network_interface( let nexus = &cptestctx.server.apictx().nexus; let instance_name = "nic-attach-test-inst"; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create the VPC Subnet for the secondary interface let secondary_subnet = params::VpcSubnetCreate { @@ -2016,7 +2020,7 @@ async fn test_instance_update_network_interfaces( let nexus = &cptestctx.server.apictx().nexus; let instance_name = "nic-update-test-inst"; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create the VPC Subnet for the secondary interface let secondary_subnet = params::VpcSubnetCreate { @@ -2480,7 +2484,7 @@ async fn test_attach_one_disk_to_instance(cptestctx: &ControlPlaneTestContext) { // Test pre-reqs DiskTest::new(&cptestctx).await; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create the "probablydata" disk create_disk(&client, PROJECT_NAME, "probablydata").await; @@ -2550,7 +2554,7 @@ async fn test_instance_create_attach_disks( // Test pre-reqs DiskTest::new(&cptestctx).await; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; let attachable_disk = create_disk(&client, PROJECT_NAME, "attachable-disk").await; @@ -2624,7 +2628,7 @@ async fn test_instance_create_attach_disks_undo( // Test pre-reqs DiskTest::new(&cptestctx).await; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; let regular_disk = create_disk(&client, PROJECT_NAME, "a-reg-disk").await; let faulted_disk = create_disk(&client, PROJECT_NAME, "faulted-disk").await; @@ -2717,7 +2721,7 @@ async fn test_attach_eight_disks_to_instance( // Test pre-reqs DiskTest::new(&cptestctx).await; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Make 8 disks for i in 0..8 { @@ -2870,7 +2874,7 @@ async fn test_cannot_attach_faulted_disks(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; // Test pre-reqs DiskTest::new(&cptestctx).await; - create_org_and_project(&client).await; + 
create_project_and_pool(&client).await; // Make 8 disks for i in 0..8 { @@ -2970,7 +2974,7 @@ async fn test_disks_detached_when_instance_destroyed( // Test pre-reqs DiskTest::new(&cptestctx).await; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Make 8 disks for i in 0..8 { @@ -3136,7 +3140,7 @@ async fn test_instances_memory_rejected_less_than_min_memory_size( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Attempt to create the instance, observe a server error. let instance_name = "just-rainsticks"; @@ -3185,7 +3189,7 @@ async fn test_instances_memory_not_divisible_by_min_memory_size( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Attempt to create the instance, observe a server error. let instance_name = "just-rainsticks"; @@ -3233,7 +3237,7 @@ async fn test_instances_memory_greater_than_max_size( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Attempt to create the instance, observe a server error. let instance_name = "just-rainsticks"; @@ -3329,8 +3333,7 @@ async fn test_cannot_provision_instance_beyond_cpu_capacity( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_project(client, PROJECT_NAME).await; - populate_ip_pool(&client, "default", None).await; + create_project_and_pool(client).await; // The third item in each tuple specifies whether instance start should // succeed or fail if all these configs are visited in order and started in @@ -3396,8 +3399,7 @@ async fn test_cannot_provision_instance_beyond_cpu_limit( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_project(client, PROJECT_NAME).await; - populate_ip_pool(&client, "default", None).await; + create_project_and_pool(client).await; let too_many_cpus = InstanceCpuCount::try_from(i64::from(MAX_VCPU_PER_INSTANCE + 1)) @@ -3438,8 +3440,7 @@ async fn test_cannot_provision_instance_beyond_ram_capacity( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_project(client, PROJECT_NAME).await; - populate_ip_pool(&client, "default", None).await; + create_project_and_pool(client).await; let configs = vec![ ( @@ -3511,7 +3512,7 @@ async fn test_instance_serial(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - create_org_and_project(&client).await; + create_project_and_pool(&client).await; let instance_url = get_instance_url(instance_name); // Make sure we get a 404 if we try to access the serial console before creation. @@ -3628,89 +3629,244 @@ async fn test_instance_ephemeral_ip_from_correct_pool( // // The first is given to the "default" pool, the provided to a distinct // explicit pool. 
- let default_pool_range = IpRange::V4( + let range1 = IpRange::V4( Ipv4Range::new( std::net::Ipv4Addr::new(10, 0, 0, 1), std::net::Ipv4Addr::new(10, 0, 0, 5), ) .unwrap(), ); - let other_pool_range = IpRange::V4( + let range2 = IpRange::V4( Ipv4Range::new( std::net::Ipv4Addr::new(10, 1, 0, 1), std::net::Ipv4Addr::new(10, 1, 0, 5), ) .unwrap(), ); - populate_ip_pool(&client, "default", Some(default_pool_range)).await; - create_ip_pool(&client, "other-pool", Some(other_pool_range), None).await; + + // make first pool the default for the priv user's silo + create_ip_pool(&client, "pool1", Some(range1)).await; + link_ip_pool(&client, "pool1", &DEFAULT_SILO.id(), /*default*/ true).await; + + // second pool is associated with the silo but not default + create_ip_pool(&client, "pool2", Some(range2)).await; + link_ip_pool(&client, "pool2", &DEFAULT_SILO.id(), /*default*/ false).await; // Create an instance with pool name blank, expect IP from default pool - create_instance_with_pool(client, "default-pool-inst", None).await; + create_instance_with_pool(client, "pool1-inst", None).await; - let ip = fetch_instance_ephemeral_ip(client, "default-pool-inst").await; + let ip = fetch_instance_ephemeral_ip(client, "pool1-inst").await; assert!( - ip.ip >= default_pool_range.first_address() - && ip.ip <= default_pool_range.last_address(), - "Expected ephemeral IP to come from default pool" + ip.ip >= range1.first_address() && ip.ip <= range1.last_address(), + "Expected ephemeral IP to come from pool1" ); - // Create an instance explicitly using the "other-pool". - create_instance_with_pool(client, "other-pool-inst", Some("other-pool")) - .await; - let ip = fetch_instance_ephemeral_ip(client, "other-pool-inst").await; + // Create an instance explicitly using the non-default "other-pool". + create_instance_with_pool(client, "pool2-inst", Some("pool2")).await; + let ip = fetch_instance_ephemeral_ip(client, "pool2-inst").await; assert!( - ip.ip >= other_pool_range.first_address() - && ip.ip <= other_pool_range.last_address(), - "Expected ephemeral IP to come from other pool" + ip.ip >= range2.first_address() && ip.ip <= range2.last_address(), + "Expected ephemeral IP to come from pool2" ); - // now create a third pool, a silo default, to confirm it gets used. not - // using create_ip_pool because we need to specify a silo and default: true - let pool_name = "silo-pool"; - let _silo_pool: views::IpPool = object_create( + // make pool2 default and create instance with default pool. 
check that it now comes from pool2 + let _: views::IpPoolSilo = object_put( client, - "/v1/system/ip-pools", - &params::IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: pool_name.parse().unwrap(), - description: String::from("an ip pool"), - }, - silo: Some(NameOrId::Id(DEFAULT_SILO.id())), - is_default: true, + &format!("/v1/system/ip-pools/pool2/silos/{}", DEFAULT_SILO.id()), + &params::IpPoolSiloUpdate { is_default: true }, + ) + .await; + + create_instance_with_pool(client, "pool2-inst2", None).await; + let ip = fetch_instance_ephemeral_ip(client, "pool2-inst2").await; + assert!( + ip.ip >= range2.first_address() && ip.ip <= range2.last_address(), + "Expected ephemeral IP to come from pool2" + ); + + // try to delete association with pool1, but it fails because there is an + // instance with an IP from the pool in this silo + let pool1_silo_url = + format!("/v1/system/ip-pools/pool1/silos/{}", DEFAULT_SILO.id()); + let error = + object_delete_error(client, &pool1_silo_url, StatusCode::BAD_REQUEST) + .await; + assert_eq!( + error.message, + "IP addresses from this pool are in use in the linked silo" + ); + + // stop and delete instances with IPs from pool1. perhaps surprisingly, that + // includes pool2-inst also because the SNAT IP comes from the default pool + // even when different pool is specified for the ephemeral IP + stop_instance(&cptestctx, "pool1-inst").await; + stop_instance(&cptestctx, "pool2-inst").await; + + object_delete(client, &pool1_silo_url).await; + + // create instance with pool1, expecting allocation to fail + let instance_name = "pool1-inst-fail"; + let url = format!("/v1/instances?project={}", PROJECT_NAME); + let instance_params = params::InstanceCreate { + identity: IdentityMetadataCreateParams { + name: instance_name.parse().unwrap(), + description: format!("instance {:?}", instance_name), }, + ncpus: InstanceCpuCount(4), + memory: ByteCount::from_gibibytes_u32(1), + hostname: String::from("the_host"), + user_data: vec![], + network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, + external_ips: vec![params::ExternalIpCreate::Ephemeral { + pool_name: Some("pool1".parse().unwrap()), + }], + disks: vec![], + start: true, + }; + let error = object_create_error( + client, + &url, + &instance_params, + StatusCode::NOT_FOUND, ) .await; - let silo_pool_range = IpRange::V4( + assert_eq!(error.message, "not found: ip-pool with name \"pool1\""); +} + +async fn stop_instance( + cptestctx: &ControlPlaneTestContext, + instance_name: &str, +) { + let client = &cptestctx.external_client; + let instance = + instance_post(&client, instance_name, InstanceOp::Stop).await; + let nexus = &cptestctx.server.apictx().nexus; + instance_simulate(nexus, &instance.identity.id).await; + let url = + format!("/v1/instances/{}?project={}", instance_name, PROJECT_NAME); + object_delete(client, &url).await; +} + +// IP pool that exists but is not associated with any silo (or with a silo other +// than the current user's) cannot be used to get IPs +#[nexus_test] +async fn test_instance_ephemeral_ip_from_orphan_pool( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + let _ = create_project(&client, PROJECT_NAME).await; + + // make first pool the default for the priv user's silo + create_ip_pool(&client, "default", None).await; + link_ip_pool(&client, "default", &DEFAULT_SILO.id(), true).await; + + let orphan_pool_range = IpRange::V4( Ipv4Range::new( - std::net::Ipv4Addr::new(10, 2, 0, 1), - std::net::Ipv4Addr::new(10, 2, 0, 
5), + std::net::Ipv4Addr::new(10, 1, 0, 1), + std::net::Ipv4Addr::new(10, 1, 0, 5), ) .unwrap(), ); - populate_ip_pool(client, pool_name, Some(silo_pool_range)).await; + create_ip_pool(&client, "orphan-pool", Some(orphan_pool_range)).await; - create_instance_with_pool(client, "silo-pool-inst", Some("silo-pool")) - .await; - let ip = fetch_instance_ephemeral_ip(client, "silo-pool-inst").await; - assert!( - ip.ip >= silo_pool_range.first_address() - && ip.ip <= silo_pool_range.last_address(), - "Expected ephemeral IP to come from the silo default pool" + let instance_name = "orphan-pool-inst"; + let body = params::InstanceCreate { + identity: IdentityMetadataCreateParams { + name: instance_name.parse().unwrap(), + description: format!("instance {:?}", instance_name), + }, + ncpus: InstanceCpuCount(4), + memory: ByteCount::from_gibibytes_u32(1), + hostname: String::from("the_host"), + user_data: vec![], + network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, + external_ips: vec![params::ExternalIpCreate::Ephemeral { + pool_name: Some("orphan-pool".parse().unwrap()), + }], + disks: vec![], + start: true, + }; + + // instance create 404s + let url = format!("/v1/instances?project={}", PROJECT_NAME); + let error = + object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await; + + assert_eq!(error.error_code.unwrap(), "ObjectNotFound".to_string()); + assert_eq!( + error.message, + "not found: ip-pool with name \"orphan-pool\"".to_string() ); - // we can still specify other pool even though we now have a silo default - create_instance_with_pool(client, "other-pool-inst-2", Some("other-pool")) - .await; + // associate the pool with a different silo and we should get the same + // error on instance create + let params = params::IpPoolSiloLink { + silo: NameOrId::Name(cptestctx.silo_name.clone()), + is_default: false, + }; + let _: views::IpPoolSilo = + object_create(client, "/v1/system/ip-pools/orphan-pool/silos", ¶ms) + .await; - let ip = fetch_instance_ephemeral_ip(client, "other-pool-inst-2").await; - assert!( - ip.ip >= other_pool_range.first_address() - && ip.ip <= other_pool_range.last_address(), - "Expected ephemeral IP to come from the other pool" + let error = + object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await; + + assert_eq!(error.error_code.unwrap(), "ObjectNotFound".to_string()); + assert_eq!( + error.message, + "not found: ip-pool with name \"orphan-pool\"".to_string() ); } +// Test the error when creating an instance with an IP from the default pool, +// but there is no default pool +#[nexus_test] +async fn test_instance_ephemeral_ip_no_default_pool_error( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + let _ = create_project(&client, PROJECT_NAME).await; + + // important: no pool create, so there is no pool + + let body = params::InstanceCreate { + identity: IdentityMetadataCreateParams { + name: "no-default-pool".parse().unwrap(), + description: "".to_string(), + }, + ncpus: InstanceCpuCount(4), + memory: ByteCount::from_gibibytes_u32(1), + hostname: String::from("the_host"), + user_data: vec![], + network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, + external_ips: vec![params::ExternalIpCreate::Ephemeral { + pool_name: None, // <--- the only important thing here + }], + disks: vec![], + start: true, + }; + + let url = format!("/v1/instances?project={}", PROJECT_NAME); + let error = + object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await; + let msg = "not 
found: ip-pool with id \"Default pool for current silo\"" + .to_string(); + assert_eq!(error.message, msg); + + // same deal if you specify a pool that doesn't exist + let body = params::InstanceCreate { + external_ips: vec![params::ExternalIpCreate::Ephemeral { + pool_name: Some("nonexistent-pool".parse().unwrap()), + }], + ..body + }; + let error = + object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await; + assert_eq!(error.message, msg); +} + #[nexus_test] async fn test_instance_attach_several_external_ips( cptestctx: &ControlPlaneTestContext, @@ -3727,7 +3883,12 @@ async fn test_instance_attach_several_external_ips( ) .unwrap(), ); - populate_ip_pool(&client, "default", Some(default_pool_range)).await; + create_ip_pool(&client, "default", Some(default_pool_range)).await; + link_ip_pool(&client, "default", &DEFAULT_SILO.id(), true).await; + + // this doesn't work as a replacement for the above. figure out why and + // probably delete it + // create_default_ip_pool(&client).await; // Create several floating IPs for the instance, totalling 8 IPs. let mut external_ip_create = @@ -3797,47 +3958,35 @@ async fn test_instance_allow_only_one_ephemeral_ip( let _ = create_project(&client, PROJECT_NAME).await; - // Create one IP pool with space for two ephemerals. - let default_pool_range = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 1), - std::net::Ipv4Addr::new(10, 0, 0, 2), - ) - .unwrap(), - ); - populate_ip_pool(&client, "default", Some(default_pool_range)).await; + // don't need any IP pools because request fails at parse time let ephemeral_create = params::ExternalIpCreate::Ephemeral { pool_name: Some("default".parse().unwrap()), }; - let error: HttpErrorResponseBody = NexusRequest::new( - RequestBuilder::new(client, Method::POST, &get_instances_url()) - .body(Some(¶ms::InstanceCreate { - identity: IdentityMetadataCreateParams { - name: "default-pool-inst".parse().unwrap(), - description: "instance default-pool-inst".into(), - }, - ncpus: InstanceCpuCount(4), - memory: ByteCount::from_gibibytes_u32(1), - hostname: String::from("the_host"), - user_data: - b"#cloud-config\nsystem_info:\n default_user:\n name: oxide" - .to_vec(), - network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, - external_ips: vec![ - ephemeral_create.clone(), ephemeral_create - ], - disks: vec![], - start: true, - })) - .expect_status(Some(StatusCode::BAD_REQUEST)), + let create_params = params::InstanceCreate { + identity: IdentityMetadataCreateParams { + name: "default-pool-inst".parse().unwrap(), + description: "instance default-pool-inst".into(), + }, + ncpus: InstanceCpuCount(4), + memory: ByteCount::from_gibibytes_u32(1), + hostname: String::from("the_host"), + user_data: + b"#cloud-config\nsystem_info:\n default_user:\n name: oxide" + .to_vec(), + network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, + external_ips: vec![ephemeral_create.clone(), ephemeral_create], + disks: vec![], + start: true, + }; + let error = object_create_error( + client, + &get_instances_url(), + &create_params, + StatusCode::BAD_REQUEST, ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + .await; + assert_eq!( error.message, "An instance may not have more than 1 ephemeral IP address" @@ -3915,8 +4064,9 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { ) .await; - // Populate IP Pool - populate_ip_pool(&client, "default", None).await; + // can't use create_default_ip_pool because 
we need to link to the silo we just made + create_ip_pool(&client, "default", None).await; + link_ip_pool(&client, "default", &silo.identity.id, true).await; // Create test projects NexusRequest::objects_post( @@ -4022,8 +4172,7 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; - create_project(client, PROJECT_NAME).await; + create_project_and_pool(client).await; // Add a few more sleds let nsleds = 3; diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 6a633fc5e1..5682df2c3a 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -6,33 +6,47 @@ use dropshot::test_util::ClientTestContext; use dropshot::HttpErrorResponseBody; +use dropshot::ResultsPage; use http::method::Method; use http::StatusCode; +use nexus_db_queries::db::datastore::SERVICE_IP_POOL_NAME; +use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_instance; +use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_project; +use nexus_test_utils::resource_helpers::link_ip_pool; +use nexus_test_utils::resource_helpers::object_create; +use nexus_test_utils::resource_helpers::object_create_error; +use nexus_test_utils::resource_helpers::object_delete; +use nexus_test_utils::resource_helpers::object_delete_error; +use nexus_test_utils::resource_helpers::object_get; +use nexus_test_utils::resource_helpers::object_get_error; +use nexus_test_utils::resource_helpers::object_put; +use nexus_test_utils::resource_helpers::object_put_error; use nexus_test_utils::resource_helpers::objects_list_page_authz; -use nexus_test_utils::resource_helpers::{ - create_instance, create_instance_with, -}; use nexus_test_utils_macros::nexus_test; -use nexus_types::external_api::params::ExternalIpCreate; -use nexus_types::external_api::params::InstanceDiskAttachment; -use nexus_types::external_api::params::InstanceNetworkInterfaceAttachment; +use nexus_types::external_api::params; use nexus_types::external_api::params::IpPoolCreate; +use nexus_types::external_api::params::IpPoolSiloLink; +use nexus_types::external_api::params::IpPoolSiloUpdate; use nexus_types::external_api::params::IpPoolUpdate; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::Ipv4Range; use nexus_types::external_api::shared::Ipv6Range; use nexus_types::external_api::views::IpPool; use nexus_types::external_api::views::IpPoolRange; +use nexus_types::external_api::views::IpPoolSilo; +use nexus_types::external_api::views::Silo; +use nexus_types::identity::Resource; use omicron_common::api::external::IdentityMetadataUpdateParams; use omicron_common::api::external::NameOrId; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; use omicron_nexus::TestInterfaces; use sled_agent_client::TestInterfaces as SledTestInterfaces; -use std::collections::HashSet; +use uuid::Uuid; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; @@ -58,41 +72,18 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { .await .expect("Failed to list IP Pools") .all_items; - 
assert_eq!(ip_pools.len(), 1, "Expected to see default IP pool"); - - assert_eq!(ip_pools[0].identity.name, "default",); - assert_eq!(ip_pools[0].silo_id, None); - assert!(ip_pools[0].is_default); + assert_eq!(ip_pools.len(), 0, "Expected empty list of IP pools"); // Verify 404 if the pool doesn't exist yet, both for creating or deleting - let error: HttpErrorResponseBody = NexusRequest::expect_failure( - client, - StatusCode::NOT_FOUND, - Method::GET, - &ip_pool_url, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + let error = + object_get_error(client, &ip_pool_url, StatusCode::NOT_FOUND).await; assert_eq!( error.message, format!("not found: ip-pool with name \"{}\"", pool_name), ); - let error: HttpErrorResponseBody = NexusRequest::expect_failure( - client, - StatusCode::NOT_FOUND, - Method::DELETE, - &ip_pool_url, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + + let error = + object_delete_error(client, &ip_pool_url, StatusCode::NOT_FOUND).await; assert_eq!( error.message, format!("not found: ip-pool with name \"{}\"", pool_name), @@ -105,20 +96,11 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, - silo: None, - is_default: false, }; let created_pool: IpPool = - NexusRequest::objects_post(client, ip_pools_url, ¶ms) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + object_create(client, ip_pools_url, ¶ms).await; assert_eq!(created_pool.identity.name, pool_name); assert_eq!(created_pool.identity.description, description); - assert_eq!(created_pool.silo_id, None); let list = NexusRequest::iter_collection_authn::( client, @@ -129,30 +111,21 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { .await .expect("Failed to list IP Pools") .all_items; - assert_eq!(list.len(), 2, "Expected exactly two IP pools"); - assert_pools_eq(&created_pool, &list[1]); + assert_eq!(list.len(), 1, "Expected exactly 1 IP pool"); + assert_pools_eq(&created_pool, &list[0]); - let fetched_pool: IpPool = NexusRequest::object_get(client, &ip_pool_url) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + let fetched_pool: IpPool = object_get(client, &ip_pool_url).await; assert_pools_eq(&created_pool, &fetched_pool); // Verify we get a conflict error if we insert it again - let error: HttpErrorResponseBody = NexusRequest::new( - RequestBuilder::new(client, Method::POST, ip_pools_url) - .body(Some(¶ms)) - .expect_status(Some(StatusCode::BAD_REQUEST)), + let error = object_create_error( + client, + ip_pools_url, + ¶ms, + StatusCode::BAD_REQUEST, ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + .await; + assert_eq!( error.message, format!("already exists: ip-pool \"{}\"", pool_name) @@ -167,27 +140,13 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { .unwrap(), ); let created_range: IpPoolRange = - NexusRequest::objects_post(client, &ip_pool_add_range_url, &range) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + object_create(client, &ip_pool_add_range_url, &range).await; assert_eq!(range.first_address(), created_range.range.first_address()); assert_eq!(range.last_address(), created_range.range.last_address()); - let error: 
HttpErrorResponseBody = NexusRequest::expect_failure( - client, - StatusCode::BAD_REQUEST, - Method::DELETE, - &ip_pool_url, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + + let error: HttpErrorResponseBody = + object_delete_error(client, &ip_pool_url, StatusCode::BAD_REQUEST) + .await; assert_eq!( error.message, "IP Pool cannot be deleted while it contains IP ranges", @@ -208,13 +167,7 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { }, }; let modified_pool: IpPool = - NexusRequest::object_put(client, &ip_pool_url, Some(&updates)) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + object_put(client, &ip_pool_url, &updates).await; assert_eq!(modified_pool.identity.name, new_pool_name); assert_eq!(modified_pool.identity.id, created_pool.identity.id); assert_eq!( @@ -231,27 +184,11 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { ); let fetched_modified_pool: IpPool = - NexusRequest::object_get(client, &new_ip_pool_url) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + object_get(client, &new_ip_pool_url).await; assert_pools_eq(&modified_pool, &fetched_modified_pool); - let error: HttpErrorResponseBody = NexusRequest::expect_failure( - client, - StatusCode::NOT_FOUND, - Method::GET, - &ip_pool_url, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + let error: HttpErrorResponseBody = + object_get_error(client, &ip_pool_url, StatusCode::NOT_FOUND).await; assert_eq!( error.message, format!("not found: ip-pool with name \"{}\"", pool_name), @@ -275,69 +212,346 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { .expect("Expected to be able to delete an empty IP Pool"); } +/// The internal IP pool, defined by its association with the internal silo, +/// cannot be interacted with through the operator API. CRUD operations should +/// all 404 except fetch by name or ID. #[nexus_test] -async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { +async fn test_ip_pool_service_no_cud(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - // can create a pool with an existing silo by name - let params = IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: String::from("p0").parse().unwrap(), - description: String::from(""), + let internal_pool_name_url = + format!("/v1/system/ip-pools/{}", SERVICE_IP_POOL_NAME); + + // we can fetch the service pool by name or ID + let pool = NexusRequest::object_get(client, &internal_pool_name_url) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await; + + let internal_pool_id_url = + format!("/v1/system/ip-pools/{}", pool.identity.id); + let pool = NexusRequest::object_get(client, &internal_pool_id_url) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await; + + // but it does not come back in the list. 
there are none in the list + let pools = + objects_list_page_authz::(client, "/v1/system/ip-pools").await; + assert_eq!(pools.items.len(), 0); + + // deletes fail + + let error = object_delete_error( + client, + &internal_pool_name_url, + StatusCode::NOT_FOUND, + ) + .await; + assert_eq!( + error.message, + "not found: ip-pool with name \"oxide-service-pool\"" + ); + + let not_found_id = + format!("not found: ip-pool with id \"{}\"", pool.identity.id); + let error = object_delete_error( + client, + &internal_pool_id_url, + StatusCode::NOT_FOUND, + ) + .await; + assert_eq!(error.message, not_found_id); + + // Update not allowed + let put_body = params::IpPoolUpdate { + identity: IdentityMetadataUpdateParams { + name: Some("test".parse().unwrap()), + description: Some("test".to_string()), }, - silo: Some(NameOrId::Name(cptestctx.silo_name.clone())), - is_default: false, }; - let created_pool = create_pool(client, ¶ms).await; - assert_eq!(created_pool.identity.name, "p0"); + let error = object_put_error( + client, + &internal_pool_id_url, + &put_body, + StatusCode::NOT_FOUND, + ) + .await; + assert_eq!(error.message, not_found_id); + + // linking not allowed + + // let link_body = params::IpPoolSiloLink { + // silo: NameOrId::Name(cptestctx.silo_name.clone()), + // is_default: false, + // }; + // let link_url = format!("{}/silos", internal_pool_id_url); + // let error = object_create_error( + // client, + // &link_url, + // &link_body, + // StatusCode::NOT_FOUND, + // ) + // .await; + // assert_eq!(error.message, not_found_id); + + // TODO: link, unlink, add/remove range by name or ID should all fail +} - let silo_id = - created_pool.silo_id.expect("Expected pool to have a silo_id"); +#[nexus_test] +async fn test_ip_pool_silo_link(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; - // now we'll create another IP pool using that silo ID - let params = IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: String::from("p1").parse().unwrap(), - description: String::from(""), - }, - silo: Some(NameOrId::Id(silo_id)), + let p0 = create_pool(client, "p0").await; + let p1 = create_pool(client, "p1").await; + + // there should be no associations + let assocs_p0 = silos_for_pool(client, "p0").await; + assert_eq!(assocs_p0.items.len(), 0); + + // expect 404 on association if the specified silo doesn't exist + let nonexistent_silo_id = Uuid::new_v4(); + let params = params::IpPoolSiloLink { + silo: NameOrId::Id(nonexistent_silo_id), is_default: false, }; - let created_pool = create_pool(client, ¶ms).await; - assert_eq!(created_pool.identity.name, "p1"); - assert_eq!(created_pool.silo_id.unwrap(), silo_id); - // expect 404 if the specified silo doesn't exist - let bad_silo_params = IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: String::from("p2").parse().unwrap(), - description: String::from(""), - }, - silo: Some(NameOrId::Name( - String::from("not-a-thing").parse().unwrap(), - )), - is_default: false, + let error = object_create_error( + client, + "/v1/system/ip-pools/p0/silos", + ¶ms, + StatusCode::NOT_FOUND, + ) + .await; + + assert_eq!( + error.message, + format!("not found: silo with id \"{nonexistent_silo_id}\"") + ); + + // associate by name with silo that exists + let silo = NameOrId::Name(cptestctx.silo_name.clone()); + let params = + params::IpPoolSiloLink { silo: silo.clone(), is_default: false }; + let _: IpPoolSilo = + object_create(client, "/v1/system/ip-pools/p0/silos", ¶ms).await; + + // second attempt to create the same link 
errors due to conflict + let error = object_create_error( + client, + "/v1/system/ip-pools/p0/silos", + ¶ms, + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!(error.error_code.unwrap(), "ObjectAlreadyExists"); + + // get silo ID so we can test association by ID as well + let silo_url = format!("/v1/system/silos/{}", cptestctx.silo_name); + let silo_id = object_get::(client, &silo_url).await.identity.id; + + let assocs_p0 = silos_for_pool(client, "p0").await; + let silo_link = + IpPoolSilo { ip_pool_id: p0.identity.id, silo_id, is_default: false }; + assert_eq!(assocs_p0.items.len(), 1); + assert_eq!(assocs_p0.items[0], silo_link); + + // associate same silo to other pool by ID instead of name + let link_params = params::IpPoolSiloLink { + silo: NameOrId::Id(silo_id), + is_default: true, }; - let error: HttpErrorResponseBody = NexusRequest::new( - RequestBuilder::new(client, Method::POST, "/v1/system/ip-pools") - .body(Some(&bad_silo_params)) - .expect_status(Some(StatusCode::NOT_FOUND)), + let url = "/v1/system/ip-pools/p1/silos"; + let _: IpPoolSilo = object_create(client, &url, &link_params).await; + + let silos_p1 = silos_for_pool(client, "p1").await; + assert_eq!(silos_p1.items.len(), 1); + assert_eq!( + silos_p1.items[0], + IpPoolSilo { ip_pool_id: p1.identity.id, is_default: true, silo_id } + ); + + // creating a third pool and trying to link it as default: true should fail + create_pool(client, "p2").await; + let url = "/v1/system/ip-pools/p2/silos"; + let error = object_create_error( + client, + &url, + &link_params, + StatusCode::BAD_REQUEST, ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + .await; + assert_eq!(error.error_code.unwrap(), "ObjectAlreadyExists"); + + // pool delete fails because it is linked to a silo + let error = object_delete_error( + client, + "/v1/system/ip-pools/p1", + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!( + error.message, + "IP Pool cannot be deleted while it is linked to a silo", + ); + + // unlink silo (doesn't matter that it's a default) + let url = format!("/v1/system/ip-pools/p1/silos/{}", cptestctx.silo_name); + object_delete(client, &url).await; + + let silos_p1 = silos_for_pool(client, "p1").await; + assert_eq!(silos_p1.items.len(), 0); + + // now we can delete the pool too + object_delete(client, "/v1/system/ip-pools/p1").await; +} + +#[nexus_test] +async fn test_ip_pool_update_default(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + create_pool(client, "p0").await; + create_pool(client, "p1").await; + + // there should be no linked silos + let silos_p0 = silos_for_pool(client, "p0").await; + assert_eq!(silos_p0.items.len(), 0); + + let silos_p1 = silos_for_pool(client, "p1").await; + assert_eq!(silos_p1.items.len(), 0); + + // put 404s if link doesn't exist yet + let params = IpPoolSiloUpdate { is_default: true }; + let p0_silo_url = + format!("/v1/system/ip-pools/p0/silos/{}", cptestctx.silo_name); + let error = + object_put_error(client, &p0_silo_url, ¶ms, StatusCode::NOT_FOUND) + .await; + assert_eq!( + error.message, + "not found: ip-pool-resource with id \"(pool, silo)\"" + ); - assert_eq!(error.message, "not found: silo with name \"not-a-thing\""); + // associate both pools with the test silo + let silo = NameOrId::Name(cptestctx.silo_name.clone()); + let params = + params::IpPoolSiloLink { silo: silo.clone(), is_default: false }; + let _: IpPoolSilo = + object_create(client, "/v1/system/ip-pools/p0/silos", ¶ms).await; + let _: 
IpPoolSilo = + object_create(client, "/v1/system/ip-pools/p1/silos", ¶ms).await; + + // now both are linked to the silo, neither is marked default + let silos_p0 = silos_for_pool(client, "p0").await; + assert_eq!(silos_p0.items.len(), 1); + assert_eq!(silos_p0.items[0].is_default, false); + + let silos_p1 = silos_for_pool(client, "p1").await; + assert_eq!(silos_p1.items.len(), 1); + assert_eq!(silos_p1.items[0].is_default, false); + + // make p0 default + let params = IpPoolSiloUpdate { is_default: true }; + let _: IpPoolSilo = object_put(client, &p0_silo_url, ¶ms).await; + + // making the same one default again is not an error + let _: IpPoolSilo = object_put(client, &p0_silo_url, ¶ms).await; + + // now p0 is default + let silos_p0 = silos_for_pool(client, "p0").await; + assert_eq!(silos_p0.items.len(), 1); + assert_eq!(silos_p0.items[0].is_default, true); + + // p1 still not default + let silos_p1 = silos_for_pool(client, "p1").await; + assert_eq!(silos_p1.items.len(), 1); + assert_eq!(silos_p1.items[0].is_default, false); + + // making p1 the default pool for the silo unsets it on p0 + + // set p1 default + let params = IpPoolSiloUpdate { is_default: true }; + let p1_silo_url = + format!("/v1/system/ip-pools/p1/silos/{}", cptestctx.silo_name); + let _: IpPoolSilo = object_put(client, &p1_silo_url, ¶ms).await; + + // p1 is now default + let silos_p1 = silos_for_pool(client, "p1").await; + assert_eq!(silos_p1.items.len(), 1); + assert_eq!(silos_p1.items[0].is_default, true); + + // p0 is no longer default + let silos_p0 = silos_for_pool(client, "p0").await; + assert_eq!(silos_p0.items.len(), 1); + assert_eq!(silos_p0.items[0].is_default, false); + + // we can also unset default + let params = IpPoolSiloUpdate { is_default: false }; + let _: IpPoolSilo = object_put(client, &p1_silo_url, ¶ms).await; + + let silos_p1 = silos_for_pool(client, "p1").await; + assert_eq!(silos_p1.items.len(), 1); + assert_eq!(silos_p1.items[0].is_default, false); } -async fn create_pool( +// IP pool list fetch logic includes a join to ip_pool_resource, which is +// unusual, so we want to make sure pagination logic still works +#[nexus_test] +async fn test_ip_pool_pagination(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + let base_url = "/v1/system/ip-pools"; + let first_page = objects_list_page_authz::(client, &base_url).await; + + // we start out with no pools + assert_eq!(first_page.items.len(), 0); + + let mut pool_names = vec![]; + + // create more pools to work with, adding their names to the list so we + // can use it to check order + for i in 1..=8 { + let name = format!("other-pool-{}", i); + pool_names.push(name.clone()); + create_pool(client, &name).await; + } + + let first_five_url = format!("{}?limit=5", base_url); + let first_five = + objects_list_page_authz::(client, &first_five_url).await; + assert!(first_five.next_page.is_some()); + assert_eq!(get_names(first_five.items), &pool_names[0..5]); + + let next_page_url = format!( + "{}?limit=5&page_token={}", + base_url, + first_five.next_page.unwrap() + ); + let next_page = + objects_list_page_authz::(client, &next_page_url).await; + assert_eq!(get_names(next_page.items), &pool_names[5..8]); +} + +/// helper to make tests less ugly +fn get_names(pools: Vec) -> Vec { + pools.iter().map(|p| p.identity.name.to_string()).collect() +} + +async fn silos_for_pool( client: &ClientTestContext, - params: &IpPoolCreate, -) -> IpPool { - NexusRequest::objects_post(client, "/v1/system/ip-pools", params) + id: &str, +) -> 
ResultsPage<IpPoolSilo> { + let url = format!("/v1/system/ip-pools/{}/silos", id); + objects_list_page_authz::<IpPoolSilo>(client, &url).await +} + +async fn create_pool(client: &ClientTestContext, name: &str) -> IpPool { + let params = IpPoolCreate { + identity: IdentityMetadataCreateParams { + name: Name::try_from(name.to_string()).unwrap(), + description: "".to_string(), + }, + }; + NexusRequest::objects_post(client, "/v1/system/ip-pools", &params) .authn_as(AuthnMode::PrivilegedUser) .execute() .await @@ -374,8 +588,8 @@ async fn test_ip_pool_range_overlapping_ranges_fails( name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, - silo: None, - is_default: false, + // silo: None, + // is_default: false, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, &params) @@ -557,8 +771,6 @@ async fn test_ip_pool_range_pagination(cptestctx: &ControlPlaneTestContext) { name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, - silo: None, - is_default: false, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, &params) @@ -640,71 +852,15 @@ async fn test_ip_pool_range_pagination(cptestctx: &ControlPlaneTestContext) { } #[nexus_test] -async fn test_ip_pool_list_usable_by_project( - cptestctx: &ControlPlaneTestContext, -) { +async fn test_ip_pool_list_in_silo(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let scoped_ip_pools_url = "/v1/ip-pools"; - let ip_pools_url = "/v1/system/ip-pools"; let mypool_name = "mypool"; - let default_ip_pool_add_range_url = - format!("{}/default/ranges/add", ip_pools_url); - let mypool_ip_pool_add_range_url = - format!("{}/{}/ranges/add", ip_pools_url, mypool_name); - let service_ip_pool_add_range_url = - "/v1/system/ip-pools-service/ranges/add".to_string(); - - // Add an IP range to the default pool - let default_range = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 1), - std::net::Ipv4Addr::new(10, 0, 0, 2), - ) - .unwrap(), - ); - let created_range: IpPoolRange = NexusRequest::objects_post( - client, - &default_ip_pool_add_range_url, - &default_range, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - assert_eq!( - default_range.first_address(), - created_range.range.first_address() - ); - assert_eq!( - default_range.last_address(), - created_range.range.last_address() - ); - - // Create an org and project, and then try to make an instance with an IP from - // each range to which the project is expected have access. 
const PROJECT_NAME: &str = "myproj"; - const INSTANCE_NAME: &str = "myinst"; create_project(client, PROJECT_NAME).await; - // TODO: give this project explicit access when such functionality exists - let params = IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: String::from(mypool_name).parse().unwrap(), - description: String::from("right on cue"), - }, - silo: None, - is_default: false, - }; - NexusRequest::objects_post(client, ip_pools_url, ¶ms) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap(); - - // Add an IP range to mypool + // create pool with range and link (as default pool) to default silo, which + // is the privileged user's silo let mypool_range = IpRange::V4( Ipv4Range::new( std::net::Ipv4Addr::new(10, 0, 0, 51), @@ -712,102 +868,35 @@ async fn test_ip_pool_list_usable_by_project( ) .unwrap(), ); - let created_range: IpPoolRange = NexusRequest::objects_post( - client, - &mypool_ip_pool_add_range_url, - &mypool_range, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - assert_eq!( - mypool_range.first_address(), - created_range.range.first_address() - ); - assert_eq!(mypool_range.last_address(), created_range.range.last_address()); + create_ip_pool(client, mypool_name, Some(mypool_range)).await; + link_ip_pool(client, mypool_name, &DEFAULT_SILO.id(), true).await; - // add a service range we *don't* expect to see in the results - let service_range = IpRange::V4( + // create another pool and don't link it to anything + let otherpool_name = "other-pool"; + let otherpool_range = IpRange::V4( Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 101), - std::net::Ipv4Addr::new(10, 0, 0, 102), + std::net::Ipv4Addr::new(10, 0, 0, 53), + std::net::Ipv4Addr::new(10, 0, 0, 54), ) .unwrap(), ); + create_ip_pool(client, otherpool_name, Some(otherpool_range)).await; - let created_range: IpPoolRange = NexusRequest::objects_post( - client, - &service_ip_pool_add_range_url, - &service_range, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - assert_eq!( - service_range.first_address(), - created_range.range.first_address() - ); - assert_eq!( - service_range.last_address(), - created_range.range.last_address() - ); - - // TODO: add non-service, ip pools that the project *can't* use, when that - // functionality is implemented in the future (i.e. 
a "notmypool") + let list = + objects_list_page_authz::(client, "/v1/ip-pools").await.items; - let list_url = format!("{}?project={}", scoped_ip_pools_url, PROJECT_NAME); - let list = NexusRequest::iter_collection_authn::( - client, &list_url, "", None, - ) - .await - .expect("Failed to list IP Pools") - .all_items; + // only mypool shows up because it's linked to my silo + assert_eq!(list.len(), 1); + assert_eq!(list[0].identity.name.to_string(), mypool_name); - // default and mypool - assert_eq!(list.len(), 2); - let pool_names: HashSet = - list.iter().map(|pool| pool.identity.name.to_string()).collect(); - let expected_names: HashSet = - ["default", "mypool"].into_iter().map(|s| s.to_string()).collect(); - assert_eq!(pool_names, expected_names); - - // ensure we can view each pool returned - for pool_name in &pool_names { - let view_pool_url = format!( - "{}/{}?project={}", - scoped_ip_pools_url, pool_name, PROJECT_NAME - ); - let pool: IpPool = NexusRequest::object_get(client, &view_pool_url) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - assert_eq!(pool.identity.name.as_str(), pool_name.as_str()); - } + // fetch the pool directly too + let url = format!("/v1/ip-pools/{}", mypool_name); + let pool: IpPool = object_get(client, &url).await; + assert_eq!(pool.identity.name.as_str(), mypool_name); - // ensure we can successfully create an instance with each of the pools we - // should be able to access - for pool_name in pool_names { - let instance_name = format!("{}-{}", INSTANCE_NAME, pool_name); - let pool_name = Some(Name::try_from(pool_name).unwrap()); - create_instance_with( - client, - PROJECT_NAME, - &instance_name, - &InstanceNetworkInterfaceAttachment::Default, - Vec::::new(), - vec![ExternalIpCreate::Ephemeral { pool_name }], - ) - .await; - } + // fetching the other pool directly 404s + let url = format!("/v1/ip-pools/{}", otherpool_name); + object_get_error(client, &url, StatusCode::NOT_FOUND).await; } #[nexus_test] @@ -818,13 +907,36 @@ async fn test_ip_range_delete_with_allocated_external_ip_fails( let apictx = &cptestctx.server.apictx(); let nexus = &apictx.nexus; let ip_pools_url = "/v1/system/ip-pools"; - let pool_name = "default"; + let pool_name = "mypool"; let ip_pool_url = format!("{}/{}", ip_pools_url, pool_name); + let ip_pool_silos_url = format!("{}/{}/silos", ip_pools_url, pool_name); let ip_pool_ranges_url = format!("{}/ranges", ip_pool_url); let ip_pool_add_range_url = format!("{}/add", ip_pool_ranges_url); let ip_pool_rem_range_url = format!("{}/remove", ip_pool_ranges_url); - // Add an IP range to the default pool + // create pool + let params = IpPoolCreate { + identity: IdentityMetadataCreateParams { + name: String::from(pool_name).parse().unwrap(), + description: String::from("right on cue"), + }, + }; + NexusRequest::objects_post(client, ip_pools_url, ¶ms) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await; + + // associate pool with default silo, which is the privileged user's silo + let params = IpPoolSiloLink { + silo: NameOrId::Id(DEFAULT_SILO.id()), + is_default: true, + }; + NexusRequest::objects_post(client, &ip_pool_silos_url, ¶ms) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await; + + // Add an IP range to the pool let range = IpRange::V4( Ipv4Range::new( std::net::Ipv4Addr::new(10, 0, 0, 1), diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs index 89dd2e3cc6..5f517d49e0 100644 --- 
a/nexus/tests/integration_tests/metrics.rs +++ b/nexus/tests/integration_tests/metrics.rs @@ -9,8 +9,8 @@ use http::{Method, StatusCode}; use nexus_db_queries::db::fixed_data::silo::SILO_ID; use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils::resource_helpers::{ - create_disk, create_instance, create_project, objects_list_page_authz, - populate_ip_pool, DiskTest, + create_default_ip_pool, create_disk, create_instance, create_project, + objects_list_page_authz, DiskTest, }; use nexus_test_utils::ControlPlaneTestContext; use nexus_test_utils_macros::nexus_test; @@ -168,7 +168,7 @@ async fn test_metrics( let client = &cptestctx.external_client; cptestctx.server.register_as_producer().await; // needed for oximeter metrics to work - populate_ip_pool(&client, "default", None).await; // needed for instance create to work + create_default_ip_pool(&client).await; // needed for instance create to work DiskTest::new(cptestctx).await; // needed for disk create to work // silo metrics start out zero diff --git a/nexus/tests/integration_tests/pantry.rs b/nexus/tests/integration_tests/pantry.rs index dc4e8e6c95..1a3908affa 100644 --- a/nexus/tests/integration_tests/pantry.rs +++ b/nexus/tests/integration_tests/pantry.rs @@ -11,10 +11,10 @@ use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::identity_eq; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_instance; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::object_create; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; @@ -54,8 +54,8 @@ fn get_disk_attach_url(instance_name: &str) -> String { ) } -async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; +async fn create_project_and_pool(client: &ClientTestContext) -> Uuid { + create_default_ip_pool(client).await; let project = create_project(client, PROJECT_NAME).await; project.identity.id } @@ -350,7 +350,7 @@ async fn validate_disk_state(client: &ClientTestContext, state: DiskState) { async fn test_disk_create_for_importing(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); let new_disk = params::DiskCreate { @@ -396,7 +396,7 @@ async fn test_cannot_mount_import_ready_disk( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -427,7 +427,7 @@ async fn test_cannot_mount_import_from_bulk_writes_disk( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -451,7 +451,7 @@ async fn test_import_blocks_with_bulk_write( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ 
-492,7 +492,7 @@ async fn test_import_blocks_with_bulk_write_with_snapshot( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -543,7 +543,7 @@ async fn test_cannot_finalize_without_stopping_bulk_writes( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -572,7 +572,7 @@ async fn test_cannot_bulk_write_to_unaligned_offset( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -605,7 +605,7 @@ async fn test_cannot_bulk_write_data_not_block_size_multiple( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -637,7 +637,7 @@ async fn test_cannot_bulk_write_data_past_end_of_disk( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -669,7 +669,7 @@ async fn test_cannot_bulk_write_data_non_base64( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -707,7 +707,7 @@ async fn test_can_stop_start_import_from_bulk_write( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -735,7 +735,7 @@ async fn test_cannot_bulk_write_start_attached_disk( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -765,7 +765,7 @@ async fn test_cannot_bulk_write_attached_disk( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -795,7 +795,7 @@ async fn test_cannot_bulk_write_stop_attached_disk( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -824,7 +824,7 @@ async fn test_cannot_finalize_attached_disk( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; diff --git a/nexus/tests/integration_tests/projects.rs b/nexus/tests/integration_tests/projects.rs index 24b2721a1d..d9d6ceef5b 100644 --- a/nexus/tests/integration_tests/projects.rs +++ b/nexus/tests/integration_tests/projects.rs @@ -10,8 +10,8 @@ use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use 
nexus_test_utils::resource_helpers::{ - create_disk, create_project, create_vpc, object_create, populate_ip_pool, - project_get, projects_list, DiskTest, + create_default_ip_pool, create_disk, create_project, create_vpc, + object_create, project_get, projects_list, DiskTest, }; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; @@ -134,7 +134,7 @@ async fn test_project_deletion_with_instance( ) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; // Create a project that we'll use for testing. let name = "springfield-squidport"; diff --git a/nexus/tests/integration_tests/quotas.rs b/nexus/tests/integration_tests/quotas.rs index 0ad2419bee..858837bb32 100644 --- a/nexus/tests/integration_tests/quotas.rs +++ b/nexus/tests/integration_tests/quotas.rs @@ -6,16 +6,17 @@ use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::http_testing::TestResponse; +use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_local_user; use nexus_test_utils::resource_helpers::grant_iam; +use nexus_test_utils::resource_helpers::link_ip_pool; use nexus_test_utils::resource_helpers::object_create; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use nexus_types::external_api::shared; use nexus_types::external_api::shared::SiloRole; -use nexus_types::external_api::views::SiloQuotas; +use nexus_types::external_api::views::{Silo, SiloQuotas}; use omicron_common::api::external::ByteCount; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::InstanceCpuCount; @@ -168,7 +169,7 @@ async fn setup_silo_with_quota( silo_name: &str, quotas: params::SiloQuotasCreate, ) -> ResourceAllocator { - let silo = object_create( + let silo: Silo = object_create( client, "/v1/system/silos", ¶ms::SiloCreate { @@ -186,7 +187,10 @@ async fn setup_silo_with_quota( ) .await; - populate_ip_pool(&client, "default", None).await; + // create default pool and link to this silo. 
can't use + // create_default_ip_pool because that links to the default silo + create_ip_pool(&client, "default", None).await; + link_ip_pool(&client, "default", &silo.identity.id, true).await; // Create a silo user let user = create_local_user( diff --git a/nexus/tests/integration_tests/sleds.rs b/nexus/tests/integration_tests/sleds.rs index a166280ead..5e399cbe84 100644 --- a/nexus/tests/integration_tests/sleds.rs +++ b/nexus/tests/integration_tests/sleds.rs @@ -7,12 +7,12 @@ use camino::Utf8Path; use dropshot::test_util::ClientTestContext; use nexus_test_interface::NexusServer; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_instance; use nexus_test_utils::resource_helpers::create_physical_disk; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::delete_physical_disk; use nexus_test_utils::resource_helpers::objects_list_page_authz; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::start_sled_agent; use nexus_test_utils::SLED_AGENT_UUID; use nexus_test_utils_macros::nexus_test; @@ -144,7 +144,7 @@ async fn test_sled_instance_list(cptestctx: &ControlPlaneTestContext) { .is_empty()); // Create an IP pool and project that we'll use for testing. - populate_ip_pool(&external_client, "default", None).await; + create_default_ip_pool(&external_client).await; let project = create_project(&external_client, "test-project").await; let instance = create_instance(&external_client, "test-project", "test-instance") diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index 24b04bf718..87ec2b3163 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -17,9 +17,9 @@ use nexus_db_queries::db::lookup::LookupPath; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::object_create; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; @@ -48,7 +48,8 @@ fn get_disk_url(name: &str) -> String { format!("/v1/disks/{}?project={}", name, PROJECT_NAME) } -async fn create_org_and_project(client: &ClientTestContext) -> Uuid { +async fn create_project_and_pool(client: &ClientTestContext) -> Uuid { + create_default_ip_pool(client).await; let project = create_project(client, PROJECT_NAME).await; project.identity.id } @@ -57,8 +58,7 @@ async fn create_org_and_project(client: &ClientTestContext) -> Uuid { async fn test_snapshot_basic(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // Define a global image @@ -162,8 +162,7 @@ async fn test_snapshot_basic(cptestctx: &ControlPlaneTestContext) { async fn test_snapshot_without_instance(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let 
disks_url = get_disks_url(); // Define a global image @@ -262,8 +261,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { let nexus = &cptestctx.server.apictx().nexus; let datastore = nexus.datastore(); DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - let project_id = create_org_and_project(client).await; + let project_id = create_project_and_pool(client).await; let disks_url = get_disks_url(); // Create a blank disk @@ -422,7 +420,7 @@ async fn test_reject_creating_disk_from_snapshot( let nexus = &cptestctx.server.apictx().nexus; let datastore = nexus.datastore(); - let project_id = create_org_and_project(&client).await; + let project_id = create_project_and_pool(&client).await; let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -575,7 +573,7 @@ async fn test_reject_creating_disk_from_illegal_snapshot( let nexus = &cptestctx.server.apictx().nexus; let datastore = nexus.datastore(); - let project_id = create_org_and_project(&client).await; + let project_id = create_project_and_pool(&client).await; let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -671,7 +669,7 @@ async fn test_reject_creating_disk_from_other_project_snapshot( let nexus = &cptestctx.server.apictx().nexus; let datastore = nexus.datastore(); - let project_id = create_org_and_project(&client).await; + let project_id = create_project_and_pool(&client).await; let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -751,8 +749,7 @@ async fn test_cannot_snapshot_if_no_space(cptestctx: &ControlPlaneTestContext) { // Test that snapshots cannot be created if there is no space for the blocks let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // Create a disk at just over half the capacity of what DiskTest allocates @@ -805,8 +802,7 @@ async fn test_cannot_snapshot_if_no_space(cptestctx: &ControlPlaneTestContext) { async fn test_snapshot_unwind(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // Define a global image @@ -905,7 +901,7 @@ async fn test_create_snapshot_record_idempotent( let nexus = &cptestctx.server.apictx().nexus; let datastore = nexus.datastore(); - let project_id = create_org_and_project(&client).await; + let project_id = create_project_and_pool(&client).await; let disk_id = Uuid::new_v4(); let snapshot = db::model::Snapshot { @@ -1093,8 +1089,7 @@ async fn test_multiple_deletes_not_sent(cptestctx: &ControlPlaneTestContext) { let nexus = &cptestctx.server.apictx().nexus; let datastore = nexus.datastore(); DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - let _project_id = create_org_and_project(client).await; + let _project_id = create_project_and_pool(client).await; let disks_url = get_disks_url(); // Create a blank disk diff --git a/nexus/tests/integration_tests/subnet_allocation.rs b/nexus/tests/integration_tests/subnet_allocation.rs index 7f5c27384c..91a933754c 100644 --- a/nexus/tests/integration_tests/subnet_allocation.rs +++ b/nexus/tests/integration_tests/subnet_allocation.rs @@ 
-13,10 +13,10 @@ use ipnetwork::Ipv4Network; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_instance_with; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::objects_list_page_authz; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use omicron_common::api::external::{ @@ -84,7 +84,7 @@ async fn test_subnet_allocation(cptestctx: &ControlPlaneTestContext) { let project_name = "springfield-squidport"; // Create a project that we'll use for testing. - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; create_project(&client, project_name).await; let url_instances = format!("/v1/instances?project={}", project_name); diff --git a/nexus/tests/integration_tests/unauthorized.rs b/nexus/tests/integration_tests/unauthorized.rs index 317a5a0576..3671564866 100644 --- a/nexus/tests/integration_tests/unauthorized.rs +++ b/nexus/tests/integration_tests/unauthorized.rs @@ -69,9 +69,8 @@ async fn test_unauthorized(cptestctx: &ControlPlaneTestContext) { .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .unwrap_or_else(|_| { - panic!("Failed to GET from URL: {url}") - }), + .map_err(|e| panic!("Failed to GET from URL: {url}, {e}")) + .unwrap(), id_routes, ), SetupReq::Post { url, body, id_routes } => ( @@ -80,7 +79,8 @@ async fn test_unauthorized(cptestctx: &ControlPlaneTestContext) { .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .unwrap_or_else(|_| panic!("Failed to POST to URL: {url}")), + .map_err(|e| panic!("Failed to POST to URL: {url}, {e}")) + .unwrap(), id_routes, ), }; @@ -201,14 +201,24 @@ static SETUP_REQUESTS: Lazy> = Lazy::new(|| { &*DEMO_SILO_USER_ID_SET_PASSWORD_URL, ], }, - // Get the default IP pool - SetupReq::Get { url: &DEMO_IP_POOL_URL, id_routes: vec![] }, + // Create the default IP pool + SetupReq::Post { + url: &DEMO_IP_POOLS_URL, + body: serde_json::to_value(&*DEMO_IP_POOL_CREATE).unwrap(), + id_routes: vec!["/v1/ip-pools/{id}"], + }, // Create an IP pool range SetupReq::Post { url: &DEMO_IP_POOL_RANGES_ADD_URL, body: serde_json::to_value(&*DEMO_IP_POOL_RANGE).unwrap(), id_routes: vec![], }, + // Link default pool to default silo + SetupReq::Post { + url: &DEMO_IP_POOL_SILOS_URL, + body: serde_json::to_value(&*DEMO_IP_POOL_SILOS_BODY).unwrap(), + id_routes: vec![], + }, // Create a Project in the Organization SetupReq::Post { url: "/v1/projects", diff --git a/nexus/tests/integration_tests/utilization.rs b/nexus/tests/integration_tests/utilization.rs index 5ebf56f35a..e09e71a9e3 100644 --- a/nexus/tests/integration_tests/utilization.rs +++ b/nexus/tests/integration_tests/utilization.rs @@ -3,10 +3,10 @@ use http::StatusCode; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_instance; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::objects_list_page_authz; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use 
nexus_types::external_api::params; @@ -27,7 +27,7 @@ type ControlPlaneTestContext = async fn test_utilization(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let current_util = objects_list_page_authz::( client, diff --git a/nexus/tests/integration_tests/volume_management.rs b/nexus/tests/integration_tests/volume_management.rs index 466cb5472e..34f037ee8c 100644 --- a/nexus/tests/integration_tests/volume_management.rs +++ b/nexus/tests/integration_tests/volume_management.rs @@ -12,9 +12,9 @@ use nexus_db_queries::db::DataStore; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::object_create; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; @@ -51,14 +51,14 @@ fn get_snapshot_url(snapshot: &str) -> String { format!("/v1/snapshots/{}?project={}", snapshot, PROJECT_NAME) } -async fn create_org_and_project(client: &ClientTestContext) -> Uuid { +async fn create_project_and_pool(client: &ClientTestContext) -> Uuid { + create_default_ip_pool(client).await; let project = create_project(client, PROJECT_NAME).await; project.identity.id } async fn create_image(client: &ClientTestContext) -> views::Image { - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Define a global image @@ -411,8 +411,7 @@ async fn test_multiple_disks_multiple_snapshots_order_1( // Test multiple disks with multiple snapshots let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // Create a blank disk @@ -547,8 +546,7 @@ async fn test_multiple_disks_multiple_snapshots_order_2( // Test multiple disks with multiple snapshots, varying the delete order let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // Create a blank disk @@ -817,8 +815,7 @@ async fn test_multiple_layers_of_snapshots_delete_all_disks_first( // delete all disks, then delete all snapshots let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; prepare_for_test_multiple_layers_of_snapshots(&client).await; @@ -856,8 +853,7 @@ async fn test_multiple_layers_of_snapshots_delete_all_snapshots_first( // delete all snapshots, then delete all disks let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; prepare_for_test_multiple_layers_of_snapshots(&client).await; @@ -895,8 +891,7 @@ async fn test_multiple_layers_of_snapshots_random_delete_order( 
// delete snapshots and disks in a random order let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; prepare_for_test_multiple_layers_of_snapshots(&client).await; @@ -1116,8 +1111,7 @@ async fn delete_image_test( let disk_test = DiskTest::new(&cptestctx).await; let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); @@ -2345,8 +2339,7 @@ async fn test_disk_create_saga_unwinds_correctly( // created. let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disk_test = DiskTest::new(&cptestctx).await; let disks_url = get_disks_url(); @@ -2398,8 +2391,7 @@ async fn test_snapshot_create_saga_unwinds_correctly( // created. let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disk_test = DiskTest::new(&cptestctx).await; let disks_url = get_disks_url(); diff --git a/nexus/tests/integration_tests/vpc_subnets.rs b/nexus/tests/integration_tests/vpc_subnets.rs index 3067300e19..76cff9ac79 100644 --- a/nexus/tests/integration_tests/vpc_subnets.rs +++ b/nexus/tests/integration_tests/vpc_subnets.rs @@ -14,7 +14,7 @@ use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::identity_eq; use nexus_test_utils::resource_helpers::objects_list_page_authz; use nexus_test_utils::resource_helpers::{ - create_instance, create_project, create_vpc, populate_ip_pool, + create_default_ip_pool, create_instance, create_project, create_vpc, }; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::{params, views::VpcSubnet}; @@ -37,8 +37,8 @@ async fn test_delete_vpc_subnet_with_interfaces_fails( // Create a project that we'll use for testing. 
let project_name = "springfield-squidport"; let instance_name = "inst"; + create_default_ip_pool(client).await; let _ = create_project(&client, project_name).await; - populate_ip_pool(client, "default", None).await; let subnets_url = format!("/v1/vpc-subnets?project={}&vpc=default", project_name); diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index b607bbf1f3..2d842dd930 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -149,6 +149,10 @@ ip_pool_service_range_add POST /v1/system/ip-pools-service/ra ip_pool_service_range_list GET /v1/system/ip-pools-service/ranges ip_pool_service_range_remove POST /v1/system/ip-pools-service/ranges/remove ip_pool_service_view GET /v1/system/ip-pools-service +ip_pool_silo_link POST /v1/system/ip-pools/{pool}/silos +ip_pool_silo_list GET /v1/system/ip-pools/{pool}/silos +ip_pool_silo_unlink DELETE /v1/system/ip-pools/{pool}/silos/{silo} +ip_pool_silo_update PUT /v1/system/ip-pools/{pool}/silos/{silo} ip_pool_update PUT /v1/system/ip-pools/{pool} ip_pool_view GET /v1/system/ip-pools/{pool} networking_address_lot_block_list GET /v1/system/networking/address-lot/{address_lot}/blocks diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 209d1f607c..d3f269ef5d 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -836,17 +836,6 @@ impl std::fmt::Debug for CertificateCreate { pub struct IpPoolCreate { #[serde(flatten)] pub identity: IdentityMetadataCreateParams, - - /// If an IP pool is associated with a silo, instance IP allocations in that - /// silo can draw from that pool. - pub silo: Option, - - /// Whether the IP pool is considered a default pool for its scope (fleet - /// or silo). If a pool is marked default and is associated with a silo, - /// instances created in that silo will draw IPs from that pool unless - /// another pool is specified at instance create time. - #[serde(default)] - pub is_default: bool, } /// Parameters for updating an IP Pool @@ -856,6 +845,31 @@ pub struct IpPoolUpdate { pub identity: IdentityMetadataUpdateParams, } +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct IpPoolSiloPath { + pub pool: NameOrId, + pub silo: NameOrId, +} + +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct IpPoolSiloLink { + pub silo: NameOrId, + /// When a pool is the default for a silo, floating IPs and instance + /// ephemeral IPs will come from that pool when no other pool is specified. + /// There can be at most one default for a given silo. + pub is_default: bool, +} + +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct IpPoolSiloUpdate { + /// When a pool is the default for a silo, floating IPs and instance + /// ephemeral IPs will come from that pool when no other pool is specified. + /// There can be at most one default for a given silo, so when a pool is + /// made default, an existing default will remain linked but will no longer + /// be the default. + pub is_default: bool, +} + // Floating IPs /// Parameters for creating a new floating IP address for instances. 
#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index 46a8aa3d95..c85597e94c 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -295,11 +295,23 @@ pub struct VpcRouter { // IP POOLS +/// A collection of IP ranges. If a pool is linked to a silo, IP addresses from +/// the pool can be allocated within that silo #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct IpPool { #[serde(flatten)] pub identity: IdentityMetadata, - pub silo_id: Option, +} + +/// A link between an IP pool and a silo that allows one to allocate IPs from +/// the pool within the silo +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +pub struct IpPoolSilo { + pub ip_pool_id: Uuid, + pub silo_id: Uuid, + /// When a pool is the default for a silo, floating IPs and instance + /// ephemeral IPs will come from that pool when no other pool is specified. + /// There can be at most one default for a given silo. pub is_default: bool, } diff --git a/openapi/nexus.json b/openapi/nexus.json index f2433e5512..a4ba6cbb86 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -2154,7 +2154,7 @@ "tags": [ "projects" ], - "summary": "List all IP pools that can be used by a given project", + "summary": "List all IP pools", "operationId": "project_ip_pool_list", "parameters": [ { @@ -2177,14 +2177,6 @@ "type": "string" } }, - { - "in": "query", - "name": "project", - "description": "Name or ID of the project", - "schema": { - "$ref": "#/components/schemas/NameOrId" - } - }, { "in": "query", "name": "sort_by", @@ -2212,9 +2204,7 @@ } }, "x-dropshot-pagination": { - "required": [ - "project" - ] + "required": [] } } }, @@ -2234,14 +2224,6 @@ "schema": { "$ref": "#/components/schemas/NameOrId" } - }, - { - "in": "query", - "name": "project", - "description": "Name or ID of the project", - "schema": { - "$ref": "#/components/schemas/NameOrId" - } } ], "responses": { @@ -5006,6 +4988,213 @@ } } }, + "/v1/system/ip-pools/{pool}/silos": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "List an IP pool's linked silos", + "operationId": "ip_pool_silo_list", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolSiloResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "system/networking" + ], + "summary": "Make an IP pool available within a silo", + "operationId": "ip_pool_silo_link", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name 
or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolSiloLink" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolSilo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/ip-pools/{pool}/silos/{silo}": { + "put": { + "tags": [ + "system/networking" + ], + "summary": "Make an IP pool default or not-default for a silo", + "description": "When a pool is made default for a silo, any existing default will remain linked to the silo, but will no longer be the default.", + "operationId": "ip_pool_silo_update", + "parameters": [ + { + "in": "path", + "name": "pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolSiloUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolSilo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "system/networking" + ], + "summary": "Unlink an IP pool from a silo", + "description": "Will fail if there are any outstanding IPs allocated in the silo.", + "operationId": "ip_pool_silo_unlink", + "parameters": [ + { + "in": "path", + "name": "pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/v1/system/ip-pools-service": { "get": { "tags": [ @@ -12253,7 +12442,7 @@ ] }, "IpPool": { - "description": "Identity-related metadata that's included in nearly all public API objects", + "description": "A collection of IP ranges. If a pool is linked to a silo, IP addresses from the pool can be allocated within that silo", "type": "object", "properties": { "description": { @@ -12265,9 +12454,6 @@ "type": "string", "format": "uuid" }, - "is_default": { - "type": "boolean" - }, "name": { "description": "unique, mutable, user-controlled identifier for each resource", "allOf": [ @@ -12276,11 +12462,6 @@ } ] }, - "silo_id": { - "nullable": true, - "type": "string", - "format": "uuid" - }, "time_created": { "description": "timestamp when this resource was created", "type": "string", @@ -12295,7 +12476,6 @@ "required": [ "description", "id", - "is_default", "name", "time_created", "time_modified" @@ -12308,22 +12488,8 @@ "description": { "type": "string" }, - "is_default": { - "description": "Whether the IP pool is considered a default pool for its scope (fleet or silo). 
If a pool is marked default and is associated with a silo, instances created in that silo will draw IPs from that pool unless another pool is specified at instance create time.", - "default": false, - "type": "boolean" - }, "name": { "$ref": "#/components/schemas/Name" - }, - "silo": { - "nullable": true, - "description": "If an IP pool is associated with a silo, instance IP allocations in that silo can draw from that pool.", - "allOf": [ - { - "$ref": "#/components/schemas/NameOrId" - } - ] } }, "required": [ @@ -12399,6 +12565,78 @@ "items" ] }, + "IpPoolSilo": { + "description": "A link between an IP pool and a silo that allows one to allocate IPs from the pool within the silo", + "type": "object", + "properties": { + "ip_pool_id": { + "type": "string", + "format": "uuid" + }, + "is_default": { + "description": "When a pool is the default for a silo, floating IPs and instance ephemeral IPs will come from that pool when no other pool is specified. There can be at most one default for a given silo.", + "type": "boolean" + }, + "silo_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "ip_pool_id", + "is_default", + "silo_id" + ] + }, + "IpPoolSiloLink": { + "type": "object", + "properties": { + "is_default": { + "description": "When a pool is the default for a silo, floating IPs and instance ephemeral IPs will come from that pool when no other pool is specified. There can be at most one default for a given silo.", + "type": "boolean" + }, + "silo": { + "$ref": "#/components/schemas/NameOrId" + } + }, + "required": [ + "is_default", + "silo" + ] + }, + "IpPoolSiloResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/IpPoolSilo" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "IpPoolSiloUpdate": { + "type": "object", + "properties": { + "is_default": { + "description": "When a pool is the default for a silo, floating IPs and instance ephemeral IPs will come from that pool when no other pool is specified. 
There can be at most one default for a given silo, so when a pool is made default, an existing default will remain linked but will no longer be the default.", + "type": "boolean" + } + }, + "required": [ + "is_default" + ] + }, "IpPoolUpdate": { "description": "Parameters for updating an IP Pool", "type": "object", diff --git a/schema/crdb/23.0.0/up1.sql b/schema/crdb/23.0.0/up1.sql new file mode 100644 index 0000000000..28204a4d3b --- /dev/null +++ b/schema/crdb/23.0.0/up1.sql @@ -0,0 +1,3 @@ +CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_resource_type AS ENUM ( + 'silo' +); diff --git a/schema/crdb/23.0.0/up2.sql b/schema/crdb/23.0.0/up2.sql new file mode 100644 index 0000000000..cf4668c325 --- /dev/null +++ b/schema/crdb/23.0.0/up2.sql @@ -0,0 +1,8 @@ +CREATE TABLE IF NOT EXISTS omicron.public.ip_pool_resource ( + ip_pool_id UUID NOT NULL, + resource_type omicron.public.ip_pool_resource_type NOT NULL, + resource_id UUID NOT NULL, + is_default BOOL NOT NULL, + + PRIMARY KEY (ip_pool_id, resource_type, resource_id) +); diff --git a/schema/crdb/23.0.0/up3.sql b/schema/crdb/23.0.0/up3.sql new file mode 100644 index 0000000000..c345fd794e --- /dev/null +++ b/schema/crdb/23.0.0/up3.sql @@ -0,0 +1,5 @@ +CREATE UNIQUE INDEX IF NOT EXISTS one_default_ip_pool_per_resource ON omicron.public.ip_pool_resource ( + resource_id +) where + is_default = true; + diff --git a/schema/crdb/23.0.0/up4.sql b/schema/crdb/23.0.0/up4.sql new file mode 100644 index 0000000000..8fb43f9cf1 --- /dev/null +++ b/schema/crdb/23.0.0/up4.sql @@ -0,0 +1,38 @@ +-- Copy existing fleet-scoped pools over to the pool-silo join table +-- +-- Fleet-scoped pools are going away, but we recreate the equivalent of a fleet +-- link for existing fleet-scoped pools by associating them with every existing +-- silo, i.e., inserting a row into the association table for each (pool, silo) +-- pair. +set local disallow_full_table_scans = off; + +INSERT INTO omicron.public.ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) +SELECT + p.id AS ip_pool_id, + 'silo' AS resource_type, + s.id AS resource_id, + -- Special handling is required for conflicts between a fleet default and a + -- silo default. If pool P1 is a fleet default and pool P2 is a silo default + -- on silo S1, we cannot link both to S1 with is_default = true. What we + -- really want in that case is: + -- + -- row 1: (P1, S1, is_default=false) + -- row 2: (P2, S1, is_default=true) + -- + -- i.e., we want to link both, but have the silo default take precedence. The + -- AND NOT EXISTS here causes is_default to be false in row 1 if there is a + -- conflicting silo default pool. row 2 is inserted in up5. 
+ p.is_default AND NOT EXISTS ( + SELECT 1 FROM omicron.public.ip_pool + WHERE silo_id = s.id AND is_default + ) +FROM omicron.public.ip_pool AS p +-- cross join means we are looking at the cartesian product of all fleet-scoped +-- IP pools and all silos +CROSS JOIN omicron.public.silo AS s +WHERE p.time_deleted IS null + AND p.silo_id IS null -- means it's a fleet pool + AND s.time_deleted IS null +-- this makes it idempotent +ON CONFLICT (ip_pool_id, resource_type, resource_id) +DO NOTHING; diff --git a/schema/crdb/23.0.0/up5.sql b/schema/crdb/23.0.0/up5.sql new file mode 100644 index 0000000000..3c1b100c9b --- /dev/null +++ b/schema/crdb/23.0.0/up5.sql @@ -0,0 +1,13 @@ +-- Copy existing silo-scoped pools over to the pool-silo join table +INSERT INTO omicron.public.ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) +SELECT + id as ip_pool_id, + 'silo' as resource_type, + silo_id as resource_id, + is_default +FROM omicron.public.ip_pool AS ip +WHERE silo_id IS NOT null + AND time_deleted IS null +-- this makes it idempotent +ON CONFLICT (ip_pool_id, resource_type, resource_id) +DO NOTHING; diff --git a/schema/crdb/23.0.1/README.md b/schema/crdb/23.0.1/README.md new file mode 100644 index 0000000000..bd12f9883b --- /dev/null +++ b/schema/crdb/23.0.1/README.md @@ -0,0 +1 @@ +These steps are separated from 23.0.0 because they drop things that are used in the previous steps, which causes the idempotence test to fail when it runs each migration multiple times, because the things the earlier steps rely on are no longer there. diff --git a/schema/crdb/23.0.1/up1.sql b/schema/crdb/23.0.1/up1.sql new file mode 100644 index 0000000000..3e5347e78c --- /dev/null +++ b/schema/crdb/23.0.1/up1.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS one_default_pool_per_scope; \ No newline at end of file diff --git a/schema/crdb/23.0.1/up2.sql b/schema/crdb/23.0.1/up2.sql new file mode 100644 index 0000000000..a62bda1adc --- /dev/null +++ b/schema/crdb/23.0.1/up2.sql @@ -0,0 +1,3 @@ +ALTER TABLE omicron.public.ip_pool + DROP COLUMN IF EXISTS silo_id, + DROP COLUMN IF EXISTS is_default; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 57ce791a03..e40c97972f 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1566,29 +1566,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool ( time_deleted TIMESTAMPTZ, /* The collection's child-resource generation number */ - rcgen INT8 NOT NULL, - - /* - * Association with a silo. silo_id is also used to mark an IP pool as - * "internal" by associating it with the oxide-internal silo. Null silo_id - * means the pool is can be used fleet-wide. - */ - silo_id UUID, - - /* Is this the default pool for its scope (fleet or silo) */ - is_default BOOLEAN NOT NULL DEFAULT FALSE + rcgen INT8 NOT NULL ); -/* - * Ensure there can only be one default pool for the fleet or a given silo. - * Coalesce is needed because otherwise different nulls are considered to be - * distinct from each other. - */ -CREATE UNIQUE INDEX IF NOT EXISTS one_default_pool_per_scope ON omicron.public.ip_pool ( - COALESCE(silo_id, '00000000-0000-0000-0000-000000000000'::uuid) -) WHERE - is_default = true AND time_deleted IS NULL; - /* * Index ensuring uniqueness of IP Pool names, globally.
*/ @@ -1597,6 +1577,33 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_pool_by_name ON omicron.public.ip_pool ) WHERE time_deleted IS NULL; +-- The order here is most-specific first, and it matters because we use this +-- fact to select the most specific default in the case where there is both a +-- silo default and a fleet default. If we were to add a project type, it should +-- be added before silo. +CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_resource_type AS ENUM ( + 'silo' +); + +-- join table associating IP pools with resources like fleet or silo +CREATE TABLE IF NOT EXISTS omicron.public.ip_pool_resource ( + ip_pool_id UUID NOT NULL, + resource_type omicron.public.ip_pool_resource_type NOT NULL, + resource_id UUID NOT NULL, + is_default BOOL NOT NULL, + -- TODO: timestamps for soft deletes? + + -- resource_type is redundant because resource IDs are globally unique, but + -- logically it belongs here + PRIMARY KEY (ip_pool_id, resource_type, resource_id) +); + +-- a given resource can only have one default ip pool +CREATE UNIQUE INDEX IF NOT EXISTS one_default_ip_pool_per_resource ON omicron.public.ip_pool_resource ( + resource_id +) where + is_default = true; + /* * IP Pools are made up of a set of IP ranges, which are start/stop addresses. * Note that these need not be CIDR blocks or well-behaved subnets with a @@ -3251,7 +3258,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '22.0.0', NULL) + ( TRUE, NOW(), NOW(), '23.0.1', NULL) ON CONFLICT DO NOTHING; COMMIT;
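A minimal sketch of how the new join table is meant to be read, not part of the change itself: under this schema, the default pool for a silo is the single ip_pool_resource row for that silo with is_default = true, and the one_default_ip_pool_per_resource partial index guarantees there is at most one such row. The silo UUID below is a placeholder.

    SELECT p.id, p.name
    FROM omicron.public.ip_pool AS p
    JOIN omicron.public.ip_pool_resource AS ipr
      ON ipr.ip_pool_id = p.id
    WHERE ipr.resource_type = 'silo'
      AND ipr.resource_id = '00000000-0000-0000-0000-000000000000' -- placeholder silo id
      AND ipr.is_default = true
      AND p.time_deleted IS NULL;

The same index is what turns "at most one default per silo" into a database-level guarantee rather than an application-level convention: a first default link for a silo inserts fine, but a second row with the same resource_id and is_default = true is rejected. That is why switching a silo's default goes through PUT /v1/system/ip-pools/{pool}/silos/{silo}, which per the descriptions above leaves the old default linked but no longer the default. Illustrative statements with placeholder UUIDs:

    INSERT INTO omicron.public.ip_pool_resource
        (ip_pool_id, resource_type, resource_id, is_default)
    VALUES
        ('11111111-1111-1111-1111-111111111111', 'silo',
         '22222222-2222-2222-2222-222222222222', true);
    -- Inserting a second row with resource_id
    -- '22222222-2222-2222-2222-222222222222' and is_default = true would
    -- violate one_default_ip_pool_per_resource and fail.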