From f47aefcb7600486edef0451847098f7a11a557c0 Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Mon, 19 Dec 2022 18:24:45 -0500
Subject: [PATCH] [nexus] Remove project_id, rack_id from IP pools (#2056)

## Before this PR

- IP Pools could exist in at most one project. IP allocation during instance creation occurred [either by requesting an IP pool belonging to a project, or by "just looking for any unreserved IP Pool"](https://github.com/oxidecomputer/omicron/blob/79765a4e3b39a29bc9940c0e4a49c4364fbcc9e3/nexus/src/db/queries/external_ip.rs#L186-L212). As discussed in https://github.com/oxidecomputer/omicron/issues/2055 , our intention is for IP pools to be used across multiple projects, and for projects to be able to use multiple IP pools.
- "Service" IP pools were indexed by rack ID, though (as documented in https://github.com/oxidecomputer/omicron/issues/1276 ) they should probably be accessed by AZ instead.

## This PR

- Adds a default IP pool named `default`, which is used for address allocation unless a more specific IP pool is provided
- Removes "project ID" from IP pools (and external IP addresses)
- Removes "rack ID" from the IP pool API and DB representation

## In the future

- This PR doesn't provide the many-to-many connection between projects and IP pools that we eventually want, where projects can be configured to use different IP pools for different purposes. However, by removing the not-quite-accurate constraint that an IP pool must belong to a *single* project, the API moves closer to that goal.
- The `service_ip_pool` API should probably be queried by an AZ UUID, but since AZs don't exist in the API yet, this has been omitted.

Part of #2055
---
 common/src/sql/dbinit.sql                     |  51 +---
 end-to-end-tests/src/bin/bootstrap.rs         |  16 +-
 nexus/db-model/src/external_ip.rs             |  40 +--
 nexus/db-model/src/ip_pool.rs                 |  30 +-
 nexus/db-model/src/project.rs                 |  28 +-
 nexus/db-model/src/schema.rs                  |   5 +-
 nexus/src/app/ip_pool.rs                      |  56 ++--
 nexus/src/app/sagas/disk_create.rs            |   2 +-
 nexus/src/app/sagas/instance_create.rs        |  26 +-
 nexus/src/db/datastore/external_ip.rs         |  69 +----
 nexus/src/db/datastore/ip_pool.rs             | 138 ++++-----
 nexus/src/db/datastore/mod.rs                 |   3 -
 nexus/src/db/datastore/project.rs             |   4 -
 nexus/src/db/queries/external_ip.rs           | 273 ++++--------------
 nexus/src/db/queries/ip_pool.rs               |   2 -
 nexus/src/external_api/http_entrypoints.rs    |  48 +--
 nexus/src/populate.rs                         |  19 +-
 nexus/test-utils/src/resource_helpers.rs      |  39 ++-
 nexus/tests/integration_tests/disks.rs        |   4 +-
 nexus/tests/integration_tests/endpoints.rs    |   8 +-
 nexus/tests/integration_tests/instances.rs    | 125 ++------
 nexus/tests/integration_tests/ip_pools.rs     |  58 ++--
 nexus/tests/integration_tests/projects.rs     |  66 +----
 nexus/tests/integration_tests/snapshots.rs    |  10 +-
 .../integration_tests/subnet_allocation.rs    |   4 +-
 nexus/tests/integration_tests/unauthorized.rs |  15 +-
 .../integration_tests/volume_management.rs    |  14 +-
 nexus/tests/integration_tests/vpc_subnets.rs  |   6 +-
 nexus/tests/output/nexus_tags.txt             |   8 +-
 nexus/types/src/external_api/params.rs        |  10 -
 nexus/types/src/external_api/views.rs         |   1 -
 openapi/nexus.json                            |  65 +---
 32 files changed, 330 insertions(+), 913 deletions(-)

diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql
index 93c2772e6c..26e86856d0 100644
--- a/common/src/sql/dbinit.sql
+++ b/common/src/sql/dbinit.sql
@@ -1112,16 +1112,9 @@ CREATE TABLE omicron.public.ip_pool (
     time_modified TIMESTAMPTZ NOT NULL,
     time_deleted TIMESTAMPTZ,

-    /* Optional ID of the project for which this
pool is reserved. */ - project_id UUID, - /* - * Optional rack ID, indicating this is a reserved pool for internal - * services on a specific rack. - * TODO(https://github.com/oxidecomputer/omicron/issues/1276): This - * should probably point to an AZ or fleet, not a rack. - */ - rack_id UUID, + /* Identifies if the IP Pool is dedicated to Control Plane services */ + internal BOOL NOT NULL, /* The collection's child-resource generation number */ rcgen INT8 NOT NULL @@ -1135,15 +1128,6 @@ CREATE UNIQUE INDEX ON omicron.public.ip_pool ( ) WHERE time_deleted IS NULL; -/* - * Index ensuring uniqueness of IP pools by rack ID - */ -CREATE UNIQUE INDEX ON omicron.public.ip_pool ( - rack_id -) WHERE - rack_id IS NOT NULL AND - time_deleted IS NULL; - /* * IP Pools are made up of a set of IP ranges, which are start/stop addresses. * Note that these need not be CIDR blocks or well-behaved subnets with a @@ -1158,20 +1142,11 @@ CREATE TABLE omicron.public.ip_pool_range ( /* The range is inclusive of the last address. */ last_address INET NOT NULL, ip_pool_id UUID NOT NULL, - /* Optional ID of the project for which this range is reserved. - * - * NOTE: This denormalizes the tables a bit, since the project_id is - * duplicated here and in the parent `ip_pool` table. We're allowing this - * for now, since it reduces the complexity of the already-bad IP allocation - * query, but we may want to revisit that, and JOIN with the parent table - * instead. - */ - project_id UUID, /* Tracks child resources, IP addresses allocated out of this range. */ rcgen INT8 NOT NULL ); -/* +/* * These help Nexus enforce that the ranges within an IP Pool do not overlap * with any other ranges. See `nexus/src/db/queries/ip_pool.rs` for the actual * query which does that. @@ -1187,14 +1162,6 @@ CREATE UNIQUE INDEX ON omicron.public.ip_pool_range ( STORING (first_address) WHERE time_deleted IS NULL; -/* - * Index supporting allocation of IPs out of a Pool reserved for a project. - */ -CREATE INDEX ON omicron.public.ip_pool_range ( - project_id -) WHERE - time_deleted IS NULL; - /* The kind of external IP address. */ CREATE TYPE omicron.public.ip_kind AS ENUM ( @@ -1245,9 +1212,6 @@ CREATE TABLE omicron.public.external_ip ( /* FK to the `ip_pool_range` table. */ ip_pool_range_id UUID NOT NULL, - /* FK to the `project` table. */ - project_id UUID, - /* FK to the `instance` table. See the constraints below. */ instance_id UUID, @@ -1263,15 +1227,6 @@ CREATE TABLE omicron.public.external_ip ( /* The last port in the allowed range, also inclusive. */ last_port INT4 NOT NULL, - /* - * The project can only be NULL for service IPs. - * Additionally, the project MUST be NULL for service IPs. - */ - CONSTRAINT null_project CHECK( - (kind != 'service' AND project_id IS NOT NULL) OR - (kind = 'service' AND project_id IS NULL) - ), - /* The name must be non-NULL iff this is a floating IP. 
*/
    CONSTRAINT null_fip_name CHECK (
        (kind != 'floating' AND name IS NULL) OR
diff --git a/end-to-end-tests/src/bin/bootstrap.rs b/end-to-end-tests/src/bin/bootstrap.rs
index edbf9ac550..cb82857524 100644
--- a/end-to-end-tests/src/bin/bootstrap.rs
+++ b/end-to-end-tests/src/bin/bootstrap.rs
@@ -3,7 +3,7 @@ use end_to_end_tests::helpers::ctx::{build_client, Context};
 use end_to_end_tests::helpers::{generate_name, get_system_ip_pool};
 use omicron_test_utils::dev::poll::{wait_for_condition, CondCheckError};
 use oxide_client::types::{
-    ByteCount, DiskCreate, DiskSource, IpPoolCreate, IpRange, Ipv4Range,
+    ByteCount, DiskCreate, DiskSource, IpRange, Ipv4Range,
 };
 use oxide_client::{ClientDisksExt, ClientOrganizationsExt, ClientSystemExt};
 use std::time::Duration;
@@ -30,21 +30,9 @@ async fn main() -> Result<()> {
     // ===== POPULATE IP POOL ===== //
     eprintln!("populating IP pool...");
     let (first, last) = get_system_ip_pool()?;
-    let pool_name = client
-        .ip_pool_create()
-        .body(IpPoolCreate {
-            name: generate_name("ip-pool")?,
-            description: String::new(),
-            organization: None,
-            project: None,
-        })
-        .send()
-        .await?
-        .name
-        .clone();
     client
         .ip_pool_range_add()
-        .pool_name(pool_name)
+        .pool_name("default")
         .body(IpRange::V4(Ipv4Range { first, last }))
         .send()
         .await?;
diff --git a/nexus/db-model/src/external_ip.rs b/nexus/db-model/src/external_ip.rs
index 75c5183540..48e8e77d0d 100644
--- a/nexus/db-model/src/external_ip.rs
+++ b/nexus/db-model/src/external_ip.rs
@@ -58,7 +58,6 @@ pub struct ExternalIp {
     pub time_deleted: Option<DateTime<Utc>>,
     pub ip_pool_id: Uuid,
     pub ip_pool_range_id: Uuid,
-    pub project_id: Option<Uuid>,
     // This is Some(_) for:
     //  - all instance SNAT IPs
     //  - all ephemeral IPs
@@ -80,18 +79,6 @@ impl From<ExternalIp> for sled_agent_client::types::SourceNatConfig {
     }
 }

-/// Describes where the IP candidates for allocation come from: either
-/// from an IP pool, or from a project.
-///
-/// This ensures that a source is always specified, and a caller cannot
-/// request an external IP allocation without providing at least one of
-/// these options.
-#[derive(Debug, Clone, Copy)]
-pub enum IpSource {
-    Instance { project_id: Uuid, pool_id: Option<Uuid> },
-    Service { pool_id: Uuid },
-}
-
/// An incomplete external IP, used to store state required for issuing the
/// database query that selects an available IP and stores the resulting record.
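///
/// As a rough usage sketch (constructor as defined below; `instance_id` and
/// `pool_id` are assumed to be already in hand, e.g. from the default pool
/// lookup):
///
/// ```ignore
/// let data = IncompleteExternalIp::for_ephemeral(
///     Uuid::new_v4(), // ID of the new external IP record
///     instance_id,    // instance receiving the ephemeral address
///     pool_id,        // IP pool to allocate from
/// );
/// // `data` is then handed to the allocation query (see the datastore's
/// // `allocate_external_ip`).
/// ```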
#[derive(Debug, Clone)] @@ -102,15 +89,14 @@ pub struct IncompleteExternalIp { time_created: DateTime, kind: IpKind, instance_id: Option, - source: IpSource, + pool_id: Uuid, } impl IncompleteExternalIp { pub fn for_instance_source_nat( id: Uuid, - project_id: Uuid, instance_id: Uuid, - pool_id: Option, + pool_id: Uuid, ) -> Self { Self { id, @@ -119,16 +105,11 @@ impl IncompleteExternalIp { time_created: Utc::now(), kind: IpKind::SNat, instance_id: Some(instance_id), - source: IpSource::Instance { project_id, pool_id }, + pool_id, } } - pub fn for_ephemeral( - id: Uuid, - project_id: Uuid, - instance_id: Uuid, - pool_id: Option, - ) -> Self { + pub fn for_ephemeral(id: Uuid, instance_id: Uuid, pool_id: Uuid) -> Self { Self { id, name: None, @@ -136,7 +117,7 @@ impl IncompleteExternalIp { time_created: Utc::now(), kind: IpKind::Ephemeral, instance_id: Some(instance_id), - source: IpSource::Instance { project_id, pool_id }, + pool_id, } } @@ -144,8 +125,7 @@ impl IncompleteExternalIp { id: Uuid, name: &Name, description: &str, - project_id: Uuid, - pool_id: Option, + pool_id: Uuid, ) -> Self { Self { id, @@ -154,7 +134,7 @@ impl IncompleteExternalIp { time_created: Utc::now(), kind: IpKind::Floating, instance_id: None, - source: IpSource::Instance { project_id, pool_id }, + pool_id, } } @@ -166,7 +146,7 @@ impl IncompleteExternalIp { time_created: Utc::now(), kind: IpKind::Service, instance_id: None, - source: IpSource::Service { pool_id }, + pool_id, } } @@ -194,8 +174,8 @@ impl IncompleteExternalIp { &self.instance_id } - pub fn source(&self) -> &IpSource { - &self.source + pub fn pool_id(&self) -> &Uuid { + &self.pool_id } } diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 75da25e86e..9e5c1db11e 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -28,13 +28,11 @@ pub struct IpPool { #[diesel(embed)] pub identity: IpPoolIdentity, - /// An optional ID of the project for which this pool is reserved. - pub project_id: Option, - - /// An optional ID of the rack for which this pool is reserved. - // TODO(https://github.com/oxidecomputer/omicron/issues/1276): This - // should probably point to an AZ or fleet, not a rack. - pub rack_id: Option, + /// If true, identifies that this IP pool is dedicated to "Control-Plane + /// Services", such as Nexus. + /// + /// Otherwise, this IP pool is intended for usage by customer VMs. + pub internal: bool, /// Child resource generation number, for optimistic concurrency control of /// the contained ranges. @@ -44,16 +42,14 @@ pub struct IpPool { impl IpPool { pub fn new( pool_identity: &external::IdentityMetadataCreateParams, - project_id: Option, - rack_id: Option, + internal: bool, ) -> Self { Self { identity: IpPoolIdentity::new( Uuid::new_v4(), pool_identity.clone(), ), - project_id, - rack_id, + internal, rcgen: 0, } } @@ -61,7 +57,7 @@ impl IpPool { impl From for views::IpPool { fn from(pool: IpPool) -> Self { - Self { identity: pool.identity(), project_id: pool.project_id } + Self { identity: pool.identity() } } } @@ -98,20 +94,13 @@ pub struct IpPoolRange { pub last_address: IpNetwork, /// Foreign-key to the `ip_pool` table with the parent pool for this range pub ip_pool_id: Uuid, - /// Foreign-key to the `project` table, with the Project to which this range - /// is restricted, if any (derived from the `ip_pool` table). - pub project_id: Option, /// The child resource generation number, tracking IP addresses allocated or /// used from this range. 
pub rcgen: i64, } impl IpPoolRange { - pub fn new( - range: &IpRange, - ip_pool_id: Uuid, - project_id: Option, - ) -> Self { + pub fn new(range: &IpRange, ip_pool_id: Uuid) -> Self { let now = Utc::now(); let first_address = range.first_address(); let last_address = range.last_address(); @@ -129,7 +118,6 @@ impl IpPoolRange { first_address: IpNetwork::from(range.first_address()), last_address: IpNetwork::from(range.last_address()), ip_pool_id, - project_id, rcgen: 0, } } diff --git a/nexus/db-model/src/project.rs b/nexus/db-model/src/project.rs index bca65cb9aa..568f96ddc9 100644 --- a/nexus/db-model/src/project.rs +++ b/nexus/db-model/src/project.rs @@ -2,13 +2,9 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use super::{ - Disk, ExternalIp, Generation, Image, Instance, IpPool, Name, Snapshot, Vpc, -}; +use super::{Disk, Generation, Image, Instance, Name, Snapshot, Vpc}; use crate::collection::DatastoreCollectionConfig; -use crate::schema::{ - disk, external_ip, image, instance, ip_pool, project, snapshot, vpc, -}; +use crate::schema::{disk, image, instance, project, snapshot, vpc}; use chrono::{DateTime, Utc}; use db_macros::Resource; use nexus_types::external_api::params; @@ -83,26 +79,6 @@ impl DatastoreCollectionConfig for Project { type CollectionIdColumn = vpc::dsl::project_id; } -// NOTE: "IpPoolRange" also contains a reference to "project_id", but -// ranges should only exist within IP Pools. -impl DatastoreCollectionConfig for Project { - type CollectionId = Uuid; - type GenerationNumberColumn = project::dsl::rcgen; - type CollectionTimeDeletedColumn = project::dsl::time_deleted; - type CollectionIdColumn = ip_pool::dsl::project_id; -} - -// TODO(https://github.com/oxidecomputer/omicron/issues/1482): Not yet utilized, -// but needed for project deletion safety. -// TODO(https://github.com/oxidecomputer/omicron/issues/1334): Cannot be -// utilized until floating IPs are implemented. -impl DatastoreCollectionConfig for Project { - type CollectionId = Uuid; - type GenerationNumberColumn = project::dsl::rcgen; - type CollectionTimeDeletedColumn = project::dsl::time_deleted; - type CollectionIdColumn = external_ip::dsl::project_id; -} - /// Describes a set of updates for the [`Project`] model. #[derive(AsChangeset)] #[diesel(table_name = project)] diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 03060d57ac..281ab1d967 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -149,8 +149,7 @@ table! { time_created -> Timestamptz, time_modified -> Timestamptz, time_deleted -> Nullable, - project_id -> Nullable, - rack_id -> Nullable, + internal -> Bool, rcgen -> Int8, } } @@ -164,7 +163,6 @@ table! { first_address -> Inet, last_address -> Inet, ip_pool_id -> Uuid, - project_id -> Nullable, rcgen -> Int8, } } @@ -179,7 +177,6 @@ table! 
{ time_deleted -> Nullable, ip_pool_id -> Uuid, ip_pool_range_id -> Uuid, - project_id -> Nullable, instance_id -> Nullable, kind -> crate::IpKindEnum, ip -> Inet, diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index a0c3e0d1b1..e82bd97737 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -28,16 +28,19 @@ impl super::Nexus { opctx: &OpContext, new_pool: ¶ms::IpPoolCreate, ) -> CreateResult { - self.db_datastore.ip_pool_create(opctx, new_pool, None).await + self.db_datastore + .ip_pool_create(opctx, new_pool, /* internal= */ false) + .await } pub async fn ip_pool_services_create( &self, opctx: &OpContext, new_pool: ¶ms::IpPoolCreate, - rack_id: Uuid, ) -> CreateResult { - self.db_datastore.ip_pool_create(opctx, new_pool, Some(rack_id)).await + self.db_datastore + .ip_pool_create(opctx, new_pool, /* internal= */ true) + .await } pub async fn ip_pools_list_by_name( @@ -119,7 +122,7 @@ impl super::Nexus { .ip_pool_name(pool_name) .fetch_for(authz::Action::ListChildren) .await?; - if db_pool.rack_id.is_some() { + if db_pool.internal { return Err(Error::not_found_by_name( ResourceType::IpPool, pool_name, @@ -142,15 +145,13 @@ impl super::Nexus { .ip_pool_name(pool_name) .fetch_for(authz::Action::Modify) .await?; - if db_pool.rack_id.is_some() { + if db_pool.internal { return Err(Error::not_found_by_name( ResourceType::IpPool, pool_name, )); } - self.db_datastore - .ip_pool_add_range(opctx, &authz_pool, &db_pool, range) - .await + self.db_datastore.ip_pool_add_range(opctx, &authz_pool, range).await } pub async fn ip_pool_delete_range( @@ -164,7 +165,7 @@ impl super::Nexus { .ip_pool_name(pool_name) .fetch_for(authz::Action::Modify) .await?; - if db_pool.rack_id.is_some() { + if db_pool.internal { return Err(Error::not_found_by_name( ResourceType::IpPool, pool_name, @@ -174,21 +175,17 @@ impl super::Nexus { } // The "ip_pool_service_..." functions look up IP pools for Oxide service usage, - // rather than for VMs. As such, they're identified by rack UUID, not - // by pool names. + // rather than for VMs. // // TODO(https://github.com/oxidecomputer/omicron/issues/1276): Should be - // AZ UUID, probably. + // accessed via AZ UUID, probably. pub async fn ip_pool_service_fetch( &self, opctx: &OpContext, - rack_id: Uuid, ) -> LookupResult { - let (authz_pool, db_pool) = self - .db_datastore - .ip_pools_lookup_by_rack_id(opctx, rack_id) - .await?; + let (authz_pool, db_pool) = + self.db_datastore.ip_pools_service_lookup(opctx).await?; opctx.authorize(authz::Action::Read, &authz_pool).await?; Ok(db_pool) } @@ -196,13 +193,10 @@ impl super::Nexus { pub async fn ip_pool_service_list_ranges( &self, opctx: &OpContext, - rack_id: Uuid, pagparams: &DataPageParams<'_, IpNetwork>, ) -> ListResultVec { - let (authz_pool, ..) = self - .db_datastore - .ip_pools_lookup_by_rack_id(opctx, rack_id) - .await?; + let (authz_pool, ..) = + self.db_datastore.ip_pools_service_lookup(opctx).await?; opctx.authorize(authz::Action::Read, &authz_pool).await?; self.db_datastore .ip_pool_list_ranges(opctx, &authz_pool, pagparams) @@ -212,29 +206,21 @@ impl super::Nexus { pub async fn ip_pool_service_add_range( &self, opctx: &OpContext, - rack_id: Uuid, range: &IpRange, ) -> UpdateResult { - let (authz_pool, db_pool) = self - .db_datastore - .ip_pools_lookup_by_rack_id(opctx, rack_id) - .await?; + let (authz_pool, ..) 
= + self.db_datastore.ip_pools_service_lookup(opctx).await?; opctx.authorize(authz::Action::Modify, &authz_pool).await?; - self.db_datastore - .ip_pool_add_range(opctx, &authz_pool, &db_pool, range) - .await + self.db_datastore.ip_pool_add_range(opctx, &authz_pool, range).await } pub async fn ip_pool_service_delete_range( &self, opctx: &OpContext, - rack_id: Uuid, range: &IpRange, ) -> DeleteResult { - let (authz_pool, ..) = self - .db_datastore - .ip_pools_lookup_by_rack_id(opctx, rack_id) - .await?; + let (authz_pool, ..) = + self.db_datastore.ip_pools_service_lookup(opctx).await?; opctx.authorize(authz::Action::Modify, &authz_pool).await?; self.db_datastore.ip_pool_delete_range(opctx, &authz_pool, range).await } diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 10a3c517e3..f70c6b4f6a 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -642,7 +642,7 @@ mod test { const PROJECT_NAME: &str = "springfield-squidport"; async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None, None).await; + create_ip_pool(&client, "p0", None).await; create_organization(&client, ORG_NAME).await; let project = create_project(client, ORG_NAME, PROJECT_NAME).await; project.identity.id diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 59ae7e03f6..bdb60b934d 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -626,13 +626,15 @@ async fn sic_allocate_instance_snat_ip( OpContext::for_saga_action(&sagactx, &saga_params.serialized_authn); let instance_id = sagactx.lookup::("instance_id")?; let ip_id = sagactx.lookup::("snat_ip_id")?; + + let (.., pool) = datastore + .ip_pools_fetch_default_for(&opctx, authz::Action::CreateChild) + .await + .map_err(ActionError::action_failed)?; + let pool_id = pool.identity.id; + datastore - .allocate_instance_snat_ip( - &opctx, - ip_id, - saga_params.project_id, - instance_id, - ) + .allocate_instance_snat_ip(&opctx, ip_id, instance_id, pool_id) .await .map_err(ActionError::action_failed)?; Ok(()) @@ -684,13 +686,7 @@ async fn sic_allocate_instance_external_ip( } }; datastore - .allocate_instance_ephemeral_ip( - &opctx, - ip_id, - saga_params.project_id, - instance_id, - pool_name, - ) + .allocate_instance_ephemeral_ip(&opctx, ip_id, instance_id, pool_name) .await .map_err(ActionError::action_failed)?; Ok(()) @@ -999,9 +995,9 @@ mod test { use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use dropshot::test_util::ClientTestContext; use nexus_test_utils::resource_helpers::create_disk; - use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_organization; use nexus_test_utils::resource_helpers::create_project; + use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::{ @@ -1018,7 +1014,7 @@ mod test { const DISK_NAME: &str = "my-disk"; async fn create_org_project_and_disk(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_organization(&client, ORG_NAME).await; let project = create_project(client, ORG_NAME, PROJECT_NAME).await; create_disk(&client, ORG_NAME, PROJECT_NAME, DISK_NAME).await; diff --git a/nexus/src/db/datastore/external_ip.rs b/nexus/src/db/datastore/external_ip.rs index 
4c90846192..c7afdc8c06 100644 --- a/nexus/src/db/datastore/external_ip.rs +++ b/nexus/src/db/datastore/external_ip.rs @@ -5,6 +5,7 @@ //! [`DataStore`] methods on [`ExternalIp`]s. use super::DataStore; +use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::error::public_error_from_diesel_pool; @@ -12,7 +13,6 @@ use crate::db::error::ErrorHandler; use crate::db::model::ExternalIp; use crate::db::model::IncompleteExternalIp; use crate::db::model::IpKind; -use crate::db::model::IpPool; use crate::db::model::Name; use crate::db::queries::external_ip::NextExternalIp; use crate::db::update_and_check::UpdateAndCheck; @@ -24,8 +24,8 @@ use nexus_types::identity::Resource; use omicron_common::api::external::CreateResult; use omicron_common::api::external::Error; use omicron_common::api::external::LookupResult; -use omicron_common::api::external::LookupType; -use omicron_common::api::external::ResourceType; +use omicron_common::api::external::Name as ExternalName; +use std::str::FromStr; use uuid::Uuid; impl DataStore { @@ -34,14 +34,13 @@ impl DataStore { &self, opctx: &OpContext, ip_id: Uuid, - project_id: Uuid, instance_id: Uuid, + pool_id: Uuid, ) -> CreateResult { let data = IncompleteExternalIp::for_instance_source_nat( ip_id, - project_id, instance_id, - /* pool_id = */ None, + pool_id, ); self.allocate_external_ip(opctx, data).await } @@ -51,53 +50,19 @@ impl DataStore { &self, opctx: &OpContext, ip_id: Uuid, - project_id: Uuid, instance_id: Uuid, pool_name: Option, ) -> CreateResult { - let pool_id = if let Some(ref name) = pool_name { - // We'd like to add authz checks here, and use the `LookupPath` - // methods on the project-scoped view of this resource. It's not - // entirely clear how that'll work in the API, so see RFD 288 and - // https://github.com/oxidecomputer/omicron/issues/1470 for more - // details. - // - // For now, we just ensure that the pool is either unreserved, or - // reserved for the instance's project. - use db::schema::ip_pool::dsl; - Some( - dsl::ip_pool - .filter(dsl::name.eq(name.clone())) - .filter(dsl::time_deleted.is_null()) - .filter( - dsl::project_id - .is_null() - .or(dsl::project_id.eq(Some(project_id))), - ) - .select(IpPool::as_select()) - .first_async::(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByLookup( - ResourceType::IpPool, - LookupType::ByName(name.to_string()), - ), - ) - })? 
- .identity - .id, - ) - } else { - None - }; - let data = IncompleteExternalIp::for_ephemeral( - ip_id, - project_id, - instance_id, - pool_id, - ); + let name = pool_name.unwrap_or_else(|| { + Name(ExternalName::from_str("default").unwrap()) + }); + let (.., pool) = self + .ip_pools_fetch_for(opctx, authz::Action::CreateChild, &name) + .await?; + let pool_id = pool.identity.id; + + let data = + IncompleteExternalIp::for_ephemeral(ip_id, instance_id, pool_id); self.allocate_external_ip(opctx, data).await } @@ -106,10 +71,8 @@ impl DataStore { &self, opctx: &OpContext, ip_id: Uuid, - rack_id: Uuid, ) -> CreateResult { - let (.., pool) = - self.ip_pools_lookup_by_rack_id(opctx, rack_id).await?; + let (.., pool) = self.ip_pools_service_lookup(opctx).await?; let data = IncompleteExternalIp::for_service(ip_id, pool.id()); self.allocate_external_ip(opctx, data).await diff --git a/nexus/src/db/datastore/ip_pool.rs b/nexus/src/db/datastore/ip_pool.rs index 55a725d503..56e834739e 100644 --- a/nexus/src/db/datastore/ip_pool.rs +++ b/nexus/src/db/datastore/ip_pool.rs @@ -20,7 +20,6 @@ use crate::db::model::IpPool; use crate::db::model::IpPoolRange; use crate::db::model::IpPoolUpdate; use crate::db::model::Name; -use crate::db::model::Project; use crate::db::pagination::paginated; use crate::db::queries::ip_pool::FilterOverlappingIpRanges; use crate::external_api::params; @@ -36,8 +35,10 @@ use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; +use omicron_common::api::external::Name as ExternalName; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; +use std::str::FromStr; use uuid::Uuid; impl DataStore { @@ -52,7 +53,7 @@ impl DataStore { .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) .await?; paginated(dsl::ip_pool, dsl::name, pagparams) - .filter(dsl::rack_id.is_null()) + .filter(dsl::internal.eq(false)) .filter(dsl::time_deleted.is_null()) .select(db::model::IpPool::as_select()) .get_results_async(self.pool_authorized(opctx).await?) @@ -71,7 +72,7 @@ impl DataStore { .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) .await?; paginated(dsl::ip_pool, dsl::id, pagparams) - .filter(dsl::rack_id.is_null()) + .filter(dsl::internal.eq(false)) .filter(dsl::time_deleted.is_null()) .select(db::model::IpPool::as_select()) .get_results_async(self.pool_authorized(opctx).await?) @@ -79,15 +80,44 @@ impl DataStore { .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) } - /// Looks up an IP pool by a particular Rack ID. + /// Looks up the default IP pool by name. + pub(crate) async fn ip_pools_fetch_default_for( + &self, + opctx: &OpContext, + action: authz::Action, + ) -> LookupResult<(authz::IpPool, IpPool)> { + self.ip_pools_fetch_for( + opctx, + action, + &Name(ExternalName::from_str("default").unwrap()), + ) + .await + } + + /// Looks up an IP pool by name. + pub(crate) async fn ip_pools_fetch_for( + &self, + opctx: &OpContext, + action: authz::Action, + name: &Name, + ) -> LookupResult<(authz::IpPool, IpPool)> { + let (.., authz_pool, pool) = LookupPath::new(opctx, &self) + .ip_pool_name(&name) + .fetch_for(action) + .await?; + if pool.internal { + return Err(authz_pool.not_found()); + } + + Ok((authz_pool, pool)) + } + + /// Looks up an IP pool intended for internal services. 
    ///
-    /// An index exists to look up pools by rack ID, but it is not a primary
-    /// key, which requires this lookup function to be used instead of the
-    /// [`LookupPath`] utility.
-    pub async fn ip_pools_lookup_by_rack_id(
+    /// This method may require an index by Availability Zone in the future.
+    pub async fn ip_pools_service_lookup(
         &self,
         opctx: &OpContext,
-        rack_id: Uuid,
     ) -> LookupResult<(authz::IpPool, IpPool)> {
         use db::schema::ip_pool::dsl;
@@ -98,7 +128,7 @@ impl DataStore {
             .await
             .map_err(|e| match e {
                 Error::Forbidden => {
-                    LookupType::ByCompositeId(format!("Rack ID: {rack_id}"))
+                    LookupType::ByCompositeId("Service IP Pool".to_string())
                         .into_not_found(ResourceType::IpPool)
                 }
                 _ => e,
@@ -106,7 +136,7 @@ impl DataStore {

         // Look up the IP pool reserved for internal services.
         let (authz_pool, pool) = dsl::ip_pool
-            .filter(dsl::rack_id.eq(Some(rack_id)))
+            .filter(dsl::internal.eq(true))
             .filter(dsl::time_deleted.is_null())
             .select(IpPool::as_select())
             .get_result_async(self.pool_authorized(opctx).await?)
@@ -117,9 +147,9 @@ impl DataStore {
                     authz::IpPool::new(
                         authz::FLEET,
                         ip_pool.id(),
-                        LookupType::ByCompositeId(format!(
-                            "Rack ID: {rack_id}"
-                        )),
+                        LookupType::ByCompositeId(
+                            "Service IP Pool".to_string(),
+                        ),
                     ),
                     ip_pool,
                 )
@@ -129,80 +159,31 @@ impl DataStore {

     /// Creates a new IP pool.
     ///
-    /// - If `rack_id` is provided, this IP pool is used for Oxide
-    ///   services.
+    /// - If `internal` is set, this IP pool is used for Oxide services.
     pub async fn ip_pool_create(
         &self,
         opctx: &OpContext,
         new_pool: &params::IpPoolCreate,
-        rack_id: Option<Uuid>,
+        internal: bool,
     ) -> CreateResult<IpPool> {
         use db::schema::ip_pool::dsl;
         opctx
             .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST)
             .await?;
-        let maybe_authz_project = match new_pool.project.clone() {
-            None => None,
-            Some(project) => {
-                if let Some(_) = &rack_id {
-                    return Err(Error::invalid_request(
-                        "Internal Service IP pools cannot be project-scoped",
-                    ));
-                }
-
-                let (.., authz_project) = LookupPath::new(opctx, self)
-                    .organization_name(&Name(project.organization))
-                    .project_name(&Name(project.project))
-                    .lookup_for(authz::Action::CreateChild)
-                    .await?;
-                Some(authz_project)
-            }
-        };
-
-        let maybe_project_id = maybe_authz_project
-            .as_ref()
-            .map(|authz_project| authz_project.id());
-        let pool = IpPool::new(&new_pool.identity, maybe_project_id, rack_id);
+        let pool = IpPool::new(&new_pool.identity, internal);
         let pool_name = pool.name().as_str().to_string();

-        if let Some(authz_project) = maybe_authz_project {
-            opctx.authorize(authz::Action::CreateChild, &authz_project).await?;
-            Project::insert_resource(
-                authz_project.id(),
-                diesel::insert_into(dsl::ip_pool).values(pool),
-            )
-            .insert_and_get_result_async(self.pool_authorized(opctx).await?)
+        diesel::insert_into(dsl::ip_pool)
+            .values(pool)
+            .returning(IpPool::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
             .await
-            .map_err(|e| match e {
-                AsyncInsertError::CollectionNotFound => {
-                    authz_project.not_found()
-                }
-                AsyncInsertError::DatabaseError(e) => {
-                    public_error_from_diesel_pool(
-                        e,
-                        ErrorHandler::Conflict(
-                            ResourceType::IpPool,
-                            &pool_name,
-                        ),
-                    )
-                }
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(ResourceType::IpPool, &pool_name),
+                )
             })
-        } else {
-            diesel::insert_into(dsl::ip_pool)
-                .values(pool)
-                .returning(IpPool::as_returning())
-                .get_result_async(self.pool_authorized(opctx).await?)
- .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::IpPool, - &pool_name, - ), - ) - }) - } } pub async fn ip_pool_delete( @@ -239,7 +220,7 @@ impl DataStore { // in between the above check for children and this query. let now = Utc::now(); let updated_rows = diesel::update(dsl::ip_pool) - .filter(dsl::rack_id.is_null()) + .filter(dsl::internal.eq(false)) .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_pool.id())) .filter(dsl::rcgen.eq(db_pool.rcgen)) @@ -271,7 +252,7 @@ impl DataStore { use db::schema::ip_pool::dsl; opctx.authorize(authz::Action::Modify, authz_pool).await?; diesel::update(dsl::ip_pool) - .filter(dsl::rack_id.is_null()) + .filter(dsl::internal.eq(false)) .filter(dsl::id.eq(authz_pool.id())) .filter(dsl::time_deleted.is_null()) .set(updates) @@ -312,13 +293,12 @@ impl DataStore { &self, opctx: &OpContext, authz_pool: &authz::IpPool, - db_pool: &IpPool, range: &IpRange, ) -> CreateResult { use db::schema::ip_pool_range::dsl; opctx.authorize(authz::Action::CreateChild, authz_pool).await?; let pool_id = authz_pool.id(); - let new_range = IpPoolRange::new(range, pool_id, db_pool.project_id); + let new_range = IpPoolRange::new(range, pool_id); let filter_subquery = FilterOverlappingIpRanges { range: new_range }; let insert_query = diesel::insert_into(dsl::ip_pool_range).values(filter_subquery); diff --git a/nexus/src/db/datastore/mod.rs b/nexus/src/db/datastore/mod.rs index 1e779c796f..460fabfa95 100644 --- a/nexus/src/db/datastore/mod.rs +++ b/nexus/src/db/datastore/mod.rs @@ -1085,7 +1085,6 @@ mod test { time_deleted: None, ip_pool_id: Uuid::new_v4(), ip_pool_range_id: Uuid::new_v4(), - project_id: Some(Uuid::new_v4()), instance_id: Some(instance_id), kind: IpKind::Ephemeral, ip: ipnetwork::IpNetwork::from(IpAddr::from(Ipv4Addr::new( @@ -1144,7 +1143,6 @@ mod test { time_deleted: None, ip_pool_id: Uuid::new_v4(), ip_pool_range_id: Uuid::new_v4(), - project_id: Some(Uuid::new_v4()), instance_id: Some(Uuid::new_v4()), kind: IpKind::SNat, ip: ipnetwork::IpNetwork::from(IpAddr::from(Ipv4Addr::new( @@ -1215,7 +1213,6 @@ mod test { time_deleted: None, ip_pool_id: Uuid::new_v4(), ip_pool_range_id: Uuid::new_v4(), - project_id: Some(Uuid::new_v4()), instance_id: Some(Uuid::new_v4()), kind: IpKind::Floating, ip: addresses.next().unwrap().into(), diff --git a/nexus/src/db/datastore/project.rs b/nexus/src/db/datastore/project.rs index 702a6b4eae..6c28c0dbc1 100644 --- a/nexus/src/db/datastore/project.rs +++ b/nexus/src/db/datastore/project.rs @@ -126,8 +126,6 @@ impl DataStore { generate_fn_to_ensure_none_in_project!(image, name, String); generate_fn_to_ensure_none_in_project!(snapshot, name, String); generate_fn_to_ensure_none_in_project!(vpc, name, String); - generate_fn_to_ensure_none_in_project!(ip_pool, name, String); - generate_fn_to_ensure_none_in_project!(external_ip); /// Delete a project pub async fn project_delete( @@ -144,8 +142,6 @@ impl DataStore { self.ensure_no_images_in_project(opctx, authz_project).await?; self.ensure_no_snapshots_in_project(opctx, authz_project).await?; self.ensure_no_vpcs_in_project(opctx, authz_project).await?; - self.ensure_no_ip_pools_in_project(opctx, authz_project).await?; - self.ensure_no_external_ips_in_project(opctx, authz_project).await?; use db::schema::project::dsl; diff --git a/nexus/src/db/queries/external_ip.rs b/nexus/src/db/queries/external_ip.rs index 2496fdbde3..9a284967ce 100644 --- a/nexus/src/db/queries/external_ip.rs +++ b/nexus/src/db/queries/external_ip.rs @@ 
-9,7 +9,6 @@ use crate::db::model::ExternalIp; use crate::db::model::IncompleteExternalIp; use crate::db::model::IpKind; use crate::db::model::IpKindEnum; -use crate::db::model::IpSource; use crate::db::model::Name; use crate::db::pool::DbConnection; use crate::db::schema; @@ -79,7 +78,6 @@ const MAX_PORT: i32 = u16::MAX as _; /// AS time_created, /// AS time_modified, /// NULL AS time_deleted, -/// AS project_id, /// AS instance_id, /// ip_pool_id, /// ip_pool_range_id, @@ -186,30 +184,7 @@ const MAX_PORT: i32 = u16::MAX as _; /// Pool restriction /// ---------------- /// -/// Clients may optionally request an external address from a specific IP Pool. -/// If they don't provide a pool, the query is restricted to all IP Pools -/// available to their project (not reserved for a different project). In that -/// case, the pool restriction looks like: -/// -/// ```sql -/// (project_id = OR project_id IS NULL) -/// ``` -/// -/// That is, we search for all pools that are unreserved for reserved for _this -/// instance's_ project. -/// -/// If the client does provide a pool, an additional filtering clause is added, -/// so that the whole filter looks like: -/// -/// ```sql -/// pool_id = AND -/// (project_id = OR project_id IS NULL) -/// ``` -/// -/// That filter on `project_id` is a bit redundant, as the caller should be -/// looking up the pool's ID by name, among those available to the instance's -/// project. However, it helps provides safety in the case that the caller -/// violates that expectation. +/// Clients must supply the UUID of an IP pool from which they are allocating. #[derive(Debug, Clone)] pub struct NextExternalIp { ip: IncompleteExternalIp, @@ -294,20 +269,6 @@ impl NextExternalIp { out.push_identifier(dsl::ip_pool_range_id::NAME)?; out.push_sql(", "); - // Project ID - match self.ip.source() { - IpSource::Instance { project_id, .. } => { - out.push_bind_param::(project_id)?; - } - IpSource::Service { .. } => { - out.push_bind_param::, Option>(&None)?; - } - }; - - out.push_sql(" AS "); - out.push_identifier(dsl::project_id::NAME)?; - out.push_sql(", "); - // Instance ID out.push_bind_param::, Option>(self.ip.instance_id())?; out.push_sql(" AS "); @@ -428,9 +389,7 @@ impl NextExternalIp { } // Push a subquery that selects the sequence of IP addresses, from each range in - // each IP Pool, along with the pool/range IDs. Note that we only allow - // allocation out of IP Pools that match the project_id of the instance, or - // which have no project ID (the pool is not restricted to a project) + // each IP Pool, along with the pool/range IDs. 
// // ```sql // SELECT @@ -463,28 +422,9 @@ impl NextExternalIp { out.push_sql(") AS candidate_ip FROM "); IP_POOL_RANGE_FROM_CLAUSE.walk_ast(out.reborrow())?; out.push_sql(" WHERE "); - match self.ip.source() { - IpSource::Instance { project_id, pool_id } => { - if let Some(pool_id) = pool_id { - out.push_identifier(dsl::ip_pool_id::NAME)?; - out.push_sql(" = "); - out.push_bind_param::(pool_id)?; - } else { - out.push_sql("("); - out.push_identifier(dsl::project_id::NAME)?; - out.push_sql(" = "); - out.push_bind_param::(project_id)?; - out.push_sql(" OR "); - out.push_identifier(dsl::project_id::NAME)?; - out.push_sql(" IS NULL)"); - } - } - IpSource::Service { pool_id } => { - out.push_identifier(dsl::ip_pool_id::NAME)?; - out.push_sql(" = "); - out.push_bind_param::(pool_id)?; - } - } + out.push_identifier(dsl::ip_pool_id::NAME)?; + out.push_sql(" = "); + out.push_bind_param::(self.ip.pool_id())?; out.push_sql(" AND "); out.push_identifier(dsl::time_deleted::NAME)?; out.push_sql(" IS NULL"); @@ -653,7 +593,6 @@ mod tests { use async_bb8_diesel::AsyncRunQueryDsl; use dropshot::test_util::LogContext; use nexus_test_utils::db::test_setup_database; - use nexus_test_utils::RACK_UUID; use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_test_utils::dev; @@ -689,16 +628,14 @@ mod tests { &self, name: &str, range: IpRange, - project_id: Option, - rack_id: Option, + internal: bool, ) { let pool = IpPool::new( &IdentityMetadataCreateParams { name: String::from(name).parse().unwrap(), description: format!("ip pool {}", name), }, - project_id, - rack_id, + internal, ); diesel::insert_into(crate::db::schema::ip_pool::dsl::ip_pool) @@ -712,7 +649,7 @@ mod tests { .await .expect("Failed to create IP Pool"); - let pool_range = IpPoolRange::new(&range, pool.id(), project_id); + let pool_range = IpPoolRange::new(&range, pool.id()); diesel::insert_into( crate::db::schema::ip_pool_range::dsl::ip_pool_range, ) @@ -724,31 +661,25 @@ mod tests { .expect("Failed to create IP Pool range"); } - async fn create_rack_ip_pool( - &self, - name: &str, - range: IpRange, - rack_id: Uuid, - ) { - self.create_ip_pool_internal( - name, - range, - /* project_id= */ None, - Some(rack_id), - ) - .await; + async fn create_service_ip_pool(&self, name: &str, range: IpRange) { + self.create_ip_pool_internal(name, range, /*internal=*/ true).await; } - async fn create_ip_pool( - &self, - name: &str, - range: IpRange, - project_id: Option, - ) { - self.create_ip_pool_internal( - name, range, project_id, /* rack_id= */ None, - ) - .await; + async fn create_ip_pool(&self, name: &str, range: IpRange) { + self.create_ip_pool_internal(name, range, /*internal=*/ false) + .await; + } + + async fn default_pool_id(&self) -> Uuid { + let (.., pool) = self + .db_datastore + .ip_pools_fetch_default_for( + &self.opctx, + crate::authz::Action::ListChildren, + ) + .await + .expect("Failed to lookup default ip pool"); + pool.identity.id } async fn success(mut self) { @@ -767,8 +698,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 1), )) .unwrap(); - context.create_ip_pool("p0", range, None).await; - let project_id = Uuid::new_v4(); + context.create_ip_pool("default", range).await; for first_port in (0..super::MAX_PORT).step_by(super::NUM_SOURCE_NAT_PORTS) { @@ -779,8 +709,8 @@ mod tests { .allocate_instance_snat_ip( &context.opctx, id, - project_id, instance_id, + context.default_pool_id().await, ) .await .expect("Failed to allocate instance external IP address"); @@ -799,8 +729,8 @@ mod 
tests {
             .allocate_instance_snat_ip(
                 &context.opctx,
                 Uuid::new_v4(),
-                project_id,
                 instance_id,
+                context.default_pool_id().await,
             )
             .await
             .expect_err(
@@ -825,8 +755,7 @@ mod tests {
             Ipv4Addr::new(10, 0, 0, 1),
         ))
         .unwrap();
-        context.create_ip_pool("p0", range, None).await;
-        let project_id = Uuid::new_v4();
+        context.create_ip_pool("default", range).await;

         // Allocate an Ephemeral IP, which should take the entire port range of
         // the only address in the pool.
@@ -836,7 +765,6 @@ mod tests {
             .allocate_instance_ephemeral_ip(
                 &context.opctx,
                 Uuid::new_v4(),
-                project_id,
                 instance_id,
                 /* pool_name = */ None,
             )
@@ -857,8 +785,8 @@ mod tests {
             .allocate_instance_snat_ip(
                 &context.opctx,
                 Uuid::new_v4(),
-                project_id,
                 instance_id,
+                context.default_pool_id().await,
             )
             .await;
         assert!(
@@ -879,7 +807,6 @@ mod tests {
             .allocate_instance_ephemeral_ip(
                 &context.opctx,
                 Uuid::new_v4(),
-                project_id,
                 instance_id,
                 /* pool_name = */ None,
             )
@@ -912,7 +839,7 @@ mod tests {
             Ipv4Addr::new(10, 0, 0, 3),
         ))
         .unwrap();
-        context.create_ip_pool("p0", range, None).await;
+        context.create_ip_pool("default", range).await;

         // TODO-completeness: Implementing Iterator for IpRange would be nice.
         let addresses = [
@@ -925,7 +852,6 @@ mod tests {

         // Allocate two addresses
         let mut ips = Vec::with_capacity(2);
-        let project_id = Uuid::new_v4();
         for (expected_ip, expected_first_port) in external_ips.clone().take(2) {
             let instance_id = Uuid::new_v4();
             let ip = context
@@ -933,8 +859,8 @@ mod tests {
             .allocate_instance_snat_ip(
                 &context.opctx,
                 Uuid::new_v4(),
-                project_id,
                 instance_id,
+                context.default_pool_id().await,
             )
             .await
             .expect("Failed to allocate instance external IP address");
@@ -962,8 +888,8 @@ mod tests {
             .allocate_instance_snat_ip(
                 &context.opctx,
                 Uuid::new_v4(),
-                project_id,
                 instance_id,
+                context.default_pool_id().await,
             )
             .await
             .expect("Failed to allocate instance external IP address");
@@ -989,8 +915,8 @@ mod tests {
             .allocate_instance_snat_ip(
                 &context.opctx,
                 Uuid::new_v4(),
-                project_id,
                 instance_id,
+                context.default_pool_id().await,
             )
             .await
             .expect("Failed to allocate instance external IP address");
@@ -1016,10 +942,9 @@ mod tests {
             Ipv4Addr::new(10, 0, 0, 3),
         ))
         .unwrap();
-        context.create_ip_pool("p0", range, None).await;
+        context.create_ip_pool("default", range).await;

         let instance_id = Uuid::new_v4();
-        let project_id = Uuid::new_v4();
         let id = Uuid::new_v4();
         let pool_name = None;

@@ -1028,7 +953,6 @@ mod tests {
             .allocate_instance_ephemeral_ip(
                 &context.opctx,
                 id,
-                project_id,
                 instance_id,
                 pool_name,
             )
@@ -1042,98 +966,24 @@ mod tests {
         context.success().await;
     }

-    #[tokio::test]
-    async fn test_next_external_ip_is_restricted_to_projects() {
-        let context =
-            TestContext::new("test_next_external_ip_is_restricted_to_projects")
-                .await;
-
-        // Create one pool restricted to a project, and one not.
-        let project_id = Uuid::new_v4();
-        let first_range = IpRange::try_from((
-            Ipv4Addr::new(10, 0, 0, 1),
-            Ipv4Addr::new(10, 0, 0, 3),
-        ))
-        .unwrap();
-        context.create_ip_pool("p0", first_range, Some(project_id)).await;
-
-        let second_range = IpRange::try_from((
-            Ipv4Addr::new(10, 0, 0, 4),
-            Ipv4Addr::new(10, 0, 0, 6),
-        ))
-        .unwrap();
-        context.create_ip_pool("p1", second_range, None).await;
-
-        // Allocating an address on an instance in a _different_ project should
-        // get an address from the second pool.
- let instance_id = Uuid::new_v4(); - let instance_project_id = Uuid::new_v4(); - let id = Uuid::new_v4(); - let pool_name = None; - - let ip = context - .db_datastore - .allocate_instance_ephemeral_ip( - &context.opctx, - id, - instance_project_id, - instance_id, - pool_name, - ) - .await - .expect("Failed to allocate instance ephemeral IP address"); - assert_eq!(ip.kind, IpKind::Ephemeral); - assert_eq!(ip.ip.ip(), second_range.first_address()); - assert_eq!(ip.first_port.0, 0); - assert_eq!(ip.last_port.0, u16::MAX); - assert_eq!(ip.project_id.unwrap(), instance_project_id); - - // Allocating an address on an instance in the same project should get - // an address from the first pool. - let instance_id = Uuid::new_v4(); - let id = Uuid::new_v4(); - let pool_name = None; - - let ip = context - .db_datastore - .allocate_instance_ephemeral_ip( - &context.opctx, - id, - project_id, - instance_id, - pool_name, - ) - .await - .expect("Failed to allocate instance ephemeral IP address"); - assert_eq!(ip.kind, IpKind::Ephemeral); - assert_eq!(ip.ip.ip(), first_range.first_address()); - assert_eq!(ip.first_port.0, 0); - assert_eq!(ip.last_port.0, u16::MAX); - assert_eq!(ip.project_id.unwrap(), project_id); - - context.success().await; - } - #[tokio::test] async fn test_next_external_ip_for_service() { let context = TestContext::new("test_next_external_ip_for_service").await; - // Create an IP pool without an associated project. - let rack_id = Uuid::parse_str(RACK_UUID).unwrap(); let ip_range = IpRange::try_from(( Ipv4Addr::new(10, 0, 0, 1), Ipv4Addr::new(10, 0, 0, 2), )) .unwrap(); - context.create_rack_ip_pool("p0", ip_range, rack_id).await; + context.create_service_ip_pool("for-nexus", ip_range).await; // Allocate an IP address as we would for an external, rack-associated // service. let id1 = Uuid::new_v4(); let ip1 = context .db_datastore - .allocate_service_ip(&context.opctx, id1, rack_id) + .allocate_service_ip(&context.opctx, id1) .await .expect("Failed to allocate service IP address"); assert_eq!(ip1.kind, IpKind::Service); @@ -1141,13 +991,12 @@ mod tests { assert_eq!(ip1.first_port.0, 0); assert_eq!(ip1.last_port.0, u16::MAX); assert!(ip1.instance_id.is_none()); - assert!(ip1.project_id.is_none()); // Allocate the next (last) IP address let id2 = Uuid::new_v4(); let ip2 = context .db_datastore - .allocate_service_ip(&context.opctx, id2, rack_id) + .allocate_service_ip(&context.opctx, id2) .await .expect("Failed to allocate service IP address"); assert_eq!(ip2.kind, IpKind::Service); @@ -1155,13 +1004,12 @@ mod tests { assert_eq!(ip2.first_port.0, 0); assert_eq!(ip2.last_port.0, u16::MAX); assert!(ip2.instance_id.is_none()); - assert!(ip2.project_id.is_none()); // Once we're out of IP addresses, test that we see the right error. let id3 = Uuid::new_v4(); let err = context .db_datastore - .allocate_service_ip(&context.opctx, id3, rack_id) + .allocate_service_ip(&context.opctx, id3) .await .expect_err("Should have failed to allocate after pool exhausted"); assert_eq!( @@ -1181,21 +1029,19 @@ mod tests { ) .await; - // Create an IP pool without an associated project. - let rack_id = Uuid::parse_str(RACK_UUID).unwrap(); let ip_range = IpRange::try_from(( Ipv4Addr::new(10, 0, 0, 1), Ipv4Addr::new(10, 0, 0, 2), )) .unwrap(); - context.create_rack_ip_pool("p0", ip_range, rack_id).await; + context.create_service_ip_pool("for-nexus", ip_range).await; // Allocate an IP address as we would for an external, rack-associated // service. 
let id = Uuid::new_v4(); let ip = context .db_datastore - .allocate_service_ip(&context.opctx, id, rack_id) + .allocate_service_ip(&context.opctx, id) .await .expect("Failed to allocate service IP address"); assert_eq!(ip.kind, IpKind::Service); @@ -1203,11 +1049,10 @@ mod tests { assert_eq!(ip.first_port.0, 0); assert_eq!(ip.last_port.0, u16::MAX); assert!(ip.instance_id.is_none()); - assert!(ip.project_id.is_none()); let ip_again = context .db_datastore - .allocate_service_ip(&context.opctx, id, rack_id) + .allocate_service_ip(&context.opctx, id) .await .expect("Failed to allocate service IP address"); @@ -1228,21 +1073,19 @@ mod tests { ) .await; - // Create an IP pool without an associated project. - let rack_id = Uuid::parse_str(RACK_UUID).unwrap(); let ip_range = IpRange::try_from(( Ipv4Addr::new(10, 0, 0, 1), Ipv4Addr::new(10, 0, 0, 1), )) .unwrap(); - context.create_rack_ip_pool("p0", ip_range, rack_id).await; + context.create_service_ip_pool("for-nexus", ip_range).await; // Allocate an IP address as we would for an external, rack-associated // service. let id = Uuid::new_v4(); let ip = context .db_datastore - .allocate_service_ip(&context.opctx, id, rack_id) + .allocate_service_ip(&context.opctx, id) .await .expect("Failed to allocate service IP address"); assert_eq!(ip.kind, IpKind::Service); @@ -1250,11 +1093,10 @@ mod tests { assert_eq!(ip.first_port.0, 0); assert_eq!(ip.last_port.0, u16::MAX); assert!(ip.instance_id.is_none()); - assert!(ip.project_id.is_none()); let ip_again = context .db_datastore - .allocate_service_ip(&context.opctx, id, rack_id) + .allocate_service_ip(&context.opctx, id) .await .expect("Failed to allocate service IP address"); @@ -1275,19 +1117,18 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.create_ip_pool("p0", range, None).await; + context.create_ip_pool("default", range).await; // Create one SNAT IP address. let instance_id = Uuid::new_v4(); let id = Uuid::new_v4(); - let project_id = Uuid::new_v4(); let ip = context .db_datastore .allocate_instance_snat_ip( &context.opctx, id, - project_id, instance_id, + context.default_pool_id().await, ) .await .expect("Failed to allocate instance SNAT IP address"); @@ -1298,7 +1139,6 @@ mod tests { usize::from(ip.last_port.0), super::NUM_SOURCE_NAT_PORTS - 1 ); - assert_eq!(ip.project_id.unwrap(), project_id); // Create a new IP, with the _same_ ID, and ensure we get back the same // value. @@ -1307,8 +1147,8 @@ mod tests { .allocate_instance_snat_ip( &context.opctx, id, - project_id, instance_id, + context.default_pool_id().await, ) .await .expect("Failed to allocate instance SNAT IP address"); @@ -1336,24 +1176,23 @@ mod tests { TestContext::new("test_next_external_ip_is_restricted_to_pools") .await; - // Create two pools, neither project-restricted. + // Create two pools let first_range = IpRange::try_from(( Ipv4Addr::new(10, 0, 0, 1), Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.create_ip_pool("p0", first_range, None).await; + context.create_ip_pool("default", first_range).await; let second_range = IpRange::try_from(( Ipv4Addr::new(10, 0, 0, 4), Ipv4Addr::new(10, 0, 0, 6), )) .unwrap(); - context.create_ip_pool("p1", second_range, None).await; + context.create_ip_pool("p1", second_range).await; // Allocating an address on an instance in the second pool should be // respected, even though there are IPs available in the first. 
let instance_id = Uuid::new_v4(); - let project_id = Uuid::new_v4(); let id = Uuid::new_v4(); let pool_name = Some(Name("p1".parse().unwrap())); @@ -1362,7 +1201,6 @@ mod tests { .allocate_instance_ephemeral_ip( &context.opctx, id, - project_id, instance_id, pool_name, ) @@ -1372,7 +1210,6 @@ mod tests { assert_eq!(ip.ip.ip(), second_range.first_address()); assert_eq!(ip.first_port.0, 0); assert_eq!(ip.last_port.0, u16::MAX); - assert_eq!(ip.project_id.unwrap(), project_id); context.success().await; } @@ -1384,22 +1221,20 @@ mod tests { ) .await; - // Create two pools, neither project-restricted. let first_range = IpRange::try_from(( Ipv4Addr::new(10, 0, 0, 1), Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.create_ip_pool("p0", first_range, None).await; + context.create_ip_pool("default", first_range).await; let first_address = Ipv4Addr::new(10, 0, 0, 4); let last_address = Ipv4Addr::new(10, 0, 0, 6); let second_range = IpRange::try_from((first_address, last_address)).unwrap(); - context.create_ip_pool("p1", second_range, None).await; + context.create_ip_pool("p1", second_range).await; // Allocate all available addresses in the second pool. let instance_id = Uuid::new_v4(); - let project_id = Uuid::new_v4(); let pool_name = Some(Name("p1".parse().unwrap())); let first_octet = first_address.octets()[3]; let last_octet = last_address.octets()[3]; @@ -1409,7 +1244,6 @@ mod tests { .allocate_instance_ephemeral_ip( &context.opctx, Uuid::new_v4(), - project_id, instance_id, pool_name.clone(), ) @@ -1429,7 +1263,6 @@ mod tests { .allocate_instance_ephemeral_ip( &context.opctx, Uuid::new_v4(), - project_id, instance_id, pool_name, ) diff --git a/nexus/src/db/queries/ip_pool.rs b/nexus/src/db/queries/ip_pool.rs index de5fe8cb18..6a28854a24 100644 --- a/nexus/src/db/queries/ip_pool.rs +++ b/nexus/src/db/queries/ip_pool.rs @@ -221,8 +221,6 @@ impl QueryFragment for FilterOverlappingIpRanges { out.push_sql(", "); out.push_bind_param::(&self.range.ip_pool_id)?; out.push_sql(", "); - out.push_bind_param::, Option>(&self.range.project_id)?; - out.push_sql(", "); out.push_bind_param::(&self.range.rcgen)?; out.push_sql(" WHERE NOT EXISTS("); diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 50b3435f5c..dc332153b5 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -107,23 +107,21 @@ pub fn external_api() -> NexusApiDescription { api.register(project_policy_view)?; api.register(project_policy_update)?; - // Customer-Accessible IP Pools API + // Operator-Accessible IP Pools API api.register(ip_pool_list)?; api.register(ip_pool_create)?; api.register(ip_pool_view)?; api.register(ip_pool_view_by_id)?; api.register(ip_pool_delete)?; api.register(ip_pool_update)?; - - // Operator-Accessible IP Pools API + // Variants for internal services api.register(ip_pool_service_view)?; - // Customer-Accessible IP Pool Range API (used by instances) + // Operator-Accessible IP Pool Range API api.register(ip_pool_range_list)?; api.register(ip_pool_range_add)?; api.register(ip_pool_range_remove)?; - - // Operator-Accessible IP Pool Range API (used by Oxide services) + // Variants for internal services api.register(ip_pool_service_range_list)?; api.register(ip_pool_service_range_add)?; api.register(ip_pool_service_range_remove)?; @@ -1569,23 +1567,20 @@ async fn ip_pool_update( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/// Fetch an IP pool used for Oxide services. 
+/// Fetch the IP pool used for Oxide services. #[endpoint { method = GET, - path = "/system/ip-pools-service/{rack_id}", + path = "/system/ip-pools-service", tags = ["system"], }] async fn ip_pool_service_view( rqctx: Arc>>, - path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.nexus; - let path = path_params.into_inner(); - let rack_id = path.rack_id; let handler = async { let opctx = OpContext::for_external_api(&rqctx).await?; - let pool = nexus.ip_pool_service_fetch(&opctx, rack_id).await?; + let pool = nexus.ip_pool_service_fetch(&opctx).await?; Ok(HttpResponseOk(IpPool::from(pool))) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await @@ -1687,29 +1682,21 @@ async fn ip_pool_range_remove( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -#[derive(Deserialize, JsonSchema)] -pub struct IpPoolServicePathParam { - pub rack_id: Uuid, -} - -/// List ranges for an IP pool used for Oxide services. +/// List ranges for the IP pool used for Oxide services. /// /// Ranges are ordered by their first address. #[endpoint { method = GET, - path = "/system/ip-pools-service/{rack_id}/ranges", + path = "/system/ip-pools-service/ranges", tags = ["system"], }] async fn ip_pool_service_range_list( rqctx: Arc>>, - path_params: Path, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.nexus; let query = query_params.into_inner(); - let path = path_params.into_inner(); - let rack_id = path.rack_id; let handler = async { let opctx = OpContext::for_external_api(&rqctx).await?; let marker = match query.page { @@ -1722,7 +1709,7 @@ async fn ip_pool_service_range_list( marker, }; let ranges = nexus - .ip_pool_service_list_ranges(&opctx, rack_id, &pag_params) + .ip_pool_service_list_ranges(&opctx, &pag_params) .await? .into_iter() .map(|range| range.into()) @@ -1741,23 +1728,19 @@ async fn ip_pool_service_range_list( /// Add a range to an IP pool used for Oxide services. #[endpoint { method = POST, - path = "/system/ip-pools-service/{rack_id}/ranges/add", + path = "/system/ip-pools-service/ranges/add", tags = ["system"], }] async fn ip_pool_service_range_add( rqctx: Arc>>, - path_params: Path, range_params: TypedBody, ) -> Result, HttpError> { let apictx = &rqctx.context(); let nexus = &apictx.nexus; - let path = path_params.into_inner(); - let rack_id = path.rack_id; let range = range_params.into_inner(); let handler = async { let opctx = OpContext::for_external_api(&rqctx).await?; - let out = - nexus.ip_pool_service_add_range(&opctx, rack_id, &range).await?; + let out = nexus.ip_pool_service_add_range(&opctx, &range).await?; Ok(HttpResponseCreated(out.into())) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await @@ -1766,22 +1749,19 @@ async fn ip_pool_service_range_add( /// Remove a range from an IP pool used for Oxide services. 
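///
/// A hedged sketch of the request (the path is the one registered below; the
/// JSON body shown for the `IpRange` is illustrative only, not an
/// authoritative wire format):
///
/// ```text
/// POST /system/ip-pools-service/ranges/remove
/// { "first": "10.0.0.1", "last": "10.0.0.5" }
/// ```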
#[endpoint { method = POST, - path = "/system/ip-pools-service/{rack_id}/ranges/remove", + path = "/system/ip-pools-service/ranges/remove", tags = ["system"], }] async fn ip_pool_service_range_remove( rqctx: Arc<RequestContext<Arc<ServerContext>>>, - path_params: Path<IpPoolServicePathParam>, range_params: TypedBody<shared::IpRange>, ) -> Result<HttpResponseUpdatedNoContent, HttpError> { let apictx = &rqctx.context(); let nexus = &apictx.nexus; - let path = path_params.into_inner(); - let rack_id = path.rack_id; let range = range_params.into_inner(); let handler = async { let opctx = OpContext::for_external_api(&rqctx).await?; - nexus.ip_pool_service_delete_range(&opctx, rack_id, &range).await?; + nexus.ip_pool_service_delete_range(&opctx, &range).await?; Ok(HttpResponseUpdatedNoContent()) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await diff --git a/nexus/src/populate.rs b/nexus/src/populate.rs index 8c201fba16..ae4cfe423e 100644 --- a/nexus/src/populate.rs +++ b/nexus/src/populate.rs @@ -288,16 +288,31 @@ impl Populator for PopulateRack { name: "oxide-service-pool".parse::<Name>().unwrap(), description: String::from("IP Pool for Oxide Services"), }, - project: None, }; datastore - .ip_pool_create(opctx, &params, Some(args.rack_id)) + .ip_pool_create(opctx, &params, /*internal=*/ true) .await .map(|_| ()) .or_else(|e| match e { Error::ObjectAlreadyExists { .. } => Ok(()), _ => Err(e), })?; + + let params = params::IpPoolCreate { + identity: IdentityMetadataCreateParams { + name: "default".parse::<Name>().unwrap(), + description: String::from("default IP pool"), + }, + }; + datastore + .ip_pool_create(opctx, &params, /*internal=*/ false) + .await + .map(|_| ()) + .or_else(|e| match e { + Error::ObjectAlreadyExists { .. } => Ok(()), + _ => Err(e), + })?; + Ok(()) } .boxed() diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index da344d4065..ea9591c010 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -63,22 +63,16 @@ where .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .expect("failed to make \"create\" request") + .expect(&format!("failed to make \"create\" request to {path}")) .parsed_body() .unwrap() } -/// Create an IP pool with a single range for testing. -/// -/// The IP range may be specified if it's important for testing the behavior -/// around specific subnets, or a large subnet (2 ** 16 addresses) will be -/// provided, if the `ip_range` argument is `None`. -pub async fn create_ip_pool( +pub async fn populate_ip_pool( client: &ClientTestContext, pool_name: &str, ip_range: Option<IpRange>, - project_path: Option<params::ProjectPath>, -) -> (IpPool, IpPoolRange) { +) -> IpPoolRange { let ip_range = ip_range.unwrap_or_else(|| { use std::net::Ipv4Addr; IpRange::try_from(( @@ -87,6 +81,25 @@ )) .unwrap() }); + let range = object_create( client, format!("/system/ip-pools/{}/ranges/add", pool_name).as_str(), &ip_range, ) .await; + range +} + +/// Create an IP pool with a single range for testing. +/// +/// The IP range may be specified if it's important for testing the behavior +/// around specific subnets; if `ip_range` is `None`, a large subnet +/// (2 ** 16 addresses) is provided.
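+/// +/// Illustrative usage (a sketch, not part of this change; assumes a test's +/// `client` handle): +/// ```ignore +/// // Seed the always-present "default" pool with an arbitrary range: +/// populate_ip_pool(&client, "default", None).await; +/// // Or create a brand-new pool together with its first range: +/// let (_pool, _range) = create_ip_pool(&client, "other-pool", None).await; +/// ```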
+pub async fn create_ip_pool( + client: &ClientTestContext, + pool_name: &str, + ip_range: Option<IpRange>, +) -> (IpPool, IpPoolRange) { let pool = object_create( client, "/system/ip-pools", &params::IpPoolCreate { identity: IdentityMetadataCreateParams { name: pool_name.parse().unwrap(), description: String::from("an ip pool"), }, - project: project_path, }, ) .await; - let range = object_create( - client, - format!("/system/ip-pools/{}/ranges/add", pool_name).as_str(), - &ip_range, - ) - .await; + let range = populate_ip_pool(client, pool_name, ip_range).await; (pool, range) } diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index e7cd2bd968..ae631019d5 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -19,10 +19,10 @@ use nexus_test_utils::identity_eq; use nexus_test_utils::resource_helpers::create_disk; use nexus_test_utils::resource_helpers::create_instance; use nexus_test_utils::resource_helpers::create_instance_with; -use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_organization; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::objects_list_page_authz; +use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::ByteCount; @@ -73,7 +73,7 @@ fn get_disk_detach_url(instance_name: &str) -> String { } async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_organization(&client, ORG_NAME).await; let project = create_project(client, ORG_NAME, PROJECT_NAME).await; project.identity.id diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 6051eeaf7a..4192bd5882 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -263,7 +263,7 @@ lazy_static! { network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![ - params::ExternalIpCreate::Ephemeral { pool_name: None } + params::ExternalIpCreate::Ephemeral { pool_name: Some(DEMO_IP_POOL_NAME.clone()) } ], disks: vec![], start: true, @@ -330,14 +330,13 @@ lazy_static! { // IP Pools pub static ref DEMO_IP_POOLS_URL: &'static str = "/system/ip-pools"; - pub static ref DEMO_IP_POOL_NAME: Name = "pool0".parse().unwrap(); + pub static ref DEMO_IP_POOL_NAME: Name = "default".parse().unwrap(); pub static ref DEMO_IP_POOL_CREATE: params::IpPoolCreate = params::IpPoolCreate { identity: IdentityMetadataCreateParams { name: DEMO_IP_POOL_NAME.clone(), description: String::from("an IP pool"), }, - project: None, }; pub static ref DEMO_IP_POOL_URL: String = format!("/system/ip-pools/{}", *DEMO_IP_POOL_NAME); pub static ref DEMO_IP_POOL_UPDATE: params::IpPoolUpdate = @@ -356,8 +355,7 @@ lazy_static!
{ pub static ref DEMO_IP_POOL_RANGES_DEL_URL: String = format!("{}/remove", *DEMO_IP_POOL_RANGES_URL); // IP Pools (Services) - pub static ref DEMO_IP_POOLS_SERVICE_URL: &'static str = "/system/ip-pools-service"; - pub static ref DEMO_IP_POOL_SERVICE_URL: String = format!("{}/{}", *DEMO_IP_POOLS_SERVICE_URL, RACK_UUID); + pub static ref DEMO_IP_POOL_SERVICE_URL: &'static str = "/system/ip-pools-service"; pub static ref DEMO_IP_POOL_SERVICE_RANGES_URL: String = format!("{}/ranges", *DEMO_IP_POOL_SERVICE_URL); pub static ref DEMO_IP_POOL_SERVICE_RANGES_ADD_URL: String = format!("{}/add", *DEMO_IP_POOL_SERVICE_RANGES_URL); pub static ref DEMO_IP_POOL_SERVICE_RANGES_DEL_URL: String = format!("{}/remove", *DEMO_IP_POOL_SERVICE_RANGES_URL); diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 594c957364..4e9079b39d 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -13,6 +13,7 @@ use nexus_test_utils::resource_helpers::create_disk; use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::object_create; use nexus_test_utils::resource_helpers::objects_list_page_authz; +use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use omicron_common::api::external::ByteCount; use omicron_common::api::external::Disk; @@ -48,7 +49,6 @@ use nexus_test_utils_macros::nexus_test; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; -static POOL_NAME: &str = "p0"; static ORGANIZATION_NAME: &str = "test-org"; static PROJECT_NAME: &str = "springfield-squidport"; @@ -61,7 +61,7 @@ fn get_instances_url() -> String { } async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_organization(&client, ORGANIZATION_NAME).await; let project = create_project(client, ORGANIZATION_NAME, PROJECT_NAME).await; project.identity.id @@ -127,7 +127,7 @@ async fn test_instances_access_before_create_returns_not_found( async fn test_v1_instance_access(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; let org = create_organization(&client, ORGANIZATION_NAME).await; let project = create_project(client, ORGANIZATION_NAME, PROJECT_NAME).await; @@ -190,14 +190,11 @@ async fn test_instances_create_reboot_halt( let apictx = &cptestctx.server.apictx; let nexus = &apictx.nexus; - // Create an IP pool and project that we'll use for testing. - create_ip_pool(&client, POOL_NAME, None, None).await; - create_organization(&client, ORGANIZATION_NAME).await; + create_org_and_project(&client).await; let url_instances = format!( "/organizations/{}/projects/{}/instances", ORGANIZATION_NAME, PROJECT_NAME ); - let _ = create_project(&client, ORGANIZATION_NAME, PROJECT_NAME).await; // Create an instance. let instance_url = format!("{}/just-rainsticks", url_instances); @@ -516,14 +513,11 @@ async fn test_instances_create_stopped_start( let apictx = &cptestctx.server.apictx; let nexus = &apictx.nexus; - // Create an IP pool and project that we'll use for testing. 
- create_ip_pool(&client, POOL_NAME, None, None).await; - create_organization(&client, ORGANIZATION_NAME).await; + create_org_and_project(&client).await; let url_instances = format!( "/organizations/{}/projects/{}/instances", ORGANIZATION_NAME, PROJECT_NAME ); - let _ = create_project(&client, ORGANIZATION_NAME, PROJECT_NAME).await; // Create an instance in a stopped state. let instance: Instance = object_create( @@ -572,14 +566,11 @@ async fn test_instances_delete_fails_when_running_succeeds_when_stopped( let apictx = &cptestctx.server.apictx; let nexus = &apictx.nexus; - // Create an IP pool and project that we'll use for testing. - create_ip_pool(&client, POOL_NAME, None, None).await; - create_organization(&client, ORGANIZATION_NAME).await; + create_org_and_project(&client).await; let url_instances = format!( "/organizations/{}/projects/{}/instances", ORGANIZATION_NAME, PROJECT_NAME ); - let _ = create_project(&client, ORGANIZATION_NAME, PROJECT_NAME).await; // Create an instance. let instance_url = format!("{}/just-rainsticks", url_instances); @@ -698,13 +689,11 @@ async fn test_instance_create_saga_removes_instance_database_record( let client = &cptestctx.external_client; // Create test IP pool, organization and project - create_ip_pool(&client, POOL_NAME, None, None).await; - create_organization(&client, ORGANIZATION_NAME).await; + create_org_and_project(&client).await; let url_instances = format!( "/organizations/{}/projects/{}/instances", ORGANIZATION_NAME, PROJECT_NAME ); - let _ = create_project(&client, ORGANIZATION_NAME, PROJECT_NAME).await; // The network interface parameters. let default_name = "default".parse::().unwrap(); @@ -810,14 +799,11 @@ async fn test_instance_with_single_explicit_ip_address( ) { let client = &cptestctx.external_client; - // Create test IP pool, organization and project - create_ip_pool(&client, POOL_NAME, None, None).await; - create_organization(&client, ORGANIZATION_NAME).await; + create_org_and_project(&client).await; let url_instances = format!( "/organizations/{}/projects/{}/instances", ORGANIZATION_NAME, PROJECT_NAME ); - let _ = create_project(&client, ORGANIZATION_NAME, PROJECT_NAME).await; // Create the parameters for the interface. let default_name = "default".parse::().unwrap(); @@ -886,14 +872,11 @@ async fn test_instance_with_new_custom_network_interfaces( ) { let client = &cptestctx.external_client; - // Create test IP pool, organization and project - create_ip_pool(&client, POOL_NAME, None, None).await; - create_organization(&client, ORGANIZATION_NAME).await; + create_org_and_project(&client).await; let url_instances = format!( "/organizations/{}/projects/{}/instances", ORGANIZATION_NAME, PROJECT_NAME ); - let _ = create_project(&client, ORGANIZATION_NAME, PROJECT_NAME).await; // Create a VPC Subnet other than the default. 
// @@ -1044,14 +1027,11 @@ async fn test_instance_create_delete_network_interface( let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx.nexus; - // Create test IP pool, organization and project - create_ip_pool(&client, POOL_NAME, None, None).await; - create_organization(&client, ORGANIZATION_NAME).await; + create_org_and_project(&client).await; let url_instances = format!( "/organizations/{}/projects/{}/instances", ORGANIZATION_NAME, PROJECT_NAME ); - let _ = create_project(&client, ORGANIZATION_NAME, PROJECT_NAME).await; // Create the VPC Subnet for the secondary interface let secondary_subnet = params::VpcSubnetCreate { @@ -1295,14 +1275,11 @@ async fn test_instance_update_network_interfaces( let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx.nexus; - // Create test IP pool, organization and project - create_ip_pool(&client, POOL_NAME, None, None).await; - create_organization(&client, ORGANIZATION_NAME).await; + create_org_and_project(&client).await; let url_instances = format!( "/organizations/{}/projects/{}/instances", ORGANIZATION_NAME, PROJECT_NAME ); - let _ = create_project(&client, ORGANIZATION_NAME, PROJECT_NAME).await; // Create the VPC Subnet for the secondary interface let secondary_subnet = params::VpcSubnetCreate { @@ -1778,15 +1755,9 @@ async fn test_instance_with_multiple_nics_unwinds_completely( async fn test_attach_one_disk_to_instance(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - const POOL_NAME: &str = "p0"; - const ORGANIZATION_NAME: &str = "bobs-barrel-of-bytes"; - const PROJECT_NAME: &str = "bit-barrel"; - // Test pre-reqs DiskTest::new(&cptestctx).await; - create_ip_pool(&client, POOL_NAME, None, None).await; - create_organization(&client, ORGANIZATION_NAME).await; - create_project(client, ORGANIZATION_NAME, PROJECT_NAME).await; + create_org_and_project(&client).await; // Create the "probablydata" disk create_disk(&client, ORGANIZATION_NAME, PROJECT_NAME, "probablydata").await; @@ -1872,15 +1843,9 @@ async fn test_instance_fails_to_boot_with_disk( // see: https://github.com/oxidecomputer/omicron/issues/1713 let client = &cptestctx.external_client; - const POOL_NAME: &str = "p0"; - const ORGANIZATION_NAME: &str = "bobs-barrel-of-bytes"; - const PROJECT_NAME: &str = "bit-barrel"; - // Test pre-reqs DiskTest::new(&cptestctx).await; - create_ip_pool(&client, POOL_NAME, None, None).await; - create_organization(&client, ORGANIZATION_NAME).await; - create_project(client, ORGANIZATION_NAME, PROJECT_NAME).await; + create_org_and_project(&client).await; // Create the "probablydata" disk create_disk(&client, ORGANIZATION_NAME, PROJECT_NAME, "probablydata").await; @@ -2175,15 +2140,9 @@ async fn test_attach_eight_disks_to_instance( ) { let client = &cptestctx.external_client; - const POOL_NAME: &str = "p0"; - const ORGANIZATION_NAME: &str = "bobs-barrel-of-bytes"; - const PROJECT_NAME: &str = "bit-barrel"; - // Test pre-reqs DiskTest::new(&cptestctx).await; - create_ip_pool(&client, POOL_NAME, None, None).await; - create_organization(&client, ORGANIZATION_NAME).await; - create_project(client, ORGANIZATION_NAME, PROJECT_NAME).await; + create_org_and_project(&client).await; // Make 8 disks for i in 0..8 { @@ -2388,17 +2347,9 @@ async fn test_cannot_attach_nine_disks_to_instance( #[nexus_test] async fn test_cannot_attach_faulted_disks(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - - // Test pre-reqs - const POOL_NAME: &str = "p0"; - const ORGANIZATION_NAME: 
&str = "bobs-barrel-of-bytes"; - const PROJECT_NAME: &str = "bit-barrel"; - // Test pre-reqs DiskTest::new(&cptestctx).await; - create_ip_pool(&client, POOL_NAME, None, None).await; - create_organization(&client, ORGANIZATION_NAME).await; - create_project(client, ORGANIZATION_NAME, PROJECT_NAME).await; + create_org_and_project(&client).await; // Make 8 disks for i in 0..8 { @@ -2527,15 +2478,9 @@ async fn test_disks_detached_when_instance_destroyed( ) { let client = &cptestctx.external_client; - const POOL_NAME: &str = "p0"; - const ORGANIZATION_NAME: &str = "bobs-barrel-of-bytes"; - const PROJECT_NAME: &str = "bit-barrel"; - // Test pre-reqs DiskTest::new(&cptestctx).await; - create_ip_pool(&client, POOL_NAME, None, None).await; - create_organization(&client, ORGANIZATION_NAME).await; - create_project(client, ORGANIZATION_NAME, PROJECT_NAME).await; + create_org_and_project(&client).await; // Make 8 disks for i in 0..8 { @@ -2778,14 +2723,11 @@ async fn test_instance_serial(cptestctx: &ControlPlaneTestContext) { let apictx = &cptestctx.server.apictx; let nexus = &apictx.nexus; - // Create a project that we'll use for testing. - create_ip_pool(client, POOL_NAME, None, None).await; - create_organization(client, ORGANIZATION_NAME).await; + create_org_and_project(&client).await; let url_instances = format!( "/organizations/{}/projects/{}/instances", ORGANIZATION_NAME, PROJECT_NAME ); - let _ = create_project(client, ORGANIZATION_NAME, PROJECT_NAME).await; // Make sure we get a 404 if we try to access the serial console before creation. let instance_serial_url = @@ -2863,27 +2805,23 @@ async fn test_instance_serial(cptestctx: &ControlPlaneTestContext) { } #[nexus_test] -async fn test_instance_ephemeral_ip_from_correct_project( +async fn test_instance_ephemeral_ip_from_correct_pool( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - // Create test organization and two projects. + // Create test organization and projects. create_organization(&client, ORGANIZATION_NAME).await; let url_instances = format!( "/organizations/{}/projects/{}/instances", ORGANIZATION_NAME, PROJECT_NAME ); let _ = create_project(&client, ORGANIZATION_NAME, PROJECT_NAME).await; - let _ = create_project(&client, ORGANIZATION_NAME, "restricted").await; // Create two IP pools. // - // The first is restricted to the "restricted" project, the second unrestricted. - let project_path = params::ProjectPath { - organization: Name::try_from(ORGANIZATION_NAME.to_string()).unwrap(), - project: Name::try_from("restricted".to_string()).unwrap(), - }; + // The first is given to the "default" pool, the provided to a distinct + // explicit pool. let first_range = IpRange::V4( Ipv4Range::new( std::net::Ipv4Addr::new(10, 0, 0, 1), @@ -2898,17 +2836,10 @@ async fn test_instance_ephemeral_ip_from_correct_project( ) .unwrap(), ); - create_ip_pool( - &client, - "restricted-pool", - Some(first_range), - Some(project_path), - ) - .await; - create_ip_pool(&client, "unrestricted-pool", Some(second_range), None) - .await; + populate_ip_pool(&client, "default", Some(first_range)).await; + create_ip_pool(&client, "other-pool", Some(second_range)).await; - // Create an instance in the default project, not the "dummy" project + // Create an instance explicitly using the "other-pool". 
let instance_params = params::InstanceCreate { identity: IdentityMetadataCreateParams { name: Name::try_from(String::from("ip-pool-test")).unwrap(), @@ -2920,7 +2851,9 @@ user_data: vec![], network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![params::ExternalIpCreate::Ephemeral { - pool_name: None, + pool_name: Some( + Name::try_from(String::from("other-pool")).unwrap(), + ), }], disks: vec![], start: true, @@ -2951,8 +2884,8 @@ ips.items[0].ip >= second_range.first_address() && ips.items[0].ip <= second_range.last_address(), "Expected the Ephemeral IP to come from the second address \ - range, since the first is reserved for a project different from \ - the instance's project." + range, since the first is reserved for the default pool, not \ + the requested pool." ); } diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 9bcd288d07..38f17a8535 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -43,20 +43,18 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { let ip_pool_add_range_url = format!("{}/add", ip_pool_ranges_url); - // Verify the list of IP pools is empty - assert_eq!( - NexusRequest::iter_collection_authn::<IpPool>( - client, - ip_pools_url, - "", - None - ) - .await - .expect("Failed to list IP Pools") - .all_items - .len(), - 0, - "Expected a list of zero IP pools" - ); + // Verify the list of IP pools contains only the default pool + let ip_pools = NexusRequest::iter_collection_authn::<IpPool>( + client, + ip_pools_url, + "", + None, + ) + .await + .expect("Failed to list IP Pools") + .all_items; + assert_eq!(ip_pools.len(), 1, "Expected to see default IP pool"); + + assert_eq!(ip_pools[0].identity.name, "default"); // Verify 404 if the pool doesn't exist yet, both for creating or deleting let error: HttpErrorResponseBody = NexusRequest::expect_failure( @@ -99,7 +97,6 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, - project: None, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, &params) @@ -121,8 +118,8 @@ .await .expect("Failed to list IP Pools") .all_items; - assert_eq!(list.len(), 1, "Expected exactly one IP pool"); - assert_pools_eq(&created_pool, &list[0]); + assert_eq!(list.len(), 2, "Expected exactly two IP pools"); + assert_pools_eq(&created_pool, &list[1]); let fetched_pool: IpPool = NexusRequest::object_get(client, &ip_pool_url) .authn_as(AuthnMode::PrivilegedUser) .await @@ -295,7 +292,6 @@ async fn test_ip_pool_range_overlapping_ranges_fails( name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, - project: None, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, &params) @@ -477,7 +473,6 @@ async fn test_ip_pool_range_pagination(cptestctx: &ControlPlaneTestContext) { name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, - project: None, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, &params) @@ -566,31 +561,13 @@ async fn test_ip_range_delete_with_allocated_external_ip_fails( let apictx = &cptestctx.server.apictx; let nexus = &apictx.nexus; let ip_pools_url = "/system/ip-pools"; - let pool_name = "p0"; - let description = "an ip
pool"; + let pool_name = "default"; let ip_pool_url = format!("{}/{}", ip_pools_url, pool_name); let ip_pool_ranges_url = format!("{}/ranges", ip_pool_url); let ip_pool_add_range_url = format!("{}/add", ip_pool_ranges_url); let ip_pool_rem_range_url = format!("{}/remove", ip_pool_ranges_url); - // Add a pool and range. - let params = IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: String::from(pool_name).parse().unwrap(), - description: String::from(description), - }, - project: None, - }; - let created_pool: IpPool = - NexusRequest::objects_post(client, ip_pools_url, ¶ms) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - assert_eq!(created_pool.identity.name, pool_name); - assert_eq!(created_pool.identity.description, description); + // Add an IP range to the default pool let range = IpRange::V4( Ipv4Range::new( std::net::Ipv4Addr::new(10, 0, 0, 1), @@ -684,8 +661,7 @@ async fn test_ip_range_delete_with_allocated_external_ip_fails( #[nexus_test] async fn test_ip_pool_service(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let ip_pool_url = - format!("/system/ip-pools-service/{}", nexus_test_utils::RACK_UUID); + let ip_pool_url = "/system/ip-pools-service".to_string(); let ip_pool_ranges_url = format!("{}/ranges", ip_pool_url); let ip_pool_add_range_url = format!("{}/add", ip_pool_ranges_url); let ip_pool_remove_range_url = format!("{}/remove", ip_pool_ranges_url); diff --git a/nexus/tests/integration_tests/projects.rs b/nexus/tests/integration_tests/projects.rs index cc220ba4aa..2ebe24b9ed 100644 --- a/nexus/tests/integration_tests/projects.rs +++ b/nexus/tests/integration_tests/projects.rs @@ -10,8 +10,8 @@ use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::resource_helpers::{ - create_disk, create_ip_pool, create_organization, create_project, - create_vpc, object_create, project_get, DiskTest, + create_disk, create_organization, create_project, create_vpc, + object_create, populate_ip_pool, project_get, DiskTest, }; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::ByteCount; @@ -165,7 +165,7 @@ async fn test_project_deletion_with_instance( let client = &cptestctx.external_client; let org_name = "test-org"; - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_organization(&client, &org_name).await; // Create a project that we'll use for testing. @@ -388,63 +388,3 @@ async fn test_project_deletion_with_vpc(cptestctx: &ControlPlaneTestContext) { .unwrap(); delete_project(&url, &client).await; } - -#[nexus_test] -async fn test_project_deletion_with_ip_pool( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - - let org_name = "test-org"; - create_organization(&client, &org_name).await; - - // Create a project that we'll use for testing. 
- let name = "springfield-squidport"; - let url = format!("/organizations/{}/projects/{}", org_name, name); - - create_project(&client, &org_name, &name).await; - delete_project_default_subnet(&url, &client).await; - delete_project_default_vpc(&url, &client).await; - - let pool_name = "p0"; - let params = params::IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: String::from(pool_name).parse().unwrap(), - description: String::from("description"), - }, - project: Some(params::ProjectPath { - organization: org_name.parse().unwrap(), - project: name.parse().unwrap(), - }), - }; - let ip_pools_url = "/system/ip-pools"; - let _: views::IpPool = - NexusRequest::objects_post(client, ip_pools_url, ¶ms) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - - assert_eq!( - "project to be deleted contains an ip pool: p0", - delete_project_expect_fail(&url, &client).await, - ); - - let ip_pool_url = format!("{ip_pools_url}/{pool_name}"); - NexusRequest::object_delete(client, &ip_pool_url) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap(); - - delete_project(&url, &client).await; -} - -// TODO(https://github.com/oxidecomputer/omicron/issues/1334): Once floating IPs -// are implemented, we should: -// - Create a project-scoped external IP -// - Attempt to delete the project (show that we cannot) -// - Delete the external IP -// - Delete the project diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index 87060ca34c..c7c5d536ff 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -11,10 +11,10 @@ use http::StatusCode; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; -use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_organization; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::object_create; +use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external; @@ -59,7 +59,7 @@ async fn create_org_and_project(client: &ClientTestContext) -> Uuid { async fn test_snapshot(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_org_and_project(client).await; let disks_url = get_disks_url(); @@ -190,7 +190,7 @@ async fn test_snapshot(cptestctx: &ControlPlaneTestContext) { async fn test_snapshot_without_instance(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_org_and_project(client).await; let disks_url = get_disks_url(); @@ -289,7 +289,7 @@ async fn test_snapshot_without_instance(cptestctx: &ControlPlaneTestContext) { async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_org_and_project(client).await; let disks_url = get_disks_url(); @@ -643,7 +643,7 @@ async fn 
test_cannot_snapshot_if_no_space(cptestctx: &ControlPlaneTestContext) { // Test that snapshots cannot be created if there is no space for the blocks let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_org_and_project(client).await; let disks_url = get_disks_url(); diff --git a/nexus/tests/integration_tests/subnet_allocation.rs b/nexus/tests/integration_tests/subnet_allocation.rs index 636157262e..b003265ea6 100644 --- a/nexus/tests/integration_tests/subnet_allocation.rs +++ b/nexus/tests/integration_tests/subnet_allocation.rs @@ -14,8 +14,8 @@ use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::resource_helpers::create_instance_with; -use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::objects_list_page_authz; +use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::{create_organization, create_project}; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::{ @@ -85,7 +85,7 @@ async fn test_subnet_allocation(cptestctx: &ControlPlaneTestContext) { let project_name = "springfield-squidport"; // Create a project that we'll use for testing. - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_organization(&client, organization_name).await; create_project(&client, organization_name, project_name).await; let url_instances = format!( diff --git a/nexus/tests/integration_tests/unauthorized.rs b/nexus/tests/integration_tests/unauthorized.rs index 7f6a6c300f..5b2e792572 100644 --- a/nexus/tests/integration_tests/unauthorized.rs +++ b/nexus/tests/integration_tests/unauthorized.rs @@ -69,7 +69,7 @@ async fn test_unauthorized(cptestctx: &ControlPlaneTestContext) { .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .unwrap(), + .expect(&format!("Failed to GET from URL: {url}")), id_routes, ), SetupReq::Post { url, body, id_routes } => ( @@ -78,7 +78,7 @@ async fn test_unauthorized(cptestctx: &ControlPlaneTestContext) { .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .unwrap(), + .expect(&format!("Failed to POST to URL: {url}")), id_routes, ), }; @@ -200,13 +200,12 @@ lazy_static! 
{ &*DEMO_SILO_USER_ID_SET_PASSWORD_URL, ], }, - // Create an IP pool - SetupReq::Post { - url: &*DEMO_IP_POOLS_URL, - body: serde_json::to_value(&*DEMO_IP_POOL_CREATE).unwrap(), + // Get the default IP pool + SetupReq::Get { + url: &*DEMO_IP_POOL_URL, id_routes: vec!["/system/by-id/ip-pools/{id}"], }, - // Create an IP Pool range + // Create an IP pool range SetupReq::Post { url: &*DEMO_IP_POOL_RANGES_ADD_URL, body: serde_json::to_value(&*DEMO_IP_POOL_RANGE).unwrap(), @@ -415,7 +414,7 @@ async fn verify_endpoint( .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .unwrap() + .expect(&format!("Failed to GET: {uri}")) .parsed_body::() .unwrap(), ) diff --git a/nexus/tests/integration_tests/volume_management.rs b/nexus/tests/integration_tests/volume_management.rs index f147389169..c6dcaf24d6 100644 --- a/nexus/tests/integration_tests/volume_management.rs +++ b/nexus/tests/integration_tests/volume_management.rs @@ -11,10 +11,10 @@ use http::StatusCode; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; -use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_organization; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::object_create; +use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::ByteCount; @@ -53,7 +53,7 @@ async fn create_org_and_project(client: &ClientTestContext) -> Uuid { } async fn create_global_image(client: &ClientTestContext) -> views::GlobalImage { - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_org_and_project(client).await; // Define a global image @@ -461,7 +461,7 @@ async fn test_multiple_disks_multiple_snapshots_order_1( // Test multiple disks with multiple snapshots let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_org_and_project(client).await; let disks_url = get_disks_url(); @@ -608,7 +608,7 @@ async fn test_multiple_disks_multiple_snapshots_order_2( // Test multiple disks with multiple snapshots, varying the delete order let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_org_and_project(client).await; let disks_url = get_disks_url(); @@ -894,7 +894,7 @@ async fn test_multiple_layers_of_snapshots_delete_all_disks_first( // delete all disks, then delete all snapshots let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_org_and_project(client).await; prepare_for_test_multiple_layers_of_snapshots(&client).await; @@ -935,7 +935,7 @@ async fn test_multiple_layers_of_snapshots_delete_all_snapshots_first( // delete all snapshots, then delete all disks let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_org_and_project(client).await; prepare_for_test_multiple_layers_of_snapshots(&client).await; @@ -976,7 
+976,7 @@ async fn test_multiple_layers_of_snapshots_random_delete_order( // delete snapshots and disks in a random order let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - create_ip_pool(&client, "p0", None, None).await; + populate_ip_pool(&client, "default", None).await; create_org_and_project(client).await; prepare_for_test_multiple_layers_of_snapshots(&client).await; diff --git a/nexus/tests/integration_tests/vpc_subnets.rs b/nexus/tests/integration_tests/vpc_subnets.rs index 0bf8eab37e..67f6ed77d0 100644 --- a/nexus/tests/integration_tests/vpc_subnets.rs +++ b/nexus/tests/integration_tests/vpc_subnets.rs @@ -14,8 +14,8 @@ use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::identity_eq; use nexus_test_utils::resource_helpers::objects_list_page_authz; use nexus_test_utils::resource_helpers::{ - create_instance, create_ip_pool, create_organization, create_project, - create_vpc, + create_instance, create_organization, create_project, create_vpc, + populate_ip_pool, }; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::IdentityMetadataCreateParams; @@ -40,7 +40,7 @@ async fn test_delete_vpc_subnet_with_interfaces_fails( let project_name = "springfield-squidport"; create_organization(&client, &org_name).await; let _ = create_project(&client, org_name, project_name).await; - create_ip_pool(client, "pool0", None, None).await; + populate_ip_pool(client, "default", None).await; let vpcs_url = format!("/organizations/{}/projects/{}/vpcs", org_name, project_name); diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 3828fb5bfb..22a270e40b 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -131,10 +131,10 @@ ip_pool_list /system/ip-pools ip_pool_range_add /system/ip-pools/{pool_name}/ranges/add ip_pool_range_list /system/ip-pools/{pool_name}/ranges ip_pool_range_remove /system/ip-pools/{pool_name}/ranges/remove -ip_pool_service_range_add /system/ip-pools-service/{rack_id}/ranges/add -ip_pool_service_range_list /system/ip-pools-service/{rack_id}/ranges -ip_pool_service_range_remove /system/ip-pools-service/{rack_id}/ranges/remove -ip_pool_service_view /system/ip-pools-service/{rack_id} +ip_pool_service_range_add /system/ip-pools-service/ranges/add +ip_pool_service_range_list /system/ip-pools-service/ranges +ip_pool_service_range_remove /system/ip-pools-service/ranges/remove +ip_pool_service_view /system/ip-pools-service ip_pool_update /system/ip-pools/{pool_name} ip_pool_view /system/ip-pools/{pool_name} ip_pool_view_by_id /system/by-id/ip-pools/{id} diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index dd57a3935d..5d0b603bc2 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -508,14 +508,6 @@ pub struct NetworkInterfaceUpdate { // IP POOLS -// Type used to identify a Project in request bodies, where one may not have -// the path in the request URL. -#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)] -pub struct ProjectPath { - pub organization: Name, - pub project: Name, -} - /// Create-time parameters for an IP Pool. 
/// /// See [`IpPool`](crate::external_api::views::IpPool) @@ -523,8 +515,6 @@ pub struct ProjectPath { pub struct IpPoolCreate { #[serde(flatten)] pub identity: IdentityMetadataCreateParams, - #[serde(flatten)] - pub project: Option, } /// Parameters for updating an IP Pool diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index c160a12f3d..9dd32206d4 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -271,7 +271,6 @@ pub struct VpcRouter { pub struct IpPool { #[serde(flatten)] pub identity: IdentityMetadata, - pub project_id: Option, } #[derive(Clone, Copy, Debug, Deserialize, Serialize, JsonSchema)] diff --git a/openapi/nexus.json b/openapi/nexus.json index 6cfaa8622e..1a3eef9aaf 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -6089,24 +6089,13 @@ } } }, - "/system/ip-pools-service/{rack_id}": { + "/system/ip-pools-service": { "get": { "tags": [ "system" ], - "summary": "Fetch an IP pool used for Oxide services.", + "summary": "Fetch the IP pool used for Oxide services.", "operationId": "ip_pool_service_view", - "parameters": [ - { - "in": "path", - "name": "rack_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], "responses": { "200": { "description": "successful operation", @@ -6127,24 +6116,15 @@ } } }, - "/system/ip-pools-service/{rack_id}/ranges": { + "/system/ip-pools-service/ranges": { "get": { "tags": [ "system" ], - "summary": "List ranges for an IP pool used for Oxide services.", + "summary": "List ranges for the IP pool used for Oxide services.", "description": "Ranges are ordered by their first address.", "operationId": "ip_pool_service_range_list", "parameters": [ - { - "in": "path", - "name": "rack_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - }, { "in": "query", "name": "limit", @@ -6187,24 +6167,13 @@ "x-dropshot-pagination": true } }, - "/system/ip-pools-service/{rack_id}/ranges/add": { + "/system/ip-pools-service/ranges/add": { "post": { "tags": [ "system" ], "summary": "Add a range to an IP pool used for Oxide services.", "operationId": "ip_pool_service_range_add", - "parameters": [ - { - "in": "path", - "name": "rack_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], "requestBody": { "content": { "application/json": { @@ -6235,24 +6204,13 @@ } } }, - "/system/ip-pools-service/{rack_id}/ranges/remove": { + "/system/ip-pools-service/ranges/remove": { "post": { "tags": [ "system" ], "summary": "Remove a range from an IP pool used for Oxide services.", "operationId": "ip_pool_service_range_remove", - "parameters": [ - { - "in": "path", - "name": "rack_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], "requestBody": { "content": { "application/json": { @@ -9968,11 +9926,6 @@ } ] }, - "project_id": { - "nullable": true, - "type": "string", - "format": "uuid" - }, "time_created": { "description": "timestamp when this resource was created", "type": "string", @@ -10001,12 +9954,6 @@ }, "name": { "$ref": "#/components/schemas/Name" - }, - "organization": { - "$ref": "#/components/schemas/Name" - }, - "project": { - "$ref": "#/components/schemas/Name" } }, "required": [