From 580594ea1791b8d4179bd78a7d63c36bebeb3b33 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Tue, 10 Oct 2023 14:36:34 -0500 Subject: [PATCH 01/67] migrations and diesel schema for ip_pool_resource join table --- nexus/db-model/src/ip_pool.rs | 24 ++++++++++++++++++++++++ nexus/db-model/src/schema.rs | 13 +++++++++++-- schema/crdb/7.0.0/up1.sql | 4 ++++ schema/crdb/7.0.0/up2.sql | 8 ++++++++ schema/crdb/7.0.0/up3.sql | 5 +++++ schema/crdb/7.0.0/up4.sql | 6 ++++++ schema/crdb/7.0.0/up5.sql | 6 ++++++ schema/crdb/dbinit.sql | 26 +++++++++++++++++++++++++- 8 files changed, 89 insertions(+), 3 deletions(-) create mode 100644 schema/crdb/7.0.0/up1.sql create mode 100644 schema/crdb/7.0.0/up2.sql create mode 100644 schema/crdb/7.0.0/up3.sql create mode 100644 schema/crdb/7.0.0/up4.sql create mode 100644 schema/crdb/7.0.0/up5.sql diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 8ad78af07b..0c9683a1e4 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -5,8 +5,10 @@ //! Model types for IP Pools and the CIDR blocks therein. use crate::collection::DatastoreCollectionConfig; +use crate::impl_enum_type; use crate::schema::ip_pool; use crate::schema::ip_pool_range; +use crate::schema::ip_pool_resource; use crate::Name; use chrono::DateTime; use chrono::Utc; @@ -93,6 +95,28 @@ impl From for IpPoolUpdate { } } +impl_enum_type!( + #[derive(SqlType, Debug, Clone, Copy, QueryId)] + #[diesel(postgres_type(name = "ip_pool_resource_type"))] + pub struct IpPoolResourceTypeEnum; + + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq)] + #[diesel(sql_type = IpPoolResourceTypeEnum)] + pub enum IpPoolResourceType; + + Fleet => b"fleet" + Silo => b"silo" +); + +#[derive(Queryable, Insertable, Selectable, Clone, Debug)] +#[diesel(table_name = ip_pool_resource)] +pub struct IpPoolResource { + pub ip_pool_id: Uuid, + pub resource_type: IpPoolResourceType, + pub resource_id: Uuid, + pub is_default: bool, +} + /// A range of IP addresses for an IP Pool. #[derive(Queryable, Insertable, Selectable, Clone, Debug)] #[diesel(table_name = ip_pool_range)] diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 0165ab1568..4ffbd21443 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -458,6 +458,15 @@ table! { } } +table! { + ip_pool_resource (ip_pool_id, resource_type, resource_id) { + ip_pool_id -> Uuid, + resource_type -> crate::IpPoolResourceTypeEnum, + resource_id -> Uuid, + is_default -> Bool, + } +} + table! { ip_pool_range (id) { id -> Uuid, @@ -1131,7 +1140,7 @@ table! { /// /// This should be updated whenever the schema is changed. 
For more details,
 /// refer to: schema/crdb/README.adoc
-pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(6, 0, 0);
+pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(7, 0, 0);
 
 allow_tables_to_appear_in_same_query!(
     system_update,
@@ -1140,7 +1149,7 @@ allow_tables_to_appear_in_same_query!(
 );
 joinable!(system_update_component_update -> component_update (component_update_id));
 
-allow_tables_to_appear_in_same_query!(ip_pool_range, ip_pool);
+allow_tables_to_appear_in_same_query!(ip_pool_range, ip_pool, ip_pool_resource);
 joinable!(ip_pool_range -> ip_pool (ip_pool_id));
 
 allow_tables_to_appear_in_same_query!(
diff --git a/schema/crdb/7.0.0/up1.sql b/schema/crdb/7.0.0/up1.sql
new file mode 100644
index 0000000000..210c7c83ed
--- /dev/null
+++ b/schema/crdb/7.0.0/up1.sql
@@ -0,0 +1,4 @@
+CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_resource_type AS ENUM (
+    'fleet',
+    'silo'
+);
diff --git a/schema/crdb/7.0.0/up2.sql b/schema/crdb/7.0.0/up2.sql
new file mode 100644
index 0000000000..c2be7785ce
--- /dev/null
+++ b/schema/crdb/7.0.0/up2.sql
@@ -0,0 +1,8 @@
+CREATE TABLE IF NOT EXISTS omicron.public.ip_pool_resource (
+    ip_pool_id UUID NOT NULL,
+    resource_type ip_pool_resource_type NOT NULL,
+    resource_id UUID NOT NULL,
+    is_default BOOL NOT NULL,
+
+    PRIMARY KEY (ip_pool_id, resource_type, resource_id)
+);
diff --git a/schema/crdb/7.0.0/up3.sql b/schema/crdb/7.0.0/up3.sql
new file mode 100644
index 0000000000..c345fd794e
--- /dev/null
+++ b/schema/crdb/7.0.0/up3.sql
@@ -0,0 +1,5 @@
+CREATE UNIQUE INDEX IF NOT EXISTS one_default_ip_pool_per_resource ON omicron.public.ip_pool_resource (
+    resource_id
+) where
+    is_default = true;
+
diff --git a/schema/crdb/7.0.0/up4.sql b/schema/crdb/7.0.0/up4.sql
new file mode 100644
index 0000000000..edcad062a8
--- /dev/null
+++ b/schema/crdb/7.0.0/up4.sql
@@ -0,0 +1,6 @@
+-- copy existing fleet associations into association table. treat all existing
+-- pools as fleet-associated because that is the current behavior
+INSERT INTO ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default)
+SELECT id, 'fleet', '001de000-1334-4000-8000-000000000000', is_default
+FROM ip_pool
+WHERE time_deleted IS null;
\ No newline at end of file
diff --git a/schema/crdb/7.0.0/up5.sql b/schema/crdb/7.0.0/up5.sql
new file mode 100644
index 0000000000..92f2201b86
--- /dev/null
+++ b/schema/crdb/7.0.0/up5.sql
@@ -0,0 +1,6 @@
+-- copy existing ip_pool-to-silo associations into association table
+INSERT INTO ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default)
+SELECT id, 'silo', silo_id, is_default
+FROM ip_pool
+WHERE silo_id IS NOT null
+  AND time_deleted IS null;
\ No newline at end of file
diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql
index a62cbae5ea..2f632bd4a6 100644
--- a/schema/crdb/dbinit.sql
+++ b/schema/crdb/dbinit.sql
@@ -2577,7 +2577,31 @@ INSERT INTO omicron.public.db_metadata (
     version,
     target_version
 ) VALUES
-    ( TRUE, NOW(), NOW(), '6.0.0', NULL)
+    ( TRUE, NOW(), NOW(), '7.0.0', NULL)
 ON CONFLICT DO NOTHING;
 
+CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_resource_type AS ENUM (
+    'fleet',
+    'silo'
+);
+
+-- join table associating IP pools with resources like fleet or silo
+CREATE TABLE IF NOT EXISTS omicron.public.ip_pool_resource (
+    ip_pool_id UUID NOT NULL,
+    resource_type ip_pool_resource_type NOT NULL,
+    resource_id UUID NOT NULL,
+    is_default BOOL NOT NULL,
+    -- TODO: timestamps for soft deletes?
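+    -- Illustration (assumed example values, not part of the schema): with
+    -- the partial unique index below, a second
+    --   INSERT INTO ip_pool_resource VALUES ('<pool2-uuid>', 'silo', '<silo-uuid>', true)
+    -- for a silo that already has a default pool fails with a uniqueness
+    -- violation, while any number of is_default = false rows are allowed.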
+ + -- resource_type is redundant because resource IDs are globally unique, but + -- logically it belongs here + PRIMARY KEY (ip_pool_id, resource_type, resource_id) +); + +-- a given resource can only have one default ip pool +CREATE UNIQUE INDEX IF NOT EXISTS one_default_ip_pool_per_resource ON omicron.public.ip_pool_resource ( + resource_id +) where + is_default = true; + COMMIT; From 3065a61083281d7081bd24157d4b50be83a2ed28 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Tue, 10 Oct 2023 16:31:04 -0500 Subject: [PATCH 02/67] add pools at rack setup time in the new way, update fetch_default_for --- nexus/db-model/src/ip_pool.rs | 9 +++ nexus/db-queries/src/db/datastore/ip_pool.rs | 80 ++++++++++++++++---- nexus/db-queries/src/db/datastore/rack.rs | 31 +++++++- nexus/src/app/ip_pool.rs | 25 ++++++ nexus/types/src/external_api/params.rs | 14 ++++ 5 files changed, 142 insertions(+), 17 deletions(-) diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 0c9683a1e4..8e9321da90 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -108,6 +108,15 @@ impl_enum_type!( Silo => b"silo" ); +impl From for IpPoolResourceType { + fn from(typ: params::IpPoolResourceType) -> Self { + match typ { + params::IpPoolResourceType::Fleet => IpPoolResourceType::Fleet, + params::IpPoolResourceType::Silo => IpPoolResourceType::Silo, + } + } +} + #[derive(Queryable, Insertable, Selectable, Clone, Debug)] #[diesel(table_name = ip_pool_resource)] pub struct IpPoolResource { diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index bd3148f2f7..6f5f1a12c4 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -17,6 +17,7 @@ use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::identity::Resource; use crate::db::model::IpPool; use crate::db::model::IpPoolRange; +use crate::db::model::IpPoolResource; use crate::db::model::IpPoolUpdate; use crate::db::model::Name; use crate::db::pagination::paginated; @@ -26,6 +27,7 @@ use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use ipnetwork::IpNetwork; +use nexus_db_model::IpPoolResourceType; use nexus_types::external_api::shared::IpRange; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; @@ -79,7 +81,8 @@ impl DataStore { &self, opctx: &OpContext, ) -> LookupResult { - use db::schema::ip_pool::dsl; + use db::schema::ip_pool; + use db::schema::ip_pool_resource; let authz_silo_id = opctx.authn.silo_required()?.id(); @@ -92,17 +95,26 @@ impl DataStore { // .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) // .await?; - dsl::ip_pool - .filter(dsl::silo_id.eq(authz_silo_id).or(dsl::silo_id.is_null())) - .filter(dsl::is_default.eq(true)) - .filter(dsl::time_deleted.is_null()) - // this will sort by most specific first, i.e., - // - // (silo) - // (null) - // - // then by only taking the first result, we get the most specific one - .order(dsl::silo_id.asc().nulls_last()) + // join ip_pool to ip_pool_resource and filter + + ip_pool::table + .inner_join( + ip_pool_resource::table + .on(ip_pool::id.eq(ip_pool_resource::ip_pool_id)), + ) + .filter( + (ip_pool_resource::resource_type + .eq(IpPoolResourceType::Silo) + .and(ip_pool_resource::resource_id.eq(authz_silo_id))) + .or(ip_pool_resource::resource_type + .eq(IpPoolResourceType::Fleet)), + ) + .filter(ip_pool::is_default.eq(true)) + 
.filter(ip_pool::time_deleted.is_null()) + // TODO: order by most specific first so we get the most specific + // when we select the first one. alphabetical desc technically + // works but come on. won't work when we have project association + .order(ip_pool_resource::resource_type.desc()) .select(IpPool::as_select()) .first_async::( &*self.pool_connection_authorized(opctx).await?, @@ -261,6 +273,32 @@ impl DataStore { }) } + pub async fn ip_pool_associate_resource( + &self, + opctx: &OpContext, + ip_pool_resource: IpPoolResource, + ) -> CreateResult { + use db::schema::ip_pool_resource::dsl; + opctx + .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) + .await?; + + diesel::insert_into(dsl::ip_pool_resource) + .values(ip_pool_resource.clone()) + .returning(IpPoolResource::as_returning()) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::IpPool, + &ip_pool_resource.ip_pool_id.to_string(), + ), + ) + }) + } + pub async fn ip_pool_list_ranges( &self, opctx: &OpContext, @@ -430,7 +468,7 @@ impl DataStore { #[cfg(test)] mod test { use crate::db::datastore::datastore_test; - use crate::db::model::IpPool; + use crate::db::model::{IpPool, IpPoolResource, IpPoolResourceType}; use assert_matches::assert_matches; use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Resource; @@ -453,6 +491,8 @@ mod test { assert_eq!(fleet_default_pool.silo_id, None); // unique index prevents second fleet-level default + // TODO: create the pool, and then the failure is on the attempt to + // associate it with the fleet as default let identity = IdentityMetadataCreateParams { name: "another-fleet-default".parse().unwrap(), description: "".to_string(), @@ -497,10 +537,22 @@ mod test { name: "default-for-silo".parse().unwrap(), description: "".to_string(), }; - datastore + let default_for_silo = datastore .ip_pool_create(&opctx, IpPool::new(&identity, Some(silo_id), true)) .await .expect("Failed to create silo default IP pool"); + datastore + .ip_pool_associate_resource( + &opctx, + IpPoolResource { + ip_pool_id: default_for_silo.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: silo_id, + is_default: true, + }, + ) + .await + .expect("Failed to associate IP pool with silo"); // now when we ask for the default pool, we get the one we just made let ip_pool = datastore diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 1be3e1ee4c..77bab750a1 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -19,6 +19,7 @@ use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::fixed_data::vpc_subnet::DNS_VPC_SUBNET; use crate::db::fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; use crate::db::fixed_data::vpc_subnet::NTP_VPC_SUBNET; +use crate::db::fixed_data::FLEET_ID; use crate::db::identity::Asset; use crate::db::model::Dataset; use crate::db::model::IncompleteExternalIp; @@ -565,12 +566,24 @@ impl DataStore { true, // default for internal silo ); - self.ip_pool_create(opctx, internal_pool).await.map(|_| ()).or_else( - |e| match e { + self.ip_pool_create(opctx, internal_pool.clone()) + .await + .map(|_| ()) + .or_else(|e| match e { Error::ObjectAlreadyExists { .. 
} => Ok(()), _ => Err(e), + })?; + + self.ip_pool_associate_resource( + opctx, + db::model::IpPoolResource { + ip_pool_id: internal_pool.id(), + resource_type: db::model::IpPoolResourceType::Silo, + resource_id: *INTERNAL_SILO_ID, + is_default: true, }, - )?; + ) + .await?; let default_pool = db::model::IpPool::new( &IdentityMetadataCreateParams { @@ -580,6 +593,18 @@ impl DataStore { None, // no silo ID, fleet scoped true, // default for fleet ); + + self.ip_pool_associate_resource( + opctx, + db::model::IpPoolResource { + ip_pool_id: default_pool.id(), + resource_type: db::model::IpPoolResourceType::Fleet, + resource_id: *FLEET_ID, + is_default: true, + }, + ) + .await?; + self.ip_pool_create(opctx, default_pool).await.map(|_| ()).or_else( |e| match e { Error::ObjectAlreadyExists { .. } => Ok(()), diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 5efdaf7b6f..8b5f6f2cdb 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -74,6 +74,31 @@ impl super::Nexus { self.db_datastore.ip_pool_create(opctx, pool).await } + pub(crate) async fn ip_pool_associate_resource( + &self, + opctx: &OpContext, + pool_lookup: &lookup::IpPool<'_>, + ip_pool_resource: ¶ms::IpPoolResource, + ) -> CreateResult { + // TODO: check for perms on specified resource + let (.., authz_pool) = + pool_lookup.lookup_for(authz::Action::Modify).await?; + self.db_datastore + .ip_pool_associate_resource( + opctx, + db::model::IpPoolResource { + ip_pool_id: authz_pool.id(), + resource_type: ip_pool_resource + .resource_type + .clone() + .into(), + resource_id: ip_pool_resource.resource_id, + is_default: ip_pool_resource.is_default, + }, + ) + .await + } + pub(crate) async fn ip_pools_list( &self, opctx: &OpContext, diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index b4e0e705d8..1a7bb36cb7 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -751,6 +751,20 @@ pub struct IpPoolUpdate { pub identity: IdentityMetadataUpdateParams, } +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub enum IpPoolResourceType { + Fleet, + Silo, +} + +/// Parameters for associating an IP pool with a resource (fleet, silo) +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct IpPoolResource { + pub resource_id: Uuid, + pub resource_type: IpPoolResourceType, + pub is_default: bool, +} + // INSTANCES /// Describes an attachment of an `InstanceNetworkInterface` to an `Instance`, From f3809b5050b08549599e2a68f1454389a27eb99a Mon Sep 17 00:00:00 2001 From: David Crespo Date: Tue, 10 Oct 2023 22:01:24 -0500 Subject: [PATCH 03/67] update service pool lookup logic --- nexus/db-queries/src/db/datastore/ip_pool.rs | 22 +++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 6f5f1a12c4..8a8b2095d9 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -53,6 +53,7 @@ impl DataStore { opctx .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) .await?; + // TODO: boy I hope we can paginate a join without too much trouble match pagparams { PaginatedBy::Id(pagparams) => { paginated(dsl::ip_pool, dsl::id, pagparams) @@ -130,16 +131,27 @@ impl DataStore { &self, opctx: &OpContext, ) -> LookupResult<(authz::IpPool, IpPool)> { - use db::schema::ip_pool::dsl; + use db::schema::ip_pool; + use db::schema::ip_pool_resource; 
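+    // Roughly the SQL the query below builds (a sketch; INTERNAL_SILO_ID is
+    // the fixed UUID of the internal silo):
+    //   SELECT ip_pool.* FROM ip_pool
+    //   INNER JOIN ip_pool_resource ON ip_pool.id = ip_pool_resource.ip_pool_id
+    //   WHERE ip_pool.time_deleted IS NULL
+    //     AND ip_pool_resource.resource_type = 'silo'
+    //     AND ip_pool_resource.resource_id = <INTERNAL_SILO_ID>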
opctx .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) .await?; - // Look up this IP pool by rack ID. - let (authz_pool, pool) = dsl::ip_pool - .filter(dsl::silo_id.eq(*INTERNAL_SILO_ID)) - .filter(dsl::time_deleted.is_null()) + // Look up IP pool by its association with the internal silo. + // We assume there is only one pool for that silo, or at least, + // if there is more than one, it doesn't matter which one we pick. + let (authz_pool, pool) = ip_pool::table + .inner_join( + ip_pool_resource::table + .on(ip_pool::id.eq(ip_pool_resource::ip_pool_id)), + ) + .filter(ip_pool::time_deleted.is_null()) + .filter( + ip_pool_resource::resource_type + .eq(IpPoolResourceType::Silo) + .and(ip_pool_resource::resource_id.eq(*INTERNAL_SILO_ID)), + ) .select(IpPool::as_select()) .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await From 6404e27726982321869f643928512c2b785406e3 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 11 Oct 2023 00:18:05 -0500 Subject: [PATCH 04/67] delete silo_id and is_default from ip_pool model, test still passes --- nexus/db-model/src/ip_pool.rs | 23 +------- .../src/db/datastore/external_ip.rs | 21 ++++--- nexus/db-queries/src/db/datastore/ip_pool.rs | 56 +++++++++++++++---- nexus/db-queries/src/db/datastore/rack.rs | 28 +++++----- .../db-queries/src/db/queries/external_ip.rs | 18 +++--- nexus/src/app/ip_pool.rs | 16 +----- nexus/src/external_api/http_entrypoints.rs | 12 ++-- nexus/types/src/external_api/views.rs | 2 - 8 files changed, 87 insertions(+), 89 deletions(-) diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 8e9321da90..3428ac7771 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -37,42 +37,23 @@ pub struct IpPool { /// Child resource generation number, for optimistic concurrency control of /// the contained ranges. pub rcgen: i64, - - /// Silo, if IP pool is associated with a particular silo. One special use - /// for this is associating a pool with the internal silo oxide-internal, - /// which is used for internal services. If there is no silo ID, the - /// pool is considered a fleet-wide pool and will be used for allocating - /// instance IPs in silos that don't have their own pool. 
- pub silo_id: Option, - - pub is_default: bool, } impl IpPool { - pub fn new( - pool_identity: &external::IdentityMetadataCreateParams, - silo_id: Option, - is_default: bool, - ) -> Self { + pub fn new(pool_identity: &external::IdentityMetadataCreateParams) -> Self { Self { identity: IpPoolIdentity::new( Uuid::new_v4(), pool_identity.clone(), ), rcgen: 0, - silo_id, - is_default, } } } impl From for views::IpPool { fn from(pool: IpPool) -> Self { - Self { - identity: pool.identity(), - silo_id: pool.silo_id, - is_default: pool.is_default, - } + Self { identity: pool.identity() } } } diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 268b284a0a..d8f8890ad1 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -6,7 +6,6 @@ use super::DataStore; use crate::authz; -use crate::authz::ApiResource; use crate::context::OpContext; use crate::db; use crate::db::error::public_error_from_diesel; @@ -57,24 +56,30 @@ impl DataStore { ) -> CreateResult { let pool = match pool_name { Some(name) => { - let (.., authz_pool, pool) = LookupPath::new(opctx, &self) + let (.., _authz_pool, pool) = LookupPath::new(opctx, &self) .ip_pool_name(&name) // any authenticated user can CreateChild on an IP pool. this is // meant to represent allocating an IP .fetch_for(authz::Action::CreateChild) .await?; + // TODO: this logic must change now that pools can be associated + // with many resources. The logic is slightly simpler now (at + // least conceptually, if not in implementation): Is this pool + // associated with either the fleet or the silo? otherwise, 404 + // If the named pool conflicts with user's current scope, i.e., // if it has a silo and it's different from the current silo, // then as far as IP allocation is concerned, that pool doesn't // exist. If the pool has no silo, it's fleet-scoped and can // always be used. - let authz_silo_id = opctx.authn.silo_required()?.id(); - if let Some(pool_silo_id) = pool.silo_id { - if pool_silo_id != authz_silo_id { - return Err(authz_pool.not_found()); - } - } + + // let authz_silo_id = opctx.authn.silo_required()?.id(); + // if let Some(pool_silo_id) = pool.silo_id { + // if pool_silo_id != authz_silo_id { + // return Err(authz_pool.not_found()); + // } + // } pool } diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 8a8b2095d9..90ed5f76c0 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -110,7 +110,7 @@ impl DataStore { .or(ip_pool_resource::resource_type .eq(IpPoolResourceType::Fleet)), ) - .filter(ip_pool::is_default.eq(true)) + .filter(ip_pool_resource::is_default.eq(true)) .filter(ip_pool::time_deleted.is_null()) // TODO: order by most specific first so we get the most specific // when we select the first one. 
alphabetical desc technically @@ -480,6 +480,7 @@ impl DataStore { #[cfg(test)] mod test { use crate::db::datastore::datastore_test; + use crate::db::fixed_data::FLEET_ID; use crate::db::model::{IpPool, IpPoolResource, IpPoolResourceType}; use assert_matches::assert_matches; use nexus_test_utils::db::test_setup_database; @@ -499,8 +500,6 @@ mod test { datastore.ip_pools_fetch_default(&opctx).await.unwrap(); assert_eq!(fleet_default_pool.identity.name.as_str(), "default"); - assert!(fleet_default_pool.is_default); - assert_eq!(fleet_default_pool.silo_id, None); // unique index prevents second fleet-level default // TODO: create the pool, and then the failure is on the attempt to @@ -509,13 +508,22 @@ mod test { name: "another-fleet-default".parse().unwrap(), description: "".to_string(), }; + let second_default = datastore + .ip_pool_create(&opctx, IpPool::new(&identity)) + .await + .expect("Failed to create pool"); let err = datastore - .ip_pool_create( + .ip_pool_associate_resource( &opctx, - IpPool::new(&identity, None, /*default= */ true), + IpPoolResource { + ip_pool_id: second_default.id(), + resource_type: IpPoolResourceType::Fleet, + resource_id: *FLEET_ID, + is_default: true, + }, ) .await - .expect_err("Failed to fail to create a second default fleet pool"); + .expect_err("Failed to fail to make IP pool fleet default"); assert_matches!(err, Error::ObjectAlreadyExists { .. }); // when we fetch the default pool for a silo, if those scopes do not @@ -528,13 +536,22 @@ mod test { name: "non-default-for-silo".parse().unwrap(), description: "".to_string(), }; + let default_for_silo = datastore + .ip_pool_create(&opctx, IpPool::new(&identity)) + .await + .expect("Failed to create silo non-default IP pool"); datastore - .ip_pool_create( + .ip_pool_associate_resource( &opctx, - IpPool::new(&identity, Some(silo_id), /*default= */ false), + IpPoolResource { + ip_pool_id: default_for_silo.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: silo_id, + is_default: false, + }, ) .await - .expect("Failed to create silo non-default IP pool"); + .expect("Failed to associate IP pool with silo"); // because that one was not a default, when we ask for the silo default // pool, we still get the fleet default @@ -544,13 +561,16 @@ mod test { .expect("Failed to get silo default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); + // TODO: instead of a separate pool, this could now be done by + // associating the same pool? 
Would be nice to test both + // now create a default pool for the silo let identity = IdentityMetadataCreateParams { name: "default-for-silo".parse().unwrap(), description: "".to_string(), }; let default_for_silo = datastore - .ip_pool_create(&opctx, IpPool::new(&identity, Some(silo_id), true)) + .ip_pool_create(&opctx, IpPool::new(&identity)) .await .expect("Failed to create silo default IP pool"); datastore @@ -578,10 +598,22 @@ mod test { name: "second-default-for-silo".parse().unwrap(), description: "".to_string(), }; + let second_silo_default = datastore + .ip_pool_create(&opctx, IpPool::new(&identity)) + .await + .expect("Failed to create pool"); let err = datastore - .ip_pool_create(&opctx, IpPool::new(&identity, Some(silo_id), true)) + .ip_pool_associate_resource( + &opctx, + IpPoolResource { + ip_pool_id: second_silo_default.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: silo_id, + is_default: true, + }, + ) .await - .expect_err("Failed to fail to create second default pool"); + .expect_err("Failed to fail to set a second default pool for silo"); assert_matches!(err, Error::ObjectAlreadyExists { .. }); db.cleanup().await.unwrap(); diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 77bab750a1..cf53b0a711 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -557,14 +557,11 @@ impl DataStore { self.rack_insert(opctx, &db::model::Rack::new(rack_id)).await?; - let internal_pool = db::model::IpPool::new( - &IdentityMetadataCreateParams { + let internal_pool = + db::model::IpPool::new(&IdentityMetadataCreateParams { name: SERVICE_IP_POOL_NAME.parse::().unwrap(), description: String::from("IP Pool for Oxide Services"), - }, - Some(*INTERNAL_SILO_ID), - true, // default for internal silo - ); + }); self.ip_pool_create(opctx, internal_pool.clone()) .await @@ -574,6 +571,7 @@ impl DataStore { _ => Err(e), })?; + // make default for the internal silo self.ip_pool_associate_resource( opctx, db::model::IpPoolResource { @@ -585,15 +583,13 @@ impl DataStore { ) .await?; - let default_pool = db::model::IpPool::new( - &IdentityMetadataCreateParams { + let default_pool = + db::model::IpPool::new(&IdentityMetadataCreateParams { name: "default".parse::().unwrap(), description: String::from("default IP pool"), - }, - None, // no silo ID, fleet scoped - true, // default for fleet - ); + }); + // make pool default for fleet self.ip_pool_associate_resource( opctx, db::model::IpPoolResource { @@ -1140,7 +1136,9 @@ mod test { // been allocated as a part of the service IP pool. let (.., svc_pool) = datastore.ip_pools_service_lookup(&opctx).await.unwrap(); - assert_eq!(svc_pool.silo_id, Some(*INTERNAL_SILO_ID)); + // TODO: do we care? we should just check that the name or ID of the + // pool itself matches the known name or ID of the service pool + // assert_eq!(svc_pool.silo_id, Some(*INTERNAL_SILO_ID)); let observed_ip_pool_ranges = get_all_ip_pool_ranges(&datastore).await; assert_eq!(observed_ip_pool_ranges.len(), 1); @@ -1342,7 +1340,9 @@ mod test { // allocated as a part of the service IP pool. let (.., svc_pool) = datastore.ip_pools_service_lookup(&opctx).await.unwrap(); - assert_eq!(svc_pool.silo_id, Some(*INTERNAL_SILO_ID)); + // TODO: do we care? 
we should just check that the name or ID of the + // pool itself matches the known name or ID of the service pool + // assert_eq!(svc_pool.silo_id, Some(*INTERNAL_SILO_ID)); let observed_ip_pool_ranges = get_all_ip_pool_ranges(&datastore).await; assert_eq!(observed_ip_pool_ranges.len(), 1); diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index 18360e1045..9964adc59f 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -865,17 +865,12 @@ mod tests { &self, name: &str, range: IpRange, - is_default: bool, + _is_default: bool, ) { - let silo_id = self.opctx.authn.silo_required().unwrap().id(); - let pool = IpPool::new( - &IdentityMetadataCreateParams { - name: String::from(name).parse().unwrap(), - description: format!("ip pool {}", name), - }, - Some(silo_id), - is_default, - ); + let pool = IpPool::new(&IdentityMetadataCreateParams { + name: String::from(name).parse().unwrap(), + description: format!("ip pool {}", name), + }); let conn = self .db_datastore @@ -890,6 +885,9 @@ mod tests { .await .expect("Failed to create IP Pool"); + let _silo_id = self.opctx.authn.silo_required().unwrap().id(); + // TODO: associate with silo here to match previous behavior + self.initialize_ip_pool(name, range).await; } diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 8b5f6f2cdb..9e06fda435 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -56,21 +56,7 @@ impl super::Nexus { opctx: &OpContext, pool_params: ¶ms::IpPoolCreate, ) -> CreateResult { - let silo_id = match pool_params.clone().silo { - Some(silo) => { - let (.., authz_silo) = self - .silo_lookup(&opctx, silo)? - .lookup_for(authz::Action::Read) - .await?; - Some(authz_silo.id()) - } - _ => None, - }; - let pool = db::model::IpPool::new( - &pool_params.identity, - silo_id, - pool_params.is_default, - ); + let pool = db::model::IpPool::new(&pool_params.identity); self.db_datastore.ip_pool_create(opctx, pool).await } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index ac5cf76775..8ee78a1b07 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -41,9 +41,6 @@ use nexus_db_queries::db::identity::Resource; use nexus_db_queries::db::lookup::ImageLookup; use nexus_db_queries::db::lookup::ImageParentLookup; use nexus_db_queries::db::model::Name; -use nexus_db_queries::{ - authz::ApiResource, db::fixed_data::silo::INTERNAL_SILO_ID, -}; use nexus_types::external_api::params::ProjectSelector; use nexus_types::{ external_api::views::{SledInstance, Switch}, @@ -1161,15 +1158,16 @@ async fn project_ip_pool_view( } else { None }; - let (authz_pool, pool) = nexus + let (_authz_pool, pool) = nexus .project_ip_pool_lookup(&opctx, &pool_selector, &project_lookup)? .fetch() .await?; // TODO(2148): once we've actualy implemented filtering to pools belonging to // the specified project, we can remove this internal check. - if pool.silo_id == Some(*INTERNAL_SILO_ID) { - return Err(authz_pool.not_found().into()); - } + // TODO: do this? forget about it? 
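+    // One option, sketched: query ip_pool_resource for this pool and return
+    // not-found if its only association is the internal silo. Unlike the
+    // commented-out check below, that needs a datastore lookup, since
+    // "internal" is no longer a property of the pool row itself.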
+ // if pool.silo_id == Some(*INTERNAL_SILO_ID) { + // return Err(authz_pool.not_found().into()); + // } Ok(HttpResponseOk(IpPool::from(pool))) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index ef3835c618..c5be6c565d 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -244,8 +244,6 @@ pub struct VpcRouter { pub struct IpPool { #[serde(flatten)] pub identity: IdentityMetadata, - pub silo_id: Option, - pub is_default: bool, } #[derive(Clone, Copy, Debug, Deserialize, Serialize, JsonSchema)] From 94d0ebfa99255a1aef1524fe89d267329e4e1b06 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 11 Oct 2023 09:47:05 -0500 Subject: [PATCH 05/67] rip out silo_id and is_default on ip_pools in db --- nexus/db-model/src/schema.rs | 2 -- nexus/db-queries/src/db/datastore/ip_pool.rs | 17 ++++++++------- nexus/db-queries/src/db/datastore/project.rs | 3 ++- nexus/src/app/ip_pool.rs | 4 +++- nexus/test-utils/src/resource_helpers.rs | 3 +-- nexus/tests/integration_tests/ip_pools.rs | 4 ++-- nexus/types/src/external_api/params.rs | 11 ---------- schema/crdb/7.0.0/up6.sql | 1 + schema/crdb/7.0.0/up7.sql | 3 +++ schema/crdb/dbinit.sql | 22 +------------------- 10 files changed, 23 insertions(+), 47 deletions(-) create mode 100644 schema/crdb/7.0.0/up6.sql create mode 100644 schema/crdb/7.0.0/up7.sql diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 4ffbd21443..c1ba3f4299 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -453,8 +453,6 @@ table! { time_modified -> Timestamptz, time_deleted -> Nullable, rcgen -> Int8, - silo_id -> Nullable, - is_default -> Bool, } } diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 90ed5f76c0..9feaef0c14 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -65,7 +65,8 @@ impl DataStore { ), } // != excludes nulls so we explicitly include them - .filter(dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null())) + // TODO: join to join table to exclude internal + // .filter(dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null())) .filter(dsl::time_deleted.is_null()) .select(db::model::IpPool::as_select()) .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
@@ -233,9 +234,10 @@ impl DataStore { let now = Utc::now(); let updated_rows = diesel::update(dsl::ip_pool) // != excludes nulls so we explicitly include them - .filter( - dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null()), - ) + // TODO: + // .filter( + // dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null()), + // ) .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_pool.id())) .filter(dsl::rcgen.eq(db_pool.rcgen)) @@ -268,9 +270,10 @@ impl DataStore { opctx.authorize(authz::Action::Modify, authz_pool).await?; diesel::update(dsl::ip_pool) // != excludes nulls so we explicitly include them - .filter( - dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null()), - ) + // TODO: exclude internal pool the right way + // .filter( + // dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null()), + // ) .filter(dsl::id.eq(authz_pool.id())) .filter(dsl::time_deleted.is_null()) .set(updates) diff --git a/nexus/db-queries/src/db/datastore/project.rs b/nexus/db-queries/src/db/datastore/project.rs index 0285679cd5..97abbbe102 100644 --- a/nexus/db-queries/src/db/datastore/project.rs +++ b/nexus/db-queries/src/db/datastore/project.rs @@ -352,7 +352,8 @@ impl DataStore { // TODO(2148, 2056): filter only pools accessible by the given // project, once specific projects for pools are implemented // != excludes nulls so we explicitly include them - .filter(dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null())) + // TODO: filter out internal the right way + // .filter(dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null())) .filter(dsl::time_deleted.is_null()) .select(db::model::IpPool::as_select()) .get_results_async(&*self.pool_connection_authorized(opctx).await?) diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 9e06fda435..22b99a3b07 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -28,7 +28,9 @@ use omicron_common::api::external::UpdateResult; use ref_cast::RefCast; fn is_internal(pool: &IpPool) -> bool { - pool.silo_id == Some(*INTERNAL_SILO_ID) + // pool.silo_id == Some(*INTERNAL_SILO_ID) + // TODO: this is no longer a simple function of the pool itself + false } impl super::Nexus { diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 2368c3f568..35a4f6242b 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -140,11 +140,10 @@ pub async fn create_ip_pool( name: pool_name.parse().unwrap(), description: String::from("an ip pool"), }, - silo: None, - is_default: false, }, ) .await; + // TODO: associate with fleet as a non-default like before? 
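+    // A sketch of what that could look like, assuming an association
+    // endpoint shaped like the datastore method added earlier (a later
+    // commit in this series adds one):
+    //   POST /v1/system/ip-pools/{pool_name}/associate
+    //   { "resource_type": "fleet", "resource_id": <FLEET_ID>, "is_default": false }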
let range = populate_ip_pool(client, pool_name, ip_range).await; (pool, range) } diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 27f4b04290..639ccd788a 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -285,8 +285,8 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { name: String::from("p0").parse().unwrap(), description: String::from(""), }, - silo: Some(NameOrId::Name(cptestctx.silo_name.clone())), - is_default: false, + // silo: Some(NameOrId::Name(cptestctx.silo_name.clone())), + // is_default: false, }; let created_pool = create_pool(client, ¶ms).await; assert_eq!(created_pool.identity.name, "p0"); diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 1a7bb36cb7..7fa984b4c0 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -731,17 +731,6 @@ impl std::fmt::Debug for CertificateCreate { pub struct IpPoolCreate { #[serde(flatten)] pub identity: IdentityMetadataCreateParams, - - /// If an IP pool is associated with a silo, instance IP allocations in that - /// silo can draw from that pool. - pub silo: Option, - - /// Whether the IP pool is considered a default pool for its scope (fleet - /// or silo). If a pool is marked default and is associated with a silo, - /// instances created in that silo will draw IPs from that pool unless - /// another pool is specified at instance create time. - #[serde(default)] - pub is_default: bool, } /// Parameters for updating an IP Pool diff --git a/schema/crdb/7.0.0/up6.sql b/schema/crdb/7.0.0/up6.sql new file mode 100644 index 0000000000..3e5347e78c --- /dev/null +++ b/schema/crdb/7.0.0/up6.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS one_default_pool_per_scope; \ No newline at end of file diff --git a/schema/crdb/7.0.0/up7.sql b/schema/crdb/7.0.0/up7.sql new file mode 100644 index 0000000000..a62bda1adc --- /dev/null +++ b/schema/crdb/7.0.0/up7.sql @@ -0,0 +1,3 @@ +ALTER TABLE omicron.public.ip_pool + DROP COLUMN IF EXISTS silo_id, + DROP COLUMN IF EXISTS is_default; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 2f632bd4a6..1b60bbb68c 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1498,29 +1498,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool ( time_deleted TIMESTAMPTZ, /* The collection's child-resource generation number */ - rcgen INT8 NOT NULL, - - /* - * Association with a silo. silo_id is also used to mark an IP pool as - * "internal" by associating it with the oxide-internal silo. Null silo_id - * means the pool is can be used fleet-wide. - */ - silo_id UUID, - - /* Is this the default pool for its scope (fleet or silo) */ - is_default BOOLEAN NOT NULL DEFAULT FALSE + rcgen INT8 NOT NULL ); -/* - * Ensure there can only be one default pool for the fleet or a given silo. - * Coalesce is needed because otherwise different nulls are considered to be - * distinct from each other. - */ -CREATE UNIQUE INDEX IF NOT EXISTS one_default_pool_per_scope ON omicron.public.ip_pool ( - COALESCE(silo_id, '00000000-0000-0000-0000-000000000000'::uuid) -) WHERE - is_default = true AND time_deleted IS NULL; - /* * Index ensuring uniqueness of IP Pool names, globally. */ From 1de49fc0f9a333dd98d59eb4297323d65d5224d3 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 11 Oct 2023 11:08:49 -0500 Subject: [PATCH 06/67] let's add an endpoint! 
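
An example request against the new endpoint (field names and enum values
match the params::IpPoolResource schema added to the openapi document in
this patch):

    POST /v1/system/ip-pools/{pool}/associate
    {
        "resource_type": "silo",
        "resource_id": "<silo-uuid>",
        "is_default": false
    }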
---
 nexus/src/external_api/http_entrypoints.rs | 31 ++++++++
 nexus/test-utils/src/resource_helpers.rs | 14 ++++
 nexus/tests/integration_tests/endpoints.rs | 2 -
 nexus/tests/integration_tests/instances.rs | 4 +-
 nexus/tests/integration_tests/ip_pools.rs | 46 +++++------
 nexus/tests/output/nexus_tags.txt | 1 +
 nexus/types/src/external_api/params.rs | 1 +
 openapi/nexus.json | 92 ++++++++++++++++------
 8 files changed, 139 insertions(+), 52 deletions(-)

diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index 8ee78a1b07..5f502306e8 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -116,6 +116,7 @@ pub(crate) fn external_api() -> NexusApiDescription {
         // Operator-Accessible IP Pools API
         api.register(ip_pool_list)?;
         api.register(ip_pool_create)?;
+        api.register(ip_pool_associate)?;
         api.register(ip_pool_view)?;
         api.register(ip_pool_delete)?;
         api.register(ip_pool_update)?;
@@ -1300,6 +1301,36 @@ async fn ip_pool_update(
     apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
 }
 
+// TODO: associate just seems like the wrong word and I'd like to change it
+// across the board. What I really mean is "make available to" or "make available
+// for use in"
+/// Associate an IP Pool with a silo or project
+#[endpoint {
+    method = POST,
+    path = "/v1/system/ip-pools/{pool}/associate",
+    tags = ["system/networking"],
+}]
+async fn ip_pool_associate(
+    rqctx: RequestContext<Arc<ServerContext>>,
+    path_params: Path<params::IpPoolPath>,
+    resource_assoc: TypedBody<params::IpPoolResource>,
+    // TODO: what does this return? Returning the association record seems silly
+) -> Result<HttpResponseUpdatedNoContent, HttpError> {
+    let apictx = rqctx.context();
+    let handler = async {
+        let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
+        let nexus = &apictx.nexus;
+        let path = path_params.into_inner();
+        let resource_assoc = resource_assoc.into_inner();
+        let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?;
+        nexus
+            .ip_pool_associate_resource(&opctx, &pool_lookup, &resource_assoc)
+            .await?;
+        Ok(HttpResponseUpdatedNoContent())
+    };
+    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+}
+
 /// Fetch the IP pool used for Oxide services
 #[endpoint {
     method = GET,
diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs
index 35a4f6242b..ffbfca1d8e 100644
--- a/nexus/test-utils/src/resource_helpers.rs
+++ b/nexus/test-utils/src/resource_helpers.rs
@@ -12,6 +12,7 @@ use dropshot::test_util::ClientTestContext;
 use dropshot::HttpErrorResponseBody;
 use dropshot::Method;
 use http::StatusCode;
+use nexus_db_queries::db::fixed_data::FLEET_ID;
 use nexus_test_interface::NexusServer;
 use nexus_types::external_api::params;
 use nexus_types::external_api::params::PhysicalDiskKind;
@@ -143,6 +144,19 @@ pub async fn create_ip_pool(
         },
     )
     .await;
+
+    // match previous behavior, makes pool available for use anywhere in fleet
+    let _: () = object_create(
+        client,
+        &format!("/v1/system/ip-pools/{pool_name}/associate"),
+        &params::IpPoolResource {
+            resource_id: *FLEET_ID,
+            resource_type: params::IpPoolResourceType::Fleet,
+            is_default: false,
+        },
+    )
+    .await;
+
     // TODO: associate with fleet as a non-default like before?
let range = populate_ip_pool(client, pool_name, ip_range).await; (pool, range) diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index e9ae11c21f..0f7a0d6070 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -456,8 +456,6 @@ lazy_static! { name: DEMO_IP_POOL_NAME.clone(), description: String::from("an IP pool"), }, - silo: None, - is_default: true, }; pub static ref DEMO_IP_POOL_PROJ_URL: String = format!("/v1/ip-pools/{}?project={}", *DEMO_IP_POOL_NAME, *DEMO_PROJECT_NAME); diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 83fff2fbab..fff61bd7c4 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -3416,8 +3416,8 @@ async fn test_instance_ephemeral_ip_from_correct_pool( name: pool_name.parse().unwrap(), description: String::from("an ip pool"), }, - silo: Some(NameOrId::Id(DEFAULT_SILO.id())), - is_default: true, + // silo: Some(NameOrId::Id(DEFAULT_SILO.id())), + // is_default: true, }, ) .await; diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 639ccd788a..5084ab4f6f 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -28,7 +28,7 @@ use nexus_types::external_api::shared::Ipv6Range; use nexus_types::external_api::views::IpPool; use nexus_types::external_api::views::IpPoolRange; use omicron_common::api::external::IdentityMetadataUpdateParams; -use omicron_common::api::external::NameOrId; +// use omicron_common::api::external::NameOrId; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; use omicron_nexus::TestInterfaces; use sled_agent_client::TestInterfaces as SledTestInterfaces; @@ -61,8 +61,8 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { assert_eq!(ip_pools.len(), 1, "Expected to see default IP pool"); assert_eq!(ip_pools[0].identity.name, "default",); - assert_eq!(ip_pools[0].silo_id, None); - assert!(ip_pools[0].is_default); + // assert_eq!(ip_pools[0].silo_id, None); + // assert!(ip_pools[0].is_default); // Verify 404 if the pool doesn't exist yet, both for creating or deleting let error: HttpErrorResponseBody = NexusRequest::expect_failure( @@ -105,8 +105,8 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, - silo: None, - is_default: false, + // silo: None, + // is_default: false, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, ¶ms) @@ -118,7 +118,7 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { .unwrap(); assert_eq!(created_pool.identity.name, pool_name); assert_eq!(created_pool.identity.description, description); - assert_eq!(created_pool.silo_id, None); + // assert_eq!(created_pool.silo_id, None); let list = NexusRequest::iter_collection_authn::( client, @@ -288,11 +288,10 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { // silo: Some(NameOrId::Name(cptestctx.silo_name.clone())), // is_default: false, }; - let created_pool = create_pool(client, ¶ms).await; - assert_eq!(created_pool.identity.name, "p0"); + let _created_pool = create_pool(client, ¶ms).await; - let silo_id = - created_pool.silo_id.expect("Expected pool to have a silo_id"); + // let silo_id = + // created_pool.silo_id.expect("Expected pool to have 
a silo_id"); // now we'll create another IP pool using that silo ID let params = IpPoolCreate { @@ -300,12 +299,11 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { name: String::from("p1").parse().unwrap(), description: String::from(""), }, - silo: Some(NameOrId::Id(silo_id)), - is_default: false, + // silo: Some(NameOrId::Id(silo_id)), + // is_default: false, }; - let created_pool = create_pool(client, ¶ms).await; - assert_eq!(created_pool.identity.name, "p1"); - assert_eq!(created_pool.silo_id.unwrap(), silo_id); + let _created_pool = create_pool(client, ¶ms).await; + // assert_eq!(created_pool.silo_id.unwrap(), silo_id); // expect 404 if the specified silo doesn't exist let bad_silo_params = IpPoolCreate { @@ -313,10 +311,10 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { name: String::from("p2").parse().unwrap(), description: String::from(""), }, - silo: Some(NameOrId::Name( - String::from("not-a-thing").parse().unwrap(), - )), - is_default: false, + // silo: Some(NameOrId::Name( + // String::from("not-a-thing").parse().unwrap(), + // )), + // is_default: false, }; let error: HttpErrorResponseBody = NexusRequest::new( RequestBuilder::new(client, Method::POST, "/v1/system/ip-pools") @@ -374,8 +372,8 @@ async fn test_ip_pool_range_overlapping_ranges_fails( name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, - silo: None, - is_default: false, + // silo: None, + // is_default: false, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, ¶ms) @@ -557,8 +555,6 @@ async fn test_ip_pool_range_pagination(cptestctx: &ControlPlaneTestContext) { name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, - silo: None, - is_default: false, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, ¶ms) @@ -695,8 +691,8 @@ async fn test_ip_pool_list_usable_by_project( name: String::from(mypool_name).parse().unwrap(), description: String::from("right on cue"), }, - silo: None, - is_default: false, + // silo: None, + // is_default: false, }; NexusRequest::objects_post(client, ip_pools_url, ¶ms) .authn_as(AuthnMode::PrivilegedUser) diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 1d7f5556c2..06a5651622 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -129,6 +129,7 @@ system_metric GET /v1/system/metrics/{metric_nam API operations found with tag "system/networking" OPERATION ID METHOD URL PATH +ip_pool_associate POST /v1/system/ip-pools/{pool}/associate ip_pool_create POST /v1/system/ip-pools ip_pool_delete DELETE /v1/system/ip-pools/{pool} ip_pool_list GET /v1/system/ip-pools diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 7fa984b4c0..4bba13a530 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -741,6 +741,7 @@ pub struct IpPoolUpdate { } #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +#[serde(rename_all = "snake_case")] pub enum IpPoolResourceType { Fleet, Silo, diff --git a/openapi/nexus.json b/openapi/nexus.json index 9dda94f283..23948208da 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -4577,6 +4577,47 @@ } } }, + "/v1/system/ip-pools/{pool}/associate": { + "post": { + "tags": [ + "system/networking" + ], + "summary": "Associate an IP Pool with a silo or project", + "operationId": "ip_pool_associate", + "parameters": [ + { + "in": 
"path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolResource" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/v1/system/ip-pools/{pool}/ranges": { "get": { "tags": [ @@ -11512,9 +11553,6 @@ "type": "string", "format": "uuid" }, - "is_default": { - "type": "boolean" - }, "name": { "description": "unique, mutable, user-controlled identifier for each resource", "allOf": [ @@ -11523,11 +11561,6 @@ } ] }, - "silo_id": { - "nullable": true, - "type": "string", - "format": "uuid" - }, "time_created": { "description": "timestamp when this resource was created", "type": "string", @@ -11542,7 +11575,6 @@ "required": [ "description", "id", - "is_default", "name", "time_created", "time_modified" @@ -11555,22 +11587,8 @@ "description": { "type": "string" }, - "is_default": { - "description": "Whether the IP pool is considered a default pool for its scope (fleet or silo). If a pool is marked default and is associated with a silo, instances created in that silo will draw IPs from that pool unless another pool is specified at instance create time.", - "default": false, - "type": "boolean" - }, "name": { "$ref": "#/components/schemas/Name" - }, - "silo": { - "nullable": true, - "description": "If an IP pool is associated with a silo, instance IP allocations in that silo can draw from that pool.", - "allOf": [ - { - "$ref": "#/components/schemas/NameOrId" - } - ] } }, "required": [ @@ -11625,6 +11643,34 @@ "items" ] }, + "IpPoolResource": { + "description": "Parameters for associating an IP pool with a resource (fleet, silo)", + "type": "object", + "properties": { + "is_default": { + "type": "boolean" + }, + "resource_id": { + "type": "string", + "format": "uuid" + }, + "resource_type": { + "$ref": "#/components/schemas/IpPoolResourceType" + } + }, + "required": [ + "is_default", + "resource_id", + "resource_type" + ] + }, + "IpPoolResourceType": { + "type": "string", + "enum": [ + "fleet", + "silo" + ] + }, "IpPoolResultsPage": { "description": "A single page of results", "type": "object", From 7259531cf975ccd6ecda611243152dfcd411fb97 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 11 Oct 2023 11:43:57 -0500 Subject: [PATCH 07/67] use joinable to make our joins more cute --- nexus/db-model/src/schema.rs | 1 + nexus/db-queries/src/db/datastore/ip_pool.rs | 10 ++-------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index c1ba3f4299..4995e40286 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1149,6 +1149,7 @@ joinable!(system_update_component_update -> component_update (component_update_i allow_tables_to_appear_in_same_query!(ip_pool_range, ip_pool, ip_pool_resource); joinable!(ip_pool_range -> ip_pool (ip_pool_id)); +joinable!(ip_pool_resource -> ip_pool (ip_pool_id)); allow_tables_to_appear_in_same_query!( dataset, diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 9feaef0c14..1ec724002f 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -100,10 +100,7 @@ impl DataStore { // 
join ip_pool to ip_pool_resource and filter ip_pool::table - .inner_join( - ip_pool_resource::table - .on(ip_pool::id.eq(ip_pool_resource::ip_pool_id)), - ) + .inner_join(ip_pool_resource::table) .filter( (ip_pool_resource::resource_type .eq(IpPoolResourceType::Silo) @@ -143,10 +140,7 @@ impl DataStore { // We assume there is only one pool for that silo, or at least, // if there is more than one, it doesn't matter which one we pick. let (authz_pool, pool) = ip_pool::table - .inner_join( - ip_pool_resource::table - .on(ip_pool::id.eq(ip_pool_resource::ip_pool_id)), - ) + .inner_join(ip_pool_resource::table) .filter(ip_pool::time_deleted.is_null()) .filter( ip_pool_resource::resource_type From 4c8c5ea99c7c7b92dcfb717b14714f60db37a4c6 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 11 Oct 2023 13:52:36 -0500 Subject: [PATCH 08/67] get all ip_pools integration tests passing --- nexus/db-queries/src/db/datastore/ip_pool.rs | 30 ++++++--- nexus/db-queries/src/db/datastore/project.rs | 29 ++++++--- nexus/src/app/ip_pool.rs | 17 ++++- nexus/test-utils/src/resource_helpers.rs | 2 +- nexus/tests/integration_tests/ip_pools.rs | 65 +++++++++++++------- 5 files changed, 97 insertions(+), 46 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 1ec724002f..a82bcc168d 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -49,26 +49,36 @@ impl DataStore { opctx: &OpContext, pagparams: &PaginatedBy<'_>, ) -> ListResultVec { - use db::schema::ip_pool::dsl; + use db::schema::ip_pool; + use db::schema::ip_pool_resource; + opctx .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) .await?; - // TODO: boy I hope we can paginate a join without too much trouble match pagparams { PaginatedBy::Id(pagparams) => { - paginated(dsl::ip_pool, dsl::id, pagparams) + paginated(ip_pool::table, ip_pool::id, pagparams) } PaginatedBy::Name(pagparams) => paginated( - dsl::ip_pool, - dsl::name, + ip_pool::table, + ip_pool::name, &pagparams.map_name(|n| Name::ref_cast(n)), ), } - // != excludes nulls so we explicitly include them - // TODO: join to join table to exclude internal - // .filter(dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null())) - .filter(dsl::time_deleted.is_null()) - .select(db::model::IpPool::as_select()) + // TODO: make sure this join is compatible with pagination logic + .left_outer_join(ip_pool_resource::table) + // TODO: this filter is so unwieldy and confusing (see all the checks + // at the nexus layer too) that it makes me want to just put a boolean + // internal column on the ip_pool table + .filter( + ip_pool_resource::resource_id + .ne(*INTERNAL_SILO_ID) + // resource_id is not nullable -- null here means the + // pool has no entry in the join table + .or(ip_pool_resource::resource_id.is_null()), + ) + .filter(ip_pool::time_deleted.is_null()) + .select(IpPool::as_select()) .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) diff --git a/nexus/db-queries/src/db/datastore/project.rs b/nexus/db-queries/src/db/datastore/project.rs index 97abbbe102..f2b5ce1aed 100644 --- a/nexus/db-queries/src/db/datastore/project.rs +++ b/nexus/db-queries/src/db/datastore/project.rs @@ -28,6 +28,7 @@ use crate::db::pagination::paginated; use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; use chrono::Utc; use diesel::prelude::*; +use nexus_db_model::IpPoolResourceType; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DeleteResult; @@ -337,24 +338,32 @@ impl DataStore { authz_project: &authz::Project, pagparams: &PaginatedBy<'_>, ) -> ListResultVec { - use db::schema::ip_pool::dsl; + use db::schema::ip_pool; + use db::schema::ip_pool_resource; + opctx.authorize(authz::Action::ListChildren, authz_project).await?; + + let silo_id = opctx.authn.silo_required().unwrap().id(); + match pagparams { PaginatedBy::Id(pagparams) => { - paginated(dsl::ip_pool, dsl::id, pagparams) + paginated(ip_pool::table, ip_pool::id, pagparams) } PaginatedBy::Name(pagparams) => paginated( - dsl::ip_pool, - dsl::name, + ip_pool::table, + ip_pool::name, &pagparams.map_name(|n| Name::ref_cast(n)), ), } - // TODO(2148, 2056): filter only pools accessible by the given - // project, once specific projects for pools are implemented - // != excludes nulls so we explicitly include them - // TODO: filter out internal the right way - // .filter(dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null())) - .filter(dsl::time_deleted.is_null()) + // TODO: make sure this join is compatible with pagination logic + .inner_join(ip_pool_resource::table) + .filter( + (ip_pool_resource::resource_type + .eq(IpPoolResourceType::Silo) + .and(ip_pool_resource::resource_id.eq(silo_id))) + .or(ip_pool_resource::resource_type.eq(IpPoolResourceType::Fleet)), + ) + .filter(ip_pool::time_deleted.is_null()) .select(db::model::IpPool::as_select()) .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 22b99a3b07..d04c60b622 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -11,7 +11,7 @@ use nexus_db_model::IpPool; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; -use nexus_db_queries::db::fixed_data::silo::INTERNAL_SILO_ID; +// use nexus_db_queries::db::fixed_data::silo::INTERNAL_SILO_ID; use nexus_db_queries::db::lookup; use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::model::Name; @@ -27,7 +27,7 @@ use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use ref_cast::RefCast; -fn is_internal(pool: &IpPool) -> bool { +fn is_internal(_pool: &IpPool) -> bool { // pool.silo_id == Some(*INTERNAL_SILO_ID) // TODO: this is no longer a simple function of the pool itself false @@ -71,6 +71,19 @@ impl super::Nexus { // TODO: check for perms on specified resource let (.., authz_pool) = pool_lookup.lookup_for(authz::Action::Modify).await?; + match ip_pool_resource.resource_type { + params::IpPoolResourceType::Silo => { + self.silo_lookup( + &opctx, + NameOrId::Id(ip_pool_resource.resource_id), + )? 
+ .lookup_for(authz::Action::Read) + .await?; + } + params::IpPoolResourceType::Fleet => { + // hope we don't need to be assured of the fleet's existence + } + }; self.db_datastore .ip_pool_associate_resource( opctx, diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index ffbfca1d8e..d2bcf5fb86 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -145,7 +145,7 @@ pub async fn create_ip_pool( ) .await; - // match previous behavior, makes pool available for use anywhere in fleet + // make pool available for use anywhere in fleet let _: () = object_create( client, &format!("/v1/system/ip-pools/{pool_name}/associate"), diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 5084ab4f6f..3729437c78 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -8,6 +8,7 @@ use dropshot::test_util::ClientTestContext; use dropshot::HttpErrorResponseBody; use http::method::Method; use http::StatusCode; +use nexus_db_queries::db::fixed_data::FLEET_ID; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; @@ -17,6 +18,7 @@ use nexus_test_utils::resource_helpers::{ create_instance, create_instance_with, }; use nexus_test_utils_macros::nexus_test; +use nexus_types::external_api::params; use nexus_types::external_api::params::ExternalIpCreate; use nexus_types::external_api::params::InstanceDiskAttachment; use nexus_types::external_api::params::InstanceNetworkInterfaceAttachment; @@ -33,6 +35,7 @@ use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; use omicron_nexus::TestInterfaces; use sled_agent_client::TestInterfaces as SledTestInterfaces; use std::collections::HashSet; +use uuid::Uuid; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; @@ -59,8 +62,7 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { .expect("Failed to list IP Pools") .all_items; assert_eq!(ip_pools.len(), 1, "Expected to see default IP pool"); - - assert_eq!(ip_pools[0].identity.name, "default",); + assert_eq!(ip_pools[0].identity.name, "default"); // assert_eq!(ip_pools[0].silo_id, None); // assert!(ip_pools[0].is_default); @@ -305,30 +307,33 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { let _created_pool = create_pool(client, ¶ms).await; // assert_eq!(created_pool.silo_id.unwrap(), silo_id); - // expect 404 if the specified silo doesn't exist - let bad_silo_params = IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: String::from("p2").parse().unwrap(), - description: String::from(""), - }, - // silo: Some(NameOrId::Name( - // String::from("not-a-thing").parse().unwrap(), - // )), - // is_default: false, + let nonexistent_silo_id = Uuid::new_v4(); + // expect 404 on association if the specified silo doesn't exist + let params = params::IpPoolResource { + resource_id: nonexistent_silo_id, + resource_type: params::IpPoolResourceType::Silo, + is_default: false, }; - let error: HttpErrorResponseBody = NexusRequest::new( - RequestBuilder::new(client, Method::POST, "/v1/system/ip-pools") - .body(Some(&bad_silo_params)) - .expect_status(Some(StatusCode::NOT_FOUND)), + let error = NexusRequest::new( + RequestBuilder::new( + client, + Method::POST, + "/v1/system/ip-pools/p1/associate", + ) + .body(Some(¶ms)) + 
.expect_status(Some(StatusCode::NOT_FOUND)), ) .authn_as(AuthnMode::PrivilegedUser) .execute() .await .unwrap() - .parsed_body() + .parsed_body::() .unwrap(); - assert_eq!(error.message, "not found: silo with name \"not-a-thing\""); + assert_eq!( + error.message, + format!("not found: silo with id \"{nonexistent_silo_id}\"") + ); } async fn create_pool( @@ -691,14 +696,28 @@ async fn test_ip_pool_list_usable_by_project( name: String::from(mypool_name).parse().unwrap(), description: String::from("right on cue"), }, - // silo: None, - // is_default: false, }; NexusRequest::objects_post(client, ip_pools_url, ¶ms) .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap(); + .execute_and_parse_unwrap::() + .await; + + // add to fleet since we can't add to project yet + // TODO: could do silo, might as well? need the ID, though. at least + // until I make it so you can specify the resource by name + let params = params::IpPoolResource { + resource_id: *FLEET_ID, + resource_type: params::IpPoolResourceType::Fleet, + is_default: false, + }; + let _ = NexusRequest::objects_post( + client, + &format!("/v1/system/ip-pools/{mypool_name}/associate"), + ¶ms, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await; // Add an IP range to mypool let mypool_range = IpRange::V4( From 53860437a76e67b03835b9433549365aea176282 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 11 Oct 2023 17:58:05 -0500 Subject: [PATCH 09/67] instance ephemeral IP test passes --- nexus/src/external_api/http_entrypoints.rs | 4 ++-- nexus/test-utils/src/resource_helpers.rs | 2 +- nexus/tests/integration_tests/instances.rs | 14 +++++++++++--- nexus/types/src/external_api/views.rs | 4 ++++ openapi/nexus.json | 11 +++++++++-- 5 files changed, 27 insertions(+), 8 deletions(-) diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 5f502306e8..ea5d3b5159 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -1315,7 +1315,7 @@ async fn ip_pool_associate( path_params: Path, resource_assoc: TypedBody, // TODO: what does this return? 
Returning the association record seems silly -) -> Result { +) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -1326,7 +1326,7 @@ async fn ip_pool_associate( nexus .ip_pool_associate_resource(&opctx, &pool_lookup, &resource_assoc) .await?; - Ok(HttpResponseUpdatedNoContent()) + Ok(HttpResponseCreated(views::IpPoolResource {})) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index d2bcf5fb86..a0d1dbfb48 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -146,7 +146,7 @@ pub async fn create_ip_pool( .await; // make pool available for use anywhere in fleet - let _: () = object_create( + let _assoc: views::IpPoolResource = object_create( client, &format!("/v1/system/ip-pools/{pool_name}/associate"), ¶ms::IpPoolResource { diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index fff61bd7c4..d6d91efba0 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -44,7 +44,6 @@ use omicron_common::api::external::InstanceNetworkInterface; use omicron_common::api::external::InstanceState; use omicron_common::api::external::Ipv4Net; use omicron_common::api::external::Name; -use omicron_common::api::external::NameOrId; use omicron_common::api::external::Vni; use omicron_nexus::app::MAX_MEMORY_BYTES_PER_INSTANCE; use omicron_nexus::app::MAX_VCPU_PER_INSTANCE; @@ -3416,11 +3415,20 @@ async fn test_instance_ephemeral_ip_from_correct_pool( name: pool_name.parse().unwrap(), description: String::from("an ip pool"), }, - // silo: Some(NameOrId::Id(DEFAULT_SILO.id())), - // is_default: true, }, ) .await; + let params = params::IpPoolResource { + resource_id: DEFAULT_SILO.id(), + resource_type: params::IpPoolResourceType::Silo, + is_default: true, + }; + let assoc_url = format!("/v1/system/ip-pools/{pool_name}/associate"); + let _ = NexusRequest::objects_post(client, &assoc_url, ¶ms) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await; + let silo_pool_range = IpRange::V4( Ipv4Range::new( std::net::Ipv4Addr::new(10, 2, 0, 1), diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index c5be6c565d..c2cba6f3e0 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -246,6 +246,10 @@ pub struct IpPool { pub identity: IdentityMetadata, } +// TODO: placeholder response for IP pool associate POST +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct IpPoolResource {} + #[derive(Clone, Copy, Debug, Deserialize, Serialize, JsonSchema)] pub struct IpPoolRange { pub id: Uuid, diff --git a/openapi/nexus.json b/openapi/nexus.json index 23948208da..b7a7e1b422 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -4606,8 +4606,15 @@ "required": true }, "responses": { - "204": { - "description": "resource updated" + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolResource" + } + } + } }, "4XX": { "$ref": "#/components/responses/Error" From a1eb6e44b555f5e06207712792c896042403928e Mon Sep 17 00:00:00 2001 From: David Crespo Date: Thu, 12 Oct 2023 12:08:20 -0500 Subject: [PATCH 10/67] shuffle migration files in anticipation of merging main --- 
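Note: renumbering only, in anticipation of main claiming 7.0.0. The
ip_pool_resource migration steps move to 8.0.0, and the two later steps
(formerly 7.0.0/up6.sql and up7.sql) become their own 8.0.1 version:

    schema/crdb/8.0.0/up1.sql .. up5.sql   (were 7.0.0/up1 .. up5)
    schema/crdb/8.0.1/up1.sql, up2.sql     (were 7.0.0/up6, up7)

File contents are unchanged; only SCHEMA_VERSION and the version string
seeded into db_metadata move to 8.0.1.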
nexus/db-model/src/schema.rs | 2 +- schema/crdb/{7.0.0 => 8.0.0}/up1.sql | 0 schema/crdb/{7.0.0 => 8.0.0}/up2.sql | 0 schema/crdb/{7.0.0 => 8.0.0}/up3.sql | 0 schema/crdb/{7.0.0 => 8.0.0}/up4.sql | 0 schema/crdb/{7.0.0 => 8.0.0}/up5.sql | 0 schema/crdb/{7.0.0/up6.sql => 8.0.1/up1.sql} | 0 schema/crdb/{7.0.0/up7.sql => 8.0.1/up2.sql} | 0 schema/crdb/dbinit.sql | 2 +- 9 files changed, 2 insertions(+), 2 deletions(-) rename schema/crdb/{7.0.0 => 8.0.0}/up1.sql (100%) rename schema/crdb/{7.0.0 => 8.0.0}/up2.sql (100%) rename schema/crdb/{7.0.0 => 8.0.0}/up3.sql (100%) rename schema/crdb/{7.0.0 => 8.0.0}/up4.sql (100%) rename schema/crdb/{7.0.0 => 8.0.0}/up5.sql (100%) rename schema/crdb/{7.0.0/up6.sql => 8.0.1/up1.sql} (100%) rename schema/crdb/{7.0.0/up7.sql => 8.0.1/up2.sql} (100%) diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 4995e40286..f2f4401369 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1138,7 +1138,7 @@ table! { /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(7, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(8, 0, 1); allow_tables_to_appear_in_same_query!( system_update, diff --git a/schema/crdb/7.0.0/up1.sql b/schema/crdb/8.0.0/up1.sql similarity index 100% rename from schema/crdb/7.0.0/up1.sql rename to schema/crdb/8.0.0/up1.sql diff --git a/schema/crdb/7.0.0/up2.sql b/schema/crdb/8.0.0/up2.sql similarity index 100% rename from schema/crdb/7.0.0/up2.sql rename to schema/crdb/8.0.0/up2.sql diff --git a/schema/crdb/7.0.0/up3.sql b/schema/crdb/8.0.0/up3.sql similarity index 100% rename from schema/crdb/7.0.0/up3.sql rename to schema/crdb/8.0.0/up3.sql diff --git a/schema/crdb/7.0.0/up4.sql b/schema/crdb/8.0.0/up4.sql similarity index 100% rename from schema/crdb/7.0.0/up4.sql rename to schema/crdb/8.0.0/up4.sql diff --git a/schema/crdb/7.0.0/up5.sql b/schema/crdb/8.0.0/up5.sql similarity index 100% rename from schema/crdb/7.0.0/up5.sql rename to schema/crdb/8.0.0/up5.sql diff --git a/schema/crdb/7.0.0/up6.sql b/schema/crdb/8.0.1/up1.sql similarity index 100% rename from schema/crdb/7.0.0/up6.sql rename to schema/crdb/8.0.1/up1.sql diff --git a/schema/crdb/7.0.0/up7.sql b/schema/crdb/8.0.1/up2.sql similarity index 100% rename from schema/crdb/7.0.0/up7.sql rename to schema/crdb/8.0.1/up2.sql diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 1b60bbb68c..bcc6885db0 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -2557,7 +2557,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '7.0.0', NULL) + ( TRUE, NOW(), NOW(), '8.0.1', NULL) ON CONFLICT DO NOTHING; CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_resource_type AS ENUM ( From 49c9927e0ded36582872f52b0c9bfda7996a0a1f Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 18 Oct 2023 12:06:08 -0500 Subject: [PATCH 11/67] unauthorized test --- nexus/tests/integration_tests/endpoints.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 0f7a0d6070..11878a175e 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -467,6 +467,13 @@ lazy_static! 
{ description: Some(String::from("a new IP pool")), }, }; + pub static ref DEMO_IP_POOL_ASSOC_URL: String = format!("{}/associate", *DEMO_IP_POOL_URL); + pub static ref DEMO_IP_POOL_ASSOC_BODY: params::IpPoolResource = + params::IpPoolResource { + resource_id: DEFAULT_SILO.identity().id, + resource_type: params::IpPoolResourceType::Silo, + is_default: false, + }; pub static ref DEMO_IP_POOL_RANGE: IpRange = IpRange::V4(Ipv4Range::new( std::net::Ipv4Addr::new(10, 0, 0, 0), std::net::Ipv4Addr::new(10, 0, 0, 255), @@ -767,6 +774,16 @@ lazy_static! { ], }, + // IP pool resource association endpoint + VerifyEndpoint { + url: &DEMO_IP_POOL_ASSOC_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Post(serde_json::to_value(&*DEMO_IP_POOL_ASSOC_BODY).unwrap()) + ], + }, + // IP Pool ranges endpoint VerifyEndpoint { url: &DEMO_IP_POOL_RANGES_URL, From 9c8d2d5e4f42c196b217d4b7cab0034a953ab7dc Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 18 Oct 2023 14:00:25 -0500 Subject: [PATCH 12/67] test for pagination logic --- nexus/tests/integration_tests/ip_pools.rs | 82 ++++++++++++++++------- 1 file changed, 58 insertions(+), 24 deletions(-) diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 4a0e247706..d4057fd761 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -282,33 +282,21 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; // can create a pool with an existing silo by name - let params = IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: String::from("p0").parse().unwrap(), - description: String::from(""), - }, - // silo: Some(NameOrId::Name(cptestctx.silo_name.clone())), - // is_default: false, - }; - let _created_pool = create_pool(client, ¶ms).await; + // TODO: confirm association post works with existing silo ID + // silo: Some(NameOrId::Name(cptestctx.silo_name.clone())), + // is_default: false, + let _created_pool = create_pool(client, "p0").await; // let silo_id = // created_pool.silo_id.expect("Expected pool to have a silo_id"); // now we'll create another IP pool using that silo ID - let params = IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: String::from("p1").parse().unwrap(), - description: String::from(""), - }, - // silo: Some(NameOrId::Id(silo_id)), - // is_default: false, - }; - let _created_pool = create_pool(client, ¶ms).await; + let _created_pool = create_pool(client, "p1").await; // assert_eq!(created_pool.silo_id.unwrap(), silo_id); + // TODO: confirm a second pool can be assocatied with the same silo ID - let nonexistent_silo_id = Uuid::new_v4(); // expect 404 on association if the specified silo doesn't exist + let nonexistent_silo_id = Uuid::new_v4(); let params = params::IpPoolResource { resource_id: nonexistent_silo_id, resource_type: params::IpPoolResourceType::Silo, @@ -336,11 +324,57 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { ); } -async fn create_pool( - client: &ClientTestContext, - params: &IpPoolCreate, -) -> IpPool { - NexusRequest::objects_post(client, "/v1/system/ip-pools", params) +// IP pool list fetch logic includes a join to ip_pool_resource, which is +// unusual, so we want to make sure pagination logic still works +#[nexus_test] +async fn test_ip_pool_pagination(cptestctx: &ControlPlaneTestContext) { + let client = 
&cptestctx.external_client; + let base_url = "/v1/system/ip-pools"; + let first_page = objects_list_page_authz::(client, &base_url).await; + + // we start out with one pool, and it's the default pool + assert_eq!(first_page.items.len(), 1); + assert_eq!(first_page.items[0].identity.name, "default"); + + let mut pool_names = vec!["default".to_string()]; + + // create more pools to work with, adding their names to the list so we + // can use it to check order + for i in 1..=8 { + let name = format!("other-pool-{}", i); + pool_names.push(name.clone()); + create_pool(client, &name).await; + } + + let first_five_url = format!("{}?limit=5", base_url); + let first_five = + objects_list_page_authz::(client, &first_five_url).await; + assert!(first_five.next_page.is_some()); + assert_eq!(get_names(first_five.items), &pool_names[0..5]); + + let next_page_url = format!( + "{}?limit=5&page_token={}", + base_url, + first_five.next_page.unwrap() + ); + let next_page = + objects_list_page_authz::(client, &next_page_url).await; + assert_eq!(get_names(next_page.items), &pool_names[5..9]); +} + +/// helper to make tests less ugly +fn get_names(pools: Vec) -> Vec { + pools.iter().map(|p| p.identity.name.to_string()).collect() +} + +async fn create_pool(client: &ClientTestContext, name: &str) -> IpPool { + let params = IpPoolCreate { + identity: IdentityMetadataCreateParams { + name: Name::try_from(name.to_string()).unwrap(), + description: "".to_string(), + }, + }; + NexusRequest::objects_post(client, "/v1/system/ip-pools", ¶ms) .authn_as(AuthnMode::PrivilegedUser) .execute() .await From ff3a4e2c3c0327bcf37f88b1c53fdeb8bdf8135b Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 18 Oct 2023 16:17:40 -0500 Subject: [PATCH 13/67] exclude service pool from list and block delete/update/modification --- nexus/db-queries/src/db/datastore/ip_pool.rs | 50 ++++++++++---- nexus/src/app/ip_pool.rs | 73 +++++++++++++------- nexus/src/external_api/http_entrypoints.rs | 2 + nexus/tests/integration_tests/ip_pools.rs | 72 +++++++++++++++++-- 4 files changed, 153 insertions(+), 44 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 2a10603b16..6be2c60d45 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -64,11 +64,7 @@ impl DataStore { &pagparams.map_name(|n| Name::ref_cast(n)), ), } - // TODO: make sure this join is compatible with pagination logic .left_outer_join(ip_pool_resource::table) - // TODO: this filter is so unwieldy and confusing (see all the checks - // at the nexus layer too) that it makes me want to just put a boolean - // internal column on the ip_pool table .filter( ip_pool_resource::resource_id .ne(*INTERNAL_SILO_ID) @@ -235,11 +231,6 @@ impl DataStore { // in between the above check for children and this query. 
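        // Roughly:
        //   UPDATE ip_pool SET time_deleted = <now>
        //   WHERE id = <pool id> AND time_deleted IS NULL
        //     AND rcgen = <rcgen read above>;
        // Zero rows updated means either a concurrent delete or that a
        // range insertion bumped rcgen since our child check, and we bail
        // out rather than soft-delete a pool with a brand-new range.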
let now = Utc::now(); let updated_rows = diesel::update(dsl::ip_pool) - // != excludes nulls so we explicitly include them - // TODO: - // .filter( - // dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null()), - // ) .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_pool.id())) .filter(dsl::rcgen.eq(db_pool.rcgen)) @@ -262,6 +253,41 @@ impl DataStore { Ok(()) } + /// Check whether the pool is internal by checking that it exists and is + /// associated with the internal silo + pub async fn ip_pool_is_internal( + &self, + opctx: &OpContext, + authz_pool: &authz::IpPool, + ) -> LookupResult { + use db::schema::ip_pool; + use db::schema::ip_pool_resource; + + let result = ip_pool::table + .inner_join(ip_pool_resource::table) + .filter(ip_pool::id.eq(authz_pool.id())) + .filter( + ip_pool_resource::resource_type + .eq(IpPoolResourceType::Silo) + .and(ip_pool_resource::resource_id.eq(*INTERNAL_SILO_ID)), + ) + .filter(ip_pool::time_deleted.is_null()) + // TODO: order by most specific first so we get the most specific + // when we select the first one. alphabetical desc technically + // works but come on. won't work when we have project association + .order(ip_pool_resource::resource_type.desc()) + .select(IpPool::as_select()) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + // if there is a result, the pool is associated with the internal silo, + // which makes it the internal pool + Ok(result.len() > 0) + } + pub async fn ip_pool_update( &self, opctx: &OpContext, @@ -270,12 +296,8 @@ impl DataStore { ) -> UpdateResult { use db::schema::ip_pool::dsl; opctx.authorize(authz::Action::Modify, authz_pool).await?; + diesel::update(dsl::ip_pool) - // != excludes nulls so we explicitly include them - // TODO: exclude internal pool the right way - // .filter( - // dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null()), - // ) .filter(dsl::id.eq(authz_pool.id())) .filter(dsl::time_deleted.is_null()) .set(updates) diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index d04c60b622..8f3bb908aa 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -7,7 +7,6 @@ use crate::external_api::params; use crate::external_api::shared::IpRange; use ipnetwork::IpNetwork; -use nexus_db_model::IpPool; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; @@ -27,10 +26,20 @@ use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use ref_cast::RefCast; -fn is_internal(_pool: &IpPool) -> bool { - // pool.silo_id == Some(*INTERNAL_SILO_ID) - // TODO: this is no longer a simple function of the pool itself - false +/// Helper to make it easier to 404 on attempts to manipulate internal pools +fn not_found_from_lookup(pool_lookup: &lookup::IpPool<'_>) -> Error { + match pool_lookup { + lookup::IpPool::Name(_, name) => { + Error::not_found_by_name(ResourceType::IpPool, &name) + } + lookup::IpPool::OwnedName(_, name) => { + Error::not_found_by_name(ResourceType::IpPool, &name) + } + lookup::IpPool::PrimaryKey(_, id) => { + Error::not_found_by_id(ResourceType::IpPool, &id) + } + lookup::IpPool::Error(_, error) => error.to_owned(), + } } impl super::Nexus { @@ -115,6 +124,13 @@ impl super::Nexus { ) -> DeleteResult { let (.., authz_pool, db_pool) = pool_lookup.fetch_for(authz::Action::Delete).await?; + + let is_internal = + self.db_datastore.ip_pool_is_internal(opctx, &authz_pool).await?; + 
if is_internal { + return Err(not_found_from_lookup(pool_lookup)); + } + self.db_datastore.ip_pool_delete(opctx, &authz_pool, &db_pool).await } @@ -126,6 +142,13 @@ impl super::Nexus { ) -> UpdateResult { let (.., authz_pool) = pool_lookup.lookup_for(authz::Action::Modify).await?; + + let is_internal = + self.db_datastore.ip_pool_is_internal(opctx, &authz_pool).await?; + if is_internal { + return Err(not_found_from_lookup(pool_lookup)); + } + self.db_datastore .ip_pool_update(opctx, &authz_pool, updates.clone().into()) .await @@ -137,13 +160,13 @@ impl super::Nexus { pool_lookup: &lookup::IpPool<'_>, pagparams: &DataPageParams<'_, IpNetwork>, ) -> ListResultVec { - let (.., authz_pool, db_pool) = - pool_lookup.fetch_for(authz::Action::ListChildren).await?; - if is_internal(&db_pool) { - return Err(Error::not_found_by_name( - ResourceType::IpPool, - &db_pool.identity.name, - )); + let (.., authz_pool) = + pool_lookup.lookup_for(authz::Action::ListChildren).await?; + + let is_internal = + self.db_datastore.ip_pool_is_internal(opctx, &authz_pool).await?; + if is_internal { + return Err(not_found_from_lookup(pool_lookup)); } self.db_datastore @@ -157,13 +180,13 @@ impl super::Nexus { pool_lookup: &lookup::IpPool<'_>, range: &IpRange, ) -> UpdateResult { - let (.., authz_pool, db_pool) = + let (.., authz_pool, _db_pool) = pool_lookup.fetch_for(authz::Action::Modify).await?; - if is_internal(&db_pool) { - return Err(Error::not_found_by_name( - ResourceType::IpPool, - &db_pool.identity.name, - )); + + let is_internal = + self.db_datastore.ip_pool_is_internal(opctx, &authz_pool).await?; + if is_internal { + return Err(not_found_from_lookup(pool_lookup)); } self.db_datastore.ip_pool_add_range(opctx, &authz_pool, range).await } @@ -174,14 +197,16 @@ impl super::Nexus { pool_lookup: &lookup::IpPool<'_>, range: &IpRange, ) -> DeleteResult { - let (.., authz_pool, db_pool) = + let (.., authz_pool, _db_pool) = pool_lookup.fetch_for(authz::Action::Modify).await?; - if is_internal(&db_pool) { - return Err(Error::not_found_by_name( - ResourceType::IpPool, - &db_pool.identity.name, - )); + + let is_internal = + self.db_datastore.ip_pool_is_internal(opctx, &authz_pool).await?; + + if is_internal { + return Err(not_found_from_lookup(pool_lookup)); } + self.db_datastore.ip_pool_delete_range(opctx, &authz_pool, range).await } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 40a81c3de0..54c5c56fb6 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -1248,6 +1248,8 @@ async fn ip_pool_view( let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let nexus = &apictx.nexus; let pool_selector = path_params.into_inner().pool; + // We do not prevent the service pool from being fetched by name or ID + // like we do for update, delete, associate. 
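+        // Listing, by contrast, does filter it out (see ip_pools_list),
+        // so the service pool is reachable only by direct fetch.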
let (.., pool) = nexus.ip_pool_lookup(&opctx, &pool_selector)?.fetch().await?; Ok(HttpResponseOk(IpPool::from(pool))) diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index d4057fd761..ef31607a14 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -8,6 +8,7 @@ use dropshot::test_util::ClientTestContext; use dropshot::HttpErrorResponseBody; use http::method::Method; use http::StatusCode; +use nexus_db_queries::db::datastore::SERVICE_IP_POOL_NAME; use nexus_db_queries::db::fixed_data::FLEET_ID; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; @@ -277,6 +278,68 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { .expect("Expected to be able to delete an empty IP Pool"); } +/// The internal IP pool, defined by its association with the internal silo, +/// cannot be interacted with through the operator API. CRUD operations should +/// all 404 except fetch by name or ID. +#[nexus_test] +async fn test_ip_pool_service_no_cud(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + let internal_pool_name_url = + format!("/v1/system/ip-pools/{}", (*SERVICE_IP_POOL_NAME).to_string()); + + // we can fetch the service pool by name or ID + let pool = NexusRequest::object_get(client, &internal_pool_name_url) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await; + + let internal_pool_id_url = + format!("/v1/system/ip-pools/{}", pool.identity.id); + let pool = NexusRequest::object_get(client, &internal_pool_id_url) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await; + + // but it does not come back in the list. there's one in there and it's the default + let pools = + objects_list_page_authz::(client, "/v1/system/ip-pools").await; + assert_eq!(pools.items.len(), 1); + assert_ne!(pools.items[0].identity.id, pool.identity.id); + + // deletes fail + + let error = NexusRequest::expect_failure( + client, + StatusCode::NOT_FOUND, + Method::DELETE, + &internal_pool_name_url, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await; + assert_eq!( + error.message, + "not found: ip-pool with name \"oxide-service-pool\"" + ); + + let error = NexusRequest::expect_failure( + client, + StatusCode::NOT_FOUND, + Method::DELETE, + &internal_pool_id_url, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await; + assert_eq!( + error.message, + format!("not found: ip-pool with id \"{}\"", pool.identity.id) + ); + + // TODO: update, assoc, dissoc, add/remove range by name or ID should all fail +} + #[nexus_test] async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; @@ -832,13 +895,10 @@ async fn test_ip_pool_list_usable_by_project( "{}/{}?project={}", scoped_ip_pools_url, pool_name, PROJECT_NAME ); - let pool: IpPool = NexusRequest::object_get(client, &view_pool_url) + let pool = NexusRequest::object_get(client, &view_pool_url) .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + .execute_and_parse_unwrap::() + .await; assert_eq!(pool.identity.name.as_str(), pool_name.as_str()); } From db29bd3c5da021ed715f367c4c17af26b69a012a Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 18 Oct 2023 17:10:12 -0500 Subject: [PATCH 14/67] fix the tests! 
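Two kinds of breakage: the omdb expected-output fixtures still said 7.0.0,
and rack setup was not idempotent -- a second run would trip over the pools
and associations created by the first. The datastore insert also gains an
on_conflict_do_nothing. The create-then-associate shape used in rack.rs is
roughly (sketch, not verbatim):

    let created = self
        .ip_pool_create(opctx, pool)
        .await
        .map(|_| true)
        .or_else(|e| match e {
            // an existing pool is fine; just skip the association
            Error::ObjectAlreadyExists { .. } => Ok(false),
            _ => Err(e),
        })?;
    if created {
        // associate the pool as default for its silo/fleet
    }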
--- dev-tools/omdb/tests/env.out | 6 +- dev-tools/omdb/tests/successes.out | 12 ++-- nexus/db-queries/src/db/datastore/ip_pool.rs | 3 + nexus/db-queries/src/db/datastore/rack.rs | 75 ++++++++++++-------- nexus/tests/integration_tests/ip_pools.rs | 7 +- 5 files changed, 63 insertions(+), 40 deletions(-) diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out index 8e345b78d1..468432ec58 100644 --- a/dev-tools/omdb/tests/env.out +++ b/dev-tools/omdb/tests/env.out @@ -7,7 +7,7 @@ sim-b6d65341 [::1]:REDACTED_PORT - REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.1) ============================================= EXECUTING COMMAND: omdb ["db", "--db-url", "junk", "sleds"] termination: Exited(2) @@ -172,7 +172,7 @@ stderr: note: database URL not specified. Will search DNS. note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.1) ============================================= EXECUTING COMMAND: omdb ["--dns-server", "[::1]:REDACTED_PORT", "db", "sleds"] termination: Exited(0) @@ -185,5 +185,5 @@ stderr: note: database URL not specified. Will search DNS. note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.1) ============================================= diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 6fd84c5eb3..7ba47e7ddb 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -8,7 +8,7 @@ external oxide-dev.test 2 create silo: "tes --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.1) ============================================= EXECUTING COMMAND: omdb ["db", "dns", "diff", "external", "2"] termination: Exited(0) @@ -24,7 +24,7 @@ changes: names added: 1, names removed: 0 --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.1) ============================================= EXECUTING COMMAND: omdb ["db", "dns", "names", "external", "2"] termination: Exited(0) @@ -36,7 +36,7 @@ External zone: oxide-dev.test --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.1) ============================================= EXECUTING COMMAND: omdb ["db", "services", "list-instances"] termination: Exited(0) @@ -52,7 +52,7 @@ Nexus REDACTED_UUID_REDACTED_UUID_REDACTED [::ffff:127.0.0.1]:REDACTED_ --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable 
-note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.1) ============================================= EXECUTING COMMAND: omdb ["db", "services", "list-by-sled"] termination: Exited(0) @@ -71,7 +71,7 @@ sled: sim-b6d65341 (id REDACTED_UUID_REDACTED_UUID_REDACTED) --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.1) ============================================= EXECUTING COMMAND: omdb ["db", "sleds"] termination: Exited(0) @@ -82,7 +82,7 @@ sim-b6d65341 [::1]:REDACTED_PORT - REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.1) ============================================= EXECUTING COMMAND: omdb ["mgs", "inventory"] termination: Exited(0) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 6be2c60d45..abc5286c23 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -322,9 +322,12 @@ impl DataStore { .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) .await?; + // TODO: make sure on_conflict_do_nothing doesn't suppress the error for + // trying to give a silo two default pools diesel::insert_into(dsl::ip_pool_resource) .values(ip_pool_resource.clone()) .returning(IpPoolResource::as_returning()) + .on_conflict_do_nothing() .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index d756a73362..4aca246020 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -556,7 +556,9 @@ impl DataStore { ) -> Result<(), Error> { use omicron_common::api::external::Name; + dbg!("a"); self.rack_insert(opctx, &db::model::Rack::new(rack_id)).await?; + dbg!("b"); let internal_pool = db::model::IpPool::new(&IdentityMetadataCreateParams { @@ -564,25 +566,31 @@ impl DataStore { description: String::from("IP Pool for Oxide Services"), }); - self.ip_pool_create(opctx, internal_pool.clone()) + let internal_pool_id = internal_pool.id(); + + let internal_created = self + .ip_pool_create(opctx, internal_pool) .await - .map(|_| ()) + .map(|_| true) .or_else(|e| match e { - Error::ObjectAlreadyExists { .. } => Ok(()), + Error::ObjectAlreadyExists { .. } => Ok(false), _ => Err(e), })?; - // make default for the internal silo - self.ip_pool_associate_resource( - opctx, - db::model::IpPoolResource { - ip_pool_id: internal_pool.id(), - resource_type: db::model::IpPoolResourceType::Silo, - resource_id: *INTERNAL_SILO_ID, - is_default: true, - }, - ) - .await?; + // make default for the internal silo. 
only need to do this if + // the create went through, i.e., if it wasn't already there + if internal_created { + self.ip_pool_associate_resource( + opctx, + db::model::IpPoolResource { + ip_pool_id: internal_pool_id, + resource_type: db::model::IpPoolResourceType::Silo, + resource_id: *INTERNAL_SILO_ID, + is_default: true, + }, + ) + .await?; + } let default_pool = db::model::IpPool::new(&IdentityMetadataCreateParams { @@ -590,24 +598,31 @@ impl DataStore { description: String::from("default IP pool"), }); - // make pool default for fleet - self.ip_pool_associate_resource( - opctx, - db::model::IpPoolResource { - ip_pool_id: default_pool.id(), - resource_type: db::model::IpPoolResourceType::Fleet, - resource_id: *FLEET_ID, - is_default: true, - }, - ) - .await?; + let default_pool_id = default_pool.id(); - self.ip_pool_create(opctx, default_pool).await.map(|_| ()).or_else( - |e| match e { - Error::ObjectAlreadyExists { .. } => Ok(()), + let default_created = self + .ip_pool_create(opctx, default_pool) + .await + .map(|_| true) + .or_else(|e| match e { + Error::ObjectAlreadyExists { .. } => Ok(false), _ => Err(e), - }, - )?; + })?; + + // make pool default for fleet. only need to do this if the create went + // through, i.e., if it wasn't already there + if default_created { + self.ip_pool_associate_resource( + opctx, + db::model::IpPoolResource { + ip_pool_id: default_pool_id, + resource_type: db::model::IpPoolResourceType::Fleet, + resource_id: *FLEET_ID, + is_default: true, + }, + ) + .await?; + } Ok(()) } diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index ef31607a14..bb07a0486b 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -286,7 +286,7 @@ async fn test_ip_pool_service_no_cud(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; let internal_pool_name_url = - format!("/v1/system/ip-pools/{}", (*SERVICE_IP_POOL_NAME).to_string()); + format!("/v1/system/ip-pools/{}", SERVICE_IP_POOL_NAME); // we can fetch the service pool by name or ID let pool = NexusRequest::object_get(client, &internal_pool_name_url) @@ -385,6 +385,11 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { error.message, format!("not found: silo with id \"{nonexistent_silo_id}\"") ); + + // TODO: associating a resource that is already associated should be a noop + // and return a success message + + // TODO: trying to set a second default for a resource should fail } // IP pool list fetch logic includes a join to ip_pool_resource, which is From 9cbc4231893eba0599bdbff1082231d44cd8d200 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Thu, 19 Oct 2023 10:48:09 -0500 Subject: [PATCH 15/67] get conflict/update logic right around is_default --- common/src/api/external/mod.rs | 1 + nexus/db-queries/src/db/datastore/ip_pool.rs | 61 +++++++++++--------- nexus/db-queries/src/db/datastore/rack.rs | 2 - schema/crdb/dbinit.sql | 48 +++++++-------- 4 files changed, 58 insertions(+), 54 deletions(-) diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 53512408af..c7cabc5f97 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -711,6 +711,7 @@ pub enum ResourceType { LoopbackAddress, SwitchPortSettings, IpPool, + IpPoolResource, InstanceNetworkInterface, PhysicalDisk, Rack, diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index abc5286c23..e1d4ec5dcc 100644 --- 
a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -322,19 +322,36 @@ impl DataStore { .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) .await?; - // TODO: make sure on_conflict_do_nothing doesn't suppress the error for - // trying to give a silo two default pools diesel::insert_into(dsl::ip_pool_resource) .values(ip_pool_resource.clone()) + // We have two constraints that are relevant here, and we need to + // make this behave correctly with respect to both. If the entry + // matches an existing (ip pool, silo/fleet), we want to update + // is_default because we want to handle the case where someone is + // trying to change is_default on an existing association. But + // you can only have one default pool for a given resource, so + // if that update violates the unique index ensuring one default, + // the insert should still fail. + // note that this on_conflict has to have all three because that's + // how the pk is defined. if it only has the IDs and not the type, + // CRDB complains that the tuple doesn't match any constraints it's + // aware of + .on_conflict(( + dsl::ip_pool_id, + dsl::resource_type, + dsl::resource_id, + )) + .do_update() + .set(dsl::is_default.eq(ip_pool_resource.is_default)) .returning(IpPoolResource::as_returning()) - .on_conflict_do_nothing() .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { public_error_from_diesel( e, ErrorHandler::Conflict( - ResourceType::IpPool, + ResourceType::IpPoolResource, + // TODO: make string more useful &ip_pool_resource.ip_pool_id.to_string(), ), ) @@ -530,8 +547,6 @@ mod test { assert_eq!(fleet_default_pool.identity.name.as_str(), "default"); // unique index prevents second fleet-level default - // TODO: create the pool, and then the failure is on the attempt to - // associate it with the fleet as default let identity = IdentityMetadataCreateParams { name: "another-fleet-default".parse().unwrap(), description: "".to_string(), @@ -552,27 +567,27 @@ mod test { ) .await .expect_err("Failed to fail to make IP pool fleet default"); + assert_matches!(err, Error::ObjectAlreadyExists { .. }); - // when we fetch the default pool for a silo, if those scopes do not - // have a default IP pool, we will still get back the fleet default + // now test logic preferring most specific available default let silo_id = opctx.authn.silo_required().unwrap().id(); // create a non-default pool for the silo let identity = IdentityMetadataCreateParams { - name: "non-default-for-silo".parse().unwrap(), + name: "pool1-for-silo".parse().unwrap(), description: "".to_string(), }; - let default_for_silo = datastore + let pool1_for_silo = datastore .ip_pool_create(&opctx, IpPool::new(&identity)) .await - .expect("Failed to create silo non-default IP pool"); + .expect("Failed to create IP pool"); datastore .ip_pool_associate_resource( &opctx, IpPoolResource { - ip_pool_id: default_for_silo.id(), + ip_pool_id: pool1_for_silo.id(), resource_type: IpPoolResourceType::Silo, resource_id: silo_id, is_default: false, @@ -589,37 +604,27 @@ mod test { .expect("Failed to get silo default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); - // TODO: instead of a separate pool, this could now be done by - // associating the same pool? 
Would be nice to test both - - // now create a default pool for the silo - let identity = IdentityMetadataCreateParams { - name: "default-for-silo".parse().unwrap(), - description: "".to_string(), - }; - let default_for_silo = datastore - .ip_pool_create(&opctx, IpPool::new(&identity)) - .await - .expect("Failed to create silo default IP pool"); + // now we can change that association to is_default=true and + // it should update rather than erroring out datastore .ip_pool_associate_resource( &opctx, IpPoolResource { - ip_pool_id: default_for_silo.id(), + ip_pool_id: pool1_for_silo.id(), resource_type: IpPoolResourceType::Silo, resource_id: silo_id, is_default: true, }, ) .await - .expect("Failed to associate IP pool with silo"); + .expect("Failed to make IP pool default for silo"); - // now when we ask for the default pool, we get the one we just made + // now when we ask for the default pool again, we get the one we just changed let ip_pool = datastore .ip_pools_fetch_default(&opctx) .await .expect("Failed to get silo's default IP pool"); - assert_eq!(ip_pool.name().as_str(), "default-for-silo"); + assert_eq!(ip_pool.name().as_str(), "pool1-for-silo"); // and we can't create a second default pool for the silo let identity = IdentityMetadataCreateParams { diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 4aca246020..b7059b3db4 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -556,9 +556,7 @@ impl DataStore { ) -> Result<(), Error> { use omicron_common::api::external::Name; - dbg!("a"); self.rack_insert(opctx, &db::model::Rack::new(rack_id)).await?; - dbg!("b"); let internal_pool = db::model::IpPool::new(&IdentityMetadataCreateParams { diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 88454d1841..8bb35f4e13 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1466,6 +1466,30 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_pool_by_name ON omicron.public.ip_pool ) WHERE time_deleted IS NULL; +CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_resource_type AS ENUM ( + 'fleet', + 'silo' +); + +-- join table associating IP pools with resources like fleet or silo +CREATE TABLE IF NOT EXISTS omicron.public.ip_pool_resource ( + ip_pool_id UUID NOT NULL, + resource_type ip_pool_resource_type NOT NULL, + resource_id UUID NOT NULL, + is_default BOOL NOT NULL, + -- TODO: timestamps for soft deletes? + + -- resource_type is redundant because resource IDs are globally unique, but + -- logically it belongs here + PRIMARY KEY (ip_pool_id, resource_type, resource_id) +); + +-- a given resource can only have one default ip pool +CREATE UNIQUE INDEX IF NOT EXISTS one_default_ip_pool_per_resource ON omicron.public.ip_pool_resource ( + resource_id +) where + is_default = true; + /* * IP Pools are made up of a set of IP ranges, which are start/stop addresses. * Note that these need not be CIDR blocks or well-behaved subnets with a @@ -2539,30 +2563,6 @@ FROM WHERE instance.time_deleted IS NULL AND vmm.time_deleted IS NULL; -CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_resource_type AS ENUM ( - 'fleet', - 'silo' -); - --- join table associating IP pools with resources like fleet or silo -CREATE TABLE IF NOT EXISTS omicron.public.ip_pool_resource ( - ip_pool_id UUID NOT NULL, - resource_type ip_pool_resource_type NOT NULL, - resource_id UUID NOT NULL, - is_default BOOL NOT NULL, - -- TODO: timestamps for soft deletes? 
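+    -- (at most one default per resource is enforced by the partial unique
+    -- index one_default_ip_pool_per_resource below)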
- - -- resource_type is redundant because resource IDs are globally unique, but - -- logically it belongs here - PRIMARY KEY (ip_pool_id, resource_type, resource_id) -); - --- a given resource can only have one default ip pool -CREATE UNIQUE INDEX IF NOT EXISTS one_default_ip_pool_per_resource ON omicron.public.ip_pool_resource ( - resource_id -) where - is_default = true; - CREATE TABLE IF NOT EXISTS omicron.public.db_metadata ( -- There should only be one row of this table for the whole DB. -- It's a little goofy, but filter on "singleton = true" before querying From ee5549ca38c20de9c39b25f0f57b97dbdd76289f Mon Sep 17 00:00:00 2001 From: David Crespo Date: Fri, 20 Oct 2023 15:58:14 -0500 Subject: [PATCH 16/67] add dissociate endpoint as DELETE /associate --- nexus/db-queries/src/db/datastore/ip_pool.rs | 40 +++++++++- nexus/src/app/ip_pool.rs | 21 ++++- nexus/src/external_api/http_entrypoints.rs | 30 +++++++- nexus/test-utils/src/resource_helpers.rs | 2 +- nexus/tests/integration_tests/endpoints.rs | 4 +- nexus/tests/integration_tests/instances.rs | 2 +- nexus/tests/integration_tests/ip_pools.rs | 29 +++++-- nexus/tests/output/nexus_tags.txt | 1 + nexus/types/src/external_api/params.rs | 28 ++++++- openapi/nexus.json | 81 +++++++++++++++----- 10 files changed, 201 insertions(+), 37 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index e1d4ec5dcc..aee5a53d5a 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -358,6 +358,32 @@ impl DataStore { }) } + pub async fn ip_pool_dissociate_resource( + &self, + opctx: &OpContext, + // TODO: this could take the authz_pool, it's just more annoying to test that way + ip_pool_id: Uuid, + resource_id: Uuid, + ) -> DeleteResult { + use db::schema::ip_pool_resource; + opctx + .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) + .await?; + + diesel::delete(ip_pool_resource::table) + .filter(ip_pool_resource::ip_pool_id.eq(ip_pool_id)) + .filter(ip_pool_resource::resource_id.eq(resource_id)) + .execute_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map(|_rows_deleted| ()) + .map_err(|e| { + Error::internal_error(&format!( + "error deleting IP pool association to resource: {:?}", + e + )) + }) + } + pub async fn ip_pool_list_ranges( &self, opctx: &OpContext, @@ -601,7 +627,7 @@ mod test { let ip_pool = datastore .ip_pools_fetch_default(&opctx) .await - .expect("Failed to get silo default IP pool"); + .expect("Failed to get default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); // now we can change that association to is_default=true and @@ -649,6 +675,18 @@ mod test { .expect_err("Failed to fail to set a second default pool for silo"); assert_matches!(err, Error::ObjectAlreadyExists { .. 
}); + // now remove the association and we should get the default fleet pool again + datastore + .ip_pool_dissociate_resource(&opctx, pool1_for_silo.id(), silo_id) + .await + .expect("Failed to dissociate IP pool from silo"); + + let ip_pool = datastore + .ip_pools_fetch_default(&opctx) + .await + .expect("Failed to get default IP pool"); + assert_eq!(ip_pool.id(), fleet_default_pool.id()); + db.cleanup().await.unwrap(); logctx.cleanup_successful(); } diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 8f3bb908aa..9a77533dd4 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -75,9 +75,9 @@ impl super::Nexus { &self, opctx: &OpContext, pool_lookup: &lookup::IpPool<'_>, - ip_pool_resource: ¶ms::IpPoolResource, + ip_pool_resource: ¶ms::IpPoolAssociate, ) -> CreateResult { - // TODO: check for perms on specified resource + // TODO: check for perms on specified resource? or unnecessary because this is an operator action? let (.., authz_pool) = pool_lookup.lookup_for(authz::Action::Modify).await?; match ip_pool_resource.resource_type { @@ -109,6 +109,23 @@ impl super::Nexus { .await } + pub(crate) async fn ip_pool_dissociate_resource( + &self, + opctx: &OpContext, + pool_lookup: &lookup::IpPool<'_>, + ip_pool_dissoc: ¶ms::IpPoolDissociate, + ) -> DeleteResult { + let (.., authz_pool) = + pool_lookup.lookup_for(authz::Action::Modify).await?; + self.db_datastore + .ip_pool_dissociate_resource( + opctx, + authz_pool.id(), + ip_pool_dissoc.resource_id, + ) + .await + } + pub(crate) async fn ip_pools_list( &self, opctx: &OpContext, diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 54c5c56fb6..e00184fc4b 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -117,6 +117,7 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(ip_pool_list)?; api.register(ip_pool_create)?; api.register(ip_pool_associate)?; + api.register(ip_pool_dissociate)?; api.register(ip_pool_view)?; api.register(ip_pool_delete)?; api.register(ip_pool_update)?; @@ -1306,16 +1307,18 @@ async fn ip_pool_update( // TODO: associate just seems like the wrong word and I'd like to change it // across the board. What I really mean is "make available to" or "make availale // for use in" + /// Associate an IP Pool with a silo or project #[endpoint { method = POST, + // TODO: change this to /association path = "/v1/system/ip-pools/{pool}/associate", tags = ["system/networking"], }] async fn ip_pool_associate( rqctx: RequestContext>, path_params: Path, - resource_assoc: TypedBody, + resource_assoc: TypedBody, // TODO: what does this return? Returning the association record seems silly ) -> Result, HttpError> { let apictx = rqctx.context(); @@ -1333,6 +1336,31 @@ async fn ip_pool_associate( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } +/// Remove an IP pool's association with a silo or project +#[endpoint { + method = DELETE, + path = "/v1/system/ip-pools/{pool}/associate", + tags = ["system/networking"], +}] +async fn ip_pool_dissociate( + rqctx: RequestContext>, + path_params: Path, + // TODO: should this just be a path param? 
we have been trying to avoid that + query_params: Query, +) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + nexus.ip_pool_dissociate_resource(&opctx, &pool_lookup, &query).await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + /// Fetch the IP pool used for Oxide services #[endpoint { method = GET, diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index a0d1dbfb48..62f498694e 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -149,7 +149,7 @@ pub async fn create_ip_pool( let _assoc: views::IpPoolResource = object_create( client, &format!("/v1/system/ip-pools/{pool_name}/associate"), - ¶ms::IpPoolResource { + ¶ms::IpPoolAssociate { resource_id: *FLEET_ID, resource_type: params::IpPoolResourceType::Fleet, is_default: false, diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 11878a175e..4b3deba3cd 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -468,8 +468,8 @@ lazy_static! { }, }; pub static ref DEMO_IP_POOL_ASSOC_URL: String = format!("{}/associate", *DEMO_IP_POOL_URL); - pub static ref DEMO_IP_POOL_ASSOC_BODY: params::IpPoolResource = - params::IpPoolResource { + pub static ref DEMO_IP_POOL_ASSOC_BODY: params::IpPoolAssociate = + params::IpPoolAssociate { resource_id: DEFAULT_SILO.identity().id, resource_type: params::IpPoolResourceType::Silo, is_default: false, diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 73ab694a9d..9197847da0 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -3493,7 +3493,7 @@ async fn test_instance_ephemeral_ip_from_correct_pool( }, ) .await; - let params = params::IpPoolResource { + let params = params::IpPoolAssociate { resource_id: DEFAULT_SILO.id(), resource_type: params::IpPoolResourceType::Silo, is_default: true, diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index bb07a0486b..80c13ca992 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -14,6 +14,7 @@ use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::resource_helpers::create_project; +use nexus_test_utils::resource_helpers::object_create; use nexus_test_utils::resource_helpers::objects_list_page_authz; use nexus_test_utils::resource_helpers::{ create_instance, create_instance_with, @@ -30,6 +31,7 @@ use nexus_types::external_api::shared::Ipv4Range; use nexus_types::external_api::shared::Ipv6Range; use nexus_types::external_api::views::IpPool; use nexus_types::external_api::views::IpPoolRange; +use nexus_types::external_api::views::IpPoolResource; use omicron_common::api::external::IdentityMetadataUpdateParams; // use omicron_common::api::external::NameOrId; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; @@ -345,13 +347,11 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) 
{ let client = &cptestctx.external_client; // can create a pool with an existing silo by name - // TODO: confirm association post works with existing silo ID - // silo: Some(NameOrId::Name(cptestctx.silo_name.clone())), - // is_default: false, let _created_pool = create_pool(client, "p0").await; // let silo_id = // created_pool.silo_id.expect("Expected pool to have a silo_id"); + // silo: Some(NameOrId::Name()), // now we'll create another IP pool using that silo ID let _created_pool = create_pool(client, "p1").await; @@ -360,7 +360,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { // expect 404 on association if the specified silo doesn't exist let nonexistent_silo_id = Uuid::new_v4(); - let params = params::IpPoolResource { + let params = params::IpPoolAssociate { resource_id: nonexistent_silo_id, resource_type: params::IpPoolResourceType::Silo, is_default: false, @@ -386,10 +386,25 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { format!("not found: silo with id \"{nonexistent_silo_id}\"") ); + // associate with silo that exists + // let params = params::IpPoolAssociate { + // resource_id: *FLEET_ID, + // resource_type: params::IpPoolResourceType::Silo, + // is_default: false, + // }; + // let _: IpPoolResource = object_create( + // client, + // &format!("/v1/system/ip-pools/p1/associate"), + // ¶ms, + // ) + // .await; + // TODO: associating a resource that is already associated should be a noop // and return a success message // TODO: trying to set a second default for a resource should fail + + // TODO: dissociate silo from pool } // IP pool list fetch logic includes a join to ip_pool_resource, which is @@ -807,18 +822,16 @@ async fn test_ip_pool_list_usable_by_project( // add to fleet since we can't add to project yet // TODO: could do silo, might as well? need the ID, though. 
at least // until I make it so you can specify the resource by name - let params = params::IpPoolResource { + let params = params::IpPoolAssociate { resource_id: *FLEET_ID, resource_type: params::IpPoolResourceType::Fleet, is_default: false, }; - let _ = NexusRequest::objects_post( + let _: IpPoolResource = object_create( client, &format!("/v1/system/ip-pools/{mypool_name}/associate"), ¶ms, ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() .await; // Add an IP range to mypool diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 06a5651622..4d4a87b746 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -132,6 +132,7 @@ OPERATION ID METHOD URL PATH ip_pool_associate POST /v1/system/ip-pools/{pool}/associate ip_pool_create POST /v1/system/ip-pools ip_pool_delete DELETE /v1/system/ip-pools/{pool} +ip_pool_dissociate DELETE /v1/system/ip-pools/{pool}/associate ip_pool_list GET /v1/system/ip-pools ip_pool_range_add POST /v1/system/ip-pools/{pool}/ranges/add ip_pool_range_list GET /v1/system/ip-pools/{pool}/ranges diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 4bba13a530..eeb88c8b70 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -749,12 +749,38 @@ pub enum IpPoolResourceType { /// Parameters for associating an IP pool with a resource (fleet, silo) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct IpPoolResource { +pub struct IpPoolAssociate { pub resource_id: Uuid, pub resource_type: IpPoolResourceType, pub is_default: bool, } +// #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +// pub struct IpPoolAssociateSilo { +// pub silo: NameOrId, +// pub is_default: bool, +// } +// +// #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +// pub struct IpPoolAssociateFleet { +// pub is_default: bool, +// } +// +// TODO: IpPoolAssociate as tagged enum +// #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +// #[serde(tag = "resource_type", rename_all = "snake_case")] +// pub enum IpPoolAssociate { +// Fleet(IpPoolAssociateFleet), +// Silo(IpPoolAssociateFleet), +// } + +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct IpPoolDissociate { + pub resource_id: Uuid, + // TODO: not technically necessary, should we include it for completeness? 
feels user-unfriendly + // pub resource_type: IpPoolResourceType, +} + // INSTANCES /// Describes an attachment of an `InstanceNetworkInterface` to an `Instance`, diff --git a/openapi/nexus.json b/openapi/nexus.json index b7a7e1b422..db78e648b0 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -4599,7 +4599,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/IpPoolResource" + "$ref": "#/components/schemas/IpPoolAssociate" } } }, @@ -4623,6 +4623,44 @@ "$ref": "#/components/responses/Error" } } + }, + "delete": { + "tags": [ + "system/networking" + ], + "summary": "Remove an IP pool's association with a silo or project", + "operationId": "ip_pool_dissociate", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "resource_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } } }, "/v1/system/ip-pools/{pool}/ranges": { @@ -11587,6 +11625,27 @@ "time_modified" ] }, + "IpPoolAssociate": { + "description": "Parameters for associating an IP pool with a resource (fleet, silo)", + "type": "object", + "properties": { + "is_default": { + "type": "boolean" + }, + "resource_id": { + "type": "string", + "format": "uuid" + }, + "resource_type": { + "$ref": "#/components/schemas/IpPoolResourceType" + } + }, + "required": [ + "is_default", + "resource_id", + "resource_type" + ] + }, "IpPoolCreate": { "description": "Create-time parameters for an `IpPool`", "type": "object", @@ -11651,25 +11710,7 @@ ] }, "IpPoolResource": { - "description": "Parameters for associating an IP pool with a resource (fleet, silo)", - "type": "object", - "properties": { - "is_default": { - "type": "boolean" - }, - "resource_id": { - "type": "string", - "format": "uuid" - }, - "resource_type": { - "$ref": "#/components/schemas/IpPoolResourceType" - } - }, - "required": [ - "is_default", - "resource_id", - "resource_type" - ] + "type": "object" }, "IpPoolResourceType": { "type": "string", From 0ab0cfc24f3e9bae416602067a04847e21207a2f Mon Sep 17 00:00:00 2001 From: David Crespo Date: Fri, 20 Oct 2023 16:23:25 -0500 Subject: [PATCH 17/67] rename endpoints and params to match convention better --- nexus/src/app/ip_pool.rs | 4 ++-- nexus/src/external_api/http_entrypoints.rs | 17 ++++++++--------- nexus/test-utils/src/resource_helpers.rs | 4 ++-- nexus/tests/integration_tests/endpoints.rs | 6 +++--- nexus/tests/integration_tests/instances.rs | 4 ++-- nexus/tests/integration_tests/ip_pools.rs | 10 +++++----- nexus/tests/output/nexus_tags.txt | 4 ++-- nexus/types/src/external_api/params.rs | 4 ++-- openapi/nexus.json | 10 +++++----- 9 files changed, 31 insertions(+), 32 deletions(-) diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 9a77533dd4..c0954e124c 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -75,7 +75,7 @@ impl super::Nexus { &self, opctx: &OpContext, pool_lookup: &lookup::IpPool<'_>, - ip_pool_resource: ¶ms::IpPoolAssociate, + ip_pool_resource: ¶ms::IpPoolAssociationCreate, ) -> CreateResult { // TODO: check for perms on specified resource? or unnecessary because this is an operator action? 
let (.., authz_pool) = @@ -113,7 +113,7 @@ impl super::Nexus { &self, opctx: &OpContext, pool_lookup: &lookup::IpPool<'_>, - ip_pool_dissoc: ¶ms::IpPoolDissociate, + ip_pool_dissoc: ¶ms::IpPoolAssociationDelete, ) -> DeleteResult { let (.., authz_pool) = pool_lookup.lookup_for(authz::Action::Modify).await?; diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index e00184fc4b..a5ca0dc33b 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -116,8 +116,8 @@ pub(crate) fn external_api() -> NexusApiDescription { // Operator-Accessible IP Pools API api.register(ip_pool_list)?; api.register(ip_pool_create)?; - api.register(ip_pool_associate)?; - api.register(ip_pool_dissociate)?; + api.register(ip_pool_association_create)?; + api.register(ip_pool_association_delete)?; api.register(ip_pool_view)?; api.register(ip_pool_delete)?; api.register(ip_pool_update)?; @@ -1311,14 +1311,13 @@ async fn ip_pool_update( /// Associate an IP Pool with a silo or project #[endpoint { method = POST, - // TODO: change this to /association - path = "/v1/system/ip-pools/{pool}/associate", + path = "/v1/system/ip-pools/{pool}/association", tags = ["system/networking"], }] -async fn ip_pool_associate( +async fn ip_pool_association_create( rqctx: RequestContext>, path_params: Path, - resource_assoc: TypedBody, + resource_assoc: TypedBody, // TODO: what does this return? Returning the association record seems silly ) -> Result, HttpError> { let apictx = rqctx.context(); @@ -1339,14 +1338,14 @@ async fn ip_pool_associate( /// Remove an IP pool's association with a silo or project #[endpoint { method = DELETE, - path = "/v1/system/ip-pools/{pool}/associate", + path = "/v1/system/ip-pools/{pool}/association", tags = ["system/networking"], }] -async fn ip_pool_dissociate( +async fn ip_pool_association_delete( rqctx: RequestContext>, path_params: Path, // TODO: should this just be a path param? we have been trying to avoid that - query_params: Query, + query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 62f498694e..99019a17c3 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -148,8 +148,8 @@ pub async fn create_ip_pool( // make pool available for use anywhere in fleet let _assoc: views::IpPoolResource = object_create( client, - &format!("/v1/system/ip-pools/{pool_name}/associate"), - ¶ms::IpPoolAssociate { + &format!("/v1/system/ip-pools/{pool_name}/association"), + ¶ms::IpPoolAssociationCreate { resource_id: *FLEET_ID, resource_type: params::IpPoolResourceType::Fleet, is_default: false, diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 4b3deba3cd..517f9e4f81 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -467,9 +467,9 @@ lazy_static! 
{ description: Some(String::from("a new IP pool")), }, }; - pub static ref DEMO_IP_POOL_ASSOC_URL: String = format!("{}/associate", *DEMO_IP_POOL_URL); - pub static ref DEMO_IP_POOL_ASSOC_BODY: params::IpPoolAssociate = - params::IpPoolAssociate { + pub static ref DEMO_IP_POOL_ASSOC_URL: String = format!("{}/association", *DEMO_IP_POOL_URL); + pub static ref DEMO_IP_POOL_ASSOC_BODY: params::IpPoolAssociationCreate = + params::IpPoolAssociationCreate { resource_id: DEFAULT_SILO.identity().id, resource_type: params::IpPoolResourceType::Silo, is_default: false, diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 9197847da0..5f271f95b3 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -3493,12 +3493,12 @@ async fn test_instance_ephemeral_ip_from_correct_pool( }, ) .await; - let params = params::IpPoolAssociate { + let params = params::IpPoolAssociationCreate { resource_id: DEFAULT_SILO.id(), resource_type: params::IpPoolResourceType::Silo, is_default: true, }; - let assoc_url = format!("/v1/system/ip-pools/{pool_name}/associate"); + let assoc_url = format!("/v1/system/ip-pools/{pool_name}/association"); let _ = NexusRequest::objects_post(client, &assoc_url, ¶ms) .authn_as(AuthnMode::PrivilegedUser) .execute() diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 80c13ca992..ce3f6249f1 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -360,7 +360,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { // expect 404 on association if the specified silo doesn't exist let nonexistent_silo_id = Uuid::new_v4(); - let params = params::IpPoolAssociate { + let params = params::IpPoolAssociationCreate { resource_id: nonexistent_silo_id, resource_type: params::IpPoolResourceType::Silo, is_default: false, @@ -369,7 +369,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { RequestBuilder::new( client, Method::POST, - "/v1/system/ip-pools/p1/associate", + "/v1/system/ip-pools/p1/association", ) .body(Some(¶ms)) .expect_status(Some(StatusCode::NOT_FOUND)), @@ -394,7 +394,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { // }; // let _: IpPoolResource = object_create( // client, - // &format!("/v1/system/ip-pools/p1/associate"), + // &format!("/v1/system/ip-pools/p1/association"), // ¶ms, // ) // .await; @@ -822,14 +822,14 @@ async fn test_ip_pool_list_usable_by_project( // add to fleet since we can't add to project yet // TODO: could do silo, might as well? need the ID, though. 
at least // until I make it so you can specify the resource by name - let params = params::IpPoolAssociate { + let params = params::IpPoolAssociationCreate { resource_id: *FLEET_ID, resource_type: params::IpPoolResourceType::Fleet, is_default: false, }; let _: IpPoolResource = object_create( client, - &format!("/v1/system/ip-pools/{mypool_name}/associate"), + &format!("/v1/system/ip-pools/{mypool_name}/association"), ¶ms, ) .await; diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 4d4a87b746..e63e0bc8dc 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -129,10 +129,10 @@ system_metric GET /v1/system/metrics/{metric_nam API operations found with tag "system/networking" OPERATION ID METHOD URL PATH -ip_pool_associate POST /v1/system/ip-pools/{pool}/associate +ip_pool_association_create POST /v1/system/ip-pools/{pool}/association +ip_pool_association_delete DELETE /v1/system/ip-pools/{pool}/association ip_pool_create POST /v1/system/ip-pools ip_pool_delete DELETE /v1/system/ip-pools/{pool} -ip_pool_dissociate DELETE /v1/system/ip-pools/{pool}/associate ip_pool_list GET /v1/system/ip-pools ip_pool_range_add POST /v1/system/ip-pools/{pool}/ranges/add ip_pool_range_list GET /v1/system/ip-pools/{pool}/ranges diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index eeb88c8b70..bc463e9b70 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -749,7 +749,7 @@ pub enum IpPoolResourceType { /// Parameters for associating an IP pool with a resource (fleet, silo) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct IpPoolAssociate { +pub struct IpPoolAssociationCreate { pub resource_id: Uuid, pub resource_type: IpPoolResourceType, pub is_default: bool, @@ -775,7 +775,7 @@ pub struct IpPoolAssociate { // } #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct IpPoolDissociate { +pub struct IpPoolAssociationDelete { pub resource_id: Uuid, // TODO: not technically necessary, should we include it for completeness? feels user-unfriendly // pub resource_type: IpPoolResourceType, diff --git a/openapi/nexus.json b/openapi/nexus.json index db78e648b0..a152a9bafa 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -4577,13 +4577,13 @@ } } }, - "/v1/system/ip-pools/{pool}/associate": { + "/v1/system/ip-pools/{pool}/association": { "post": { "tags": [ "system/networking" ], "summary": "Associate an IP Pool with a silo or project", - "operationId": "ip_pool_associate", + "operationId": "ip_pool_association_create", "parameters": [ { "in": "path", @@ -4599,7 +4599,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/IpPoolAssociate" + "$ref": "#/components/schemas/IpPoolAssociationCreate" } } }, @@ -4629,7 +4629,7 @@ "system/networking" ], "summary": "Remove an IP pool's association with a silo or project", - "operationId": "ip_pool_dissociate", + "operationId": "ip_pool_association_delete", "parameters": [ { "in": "path", @@ -11625,7 +11625,7 @@ "time_modified" ] }, - "IpPoolAssociate": { + "IpPoolAssociationCreate": { "description": "Parameters for associating an IP pool with a resource (fleet, silo)", "type": "object", "properties": { From 2b4f326f30be0d037a075ad7d9386779218c3b92 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Fri, 20 Oct 2023 16:57:38 -0500 Subject: [PATCH 18/67] change API to allow associating silo by name. 
it's beautiful --- nexus/db-model/src/ip_pool.rs | 9 ---- nexus/src/app/ip_pool.rs | 40 ++++++++------ nexus/test-utils/src/resource_helpers.rs | 7 +-- nexus/tests/integration_tests/endpoints.rs | 7 ++- nexus/tests/integration_tests/instances.rs | 11 ++-- nexus/tests/integration_tests/ip_pools.rs | 28 +++++----- nexus/types/src/external_api/params.rs | 37 ++++--------- openapi/nexus.json | 61 ++++++++++++++-------- 8 files changed, 96 insertions(+), 104 deletions(-) diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 3428ac7771..78bc21c09c 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -89,15 +89,6 @@ impl_enum_type!( Silo => b"silo" ); -impl From for IpPoolResourceType { - fn from(typ: params::IpPoolResourceType) -> Self { - match typ { - params::IpPoolResourceType::Fleet => IpPoolResourceType::Fleet, - params::IpPoolResourceType::Silo => IpPoolResourceType::Silo, - } - } -} - #[derive(Queryable, Insertable, Selectable, Clone, Debug)] #[diesel(table_name = ip_pool_resource)] pub struct IpPoolResource { diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index c0954e124c..da115cc972 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -10,6 +10,7 @@ use ipnetwork::IpNetwork; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; +use nexus_db_queries::db::fixed_data::FLEET_ID; // use nexus_db_queries::db::fixed_data::silo::INTERNAL_SILO_ID; use nexus_db_queries::db::lookup; use nexus_db_queries::db::lookup::LookupPath; @@ -75,22 +76,30 @@ impl super::Nexus { &self, opctx: &OpContext, pool_lookup: &lookup::IpPool<'_>, - ip_pool_resource: ¶ms::IpPoolAssociationCreate, + assoc_create: ¶ms::IpPoolAssociationCreate, ) -> CreateResult { // TODO: check for perms on specified resource? or unnecessary because this is an operator action? let (.., authz_pool) = pool_lookup.lookup_for(authz::Action::Modify).await?; - match ip_pool_resource.resource_type { - params::IpPoolResourceType::Silo => { - self.silo_lookup( - &opctx, - NameOrId::Id(ip_pool_resource.resource_id), - )? - .lookup_for(authz::Action::Read) - .await?; + let (resource_type, resource_id, is_default) = match assoc_create { + params::IpPoolAssociationCreate::Silo(assoc_silo) => { + let (silo,) = self + .silo_lookup(&opctx, assoc_silo.silo.clone())? 
+ .lookup_for(authz::Action::Read) + .await?; + ( + db::model::IpPoolResourceType::Silo, + silo.id(), + assoc_silo.is_default, + ) } - params::IpPoolResourceType::Fleet => { - // hope we don't need to be assured of the fleet's existence + params::IpPoolAssociationCreate::Fleet(assoc_fleet) => { + // we don't need to be assured of the fleet's existence + ( + db::model::IpPoolResourceType::Fleet, + *FLEET_ID, + assoc_fleet.is_default, + ) } }; self.db_datastore @@ -98,12 +107,9 @@ impl super::Nexus { opctx, db::model::IpPoolResource { ip_pool_id: authz_pool.id(), - resource_type: ip_pool_resource - .resource_type - .clone() - .into(), - resource_id: ip_pool_resource.resource_id, - is_default: ip_pool_resource.is_default, + resource_type, + resource_id, + is_default, }, ) .await diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 99019a17c3..fb001fcf11 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -12,7 +12,6 @@ use dropshot::test_util::ClientTestContext; use dropshot::HttpErrorResponseBody; use dropshot::Method; use http::StatusCode; -use nexus_db_queries::db::fixed_data::FLEET_ID; use nexus_test_interface::NexusServer; use nexus_types::external_api::params; use nexus_types::external_api::params::PhysicalDiskKind; @@ -149,11 +148,9 @@ pub async fn create_ip_pool( let _assoc: views::IpPoolResource = object_create( client, &format!("/v1/system/ip-pools/{pool_name}/association"), - ¶ms::IpPoolAssociationCreate { - resource_id: *FLEET_ID, - resource_type: params::IpPoolResourceType::Fleet, + ¶ms::IpPoolAssociationCreate::Fleet(params::IpPoolAssociateFleet { is_default: false, - }, + }), ) .await; diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 517f9e4f81..48b2140db4 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -469,11 +469,10 @@ lazy_static! 
{ }; pub static ref DEMO_IP_POOL_ASSOC_URL: String = format!("{}/association", *DEMO_IP_POOL_URL); pub static ref DEMO_IP_POOL_ASSOC_BODY: params::IpPoolAssociationCreate = - params::IpPoolAssociationCreate { - resource_id: DEFAULT_SILO.identity().id, - resource_type: params::IpPoolResourceType::Silo, + params::IpPoolAssociationCreate::Silo(params::IpPoolAssociateSilo { + silo: NameOrId::Id(DEFAULT_SILO.identity().id), is_default: false, - }; + }); pub static ref DEMO_IP_POOL_RANGE: IpRange = IpRange::V4(Ipv4Range::new( std::net::Ipv4Addr::new(10, 0, 0, 0), std::net::Ipv4Addr::new(10, 0, 0, 255), diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 5f271f95b3..eaf90fc159 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -44,6 +44,7 @@ use omicron_common::api::external::InstanceNetworkInterface; use omicron_common::api::external::InstanceState; use omicron_common::api::external::Ipv4Net; use omicron_common::api::external::Name; +use omicron_common::api::external::NameOrId; use omicron_common::api::external::Vni; use omicron_nexus::app::MAX_MEMORY_BYTES_PER_INSTANCE; use omicron_nexus::app::MAX_VCPU_PER_INSTANCE; @@ -3493,11 +3494,11 @@ async fn test_instance_ephemeral_ip_from_correct_pool( }, ) .await; - let params = params::IpPoolAssociationCreate { - resource_id: DEFAULT_SILO.id(), - resource_type: params::IpPoolResourceType::Silo, - is_default: true, - }; + let params = + params::IpPoolAssociationCreate::Silo(params::IpPoolAssociateSilo { + silo: NameOrId::Id(DEFAULT_SILO.id()), + is_default: true, + }); let assoc_url = format!("/v1/system/ip-pools/{pool_name}/association"); let _ = NexusRequest::objects_post(client, &assoc_url, ¶ms) .authn_as(AuthnMode::PrivilegedUser) diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index ce3f6249f1..ee7cb874a4 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -9,7 +9,6 @@ use dropshot::HttpErrorResponseBody; use http::method::Method; use http::StatusCode; use nexus_db_queries::db::datastore::SERVICE_IP_POOL_NAME; -use nexus_db_queries::db::fixed_data::FLEET_ID; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; @@ -33,7 +32,7 @@ use nexus_types::external_api::views::IpPool; use nexus_types::external_api::views::IpPoolRange; use nexus_types::external_api::views::IpPoolResource; use omicron_common::api::external::IdentityMetadataUpdateParams; -// use omicron_common::api::external::NameOrId; +use omicron_common::api::external::NameOrId; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; use omicron_nexus::TestInterfaces; use sled_agent_client::TestInterfaces as SledTestInterfaces; @@ -360,11 +359,11 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { // expect 404 on association if the specified silo doesn't exist let nonexistent_silo_id = Uuid::new_v4(); - let params = params::IpPoolAssociationCreate { - resource_id: nonexistent_silo_id, - resource_type: params::IpPoolResourceType::Silo, - is_default: false, - }; + let params = + params::IpPoolAssociationCreate::Silo(params::IpPoolAssociateSilo { + silo: NameOrId::Id(nonexistent_silo_id), + is_default: false, + }); let error = NexusRequest::new( RequestBuilder::new( client, @@ -387,11 +386,9 @@ async fn test_ip_pool_with_silo(cptestctx: 
&ControlPlaneTestContext) { ); // associate with silo that exists - // let params = params::IpPoolAssociate { - // resource_id: *FLEET_ID, - // resource_type: params::IpPoolResourceType::Silo, + // let params = params::IpPoolAssociationCreate::Fleet(params::IpPoolAssociateFleet { // is_default: false, - // }; + // }); // let _: IpPoolResource = object_create( // client, // &format!("/v1/system/ip-pools/p1/association"), @@ -822,11 +819,10 @@ async fn test_ip_pool_list_usable_by_project( // add to fleet since we can't add to project yet // TODO: could do silo, might as well? need the ID, though. at least // until I make it so you can specify the resource by name - let params = params::IpPoolAssociationCreate { - resource_id: *FLEET_ID, - resource_type: params::IpPoolResourceType::Fleet, - is_default: false, - }; + let params = + params::IpPoolAssociationCreate::Fleet(params::IpPoolAssociateFleet { + is_default: false, + }); let _: IpPoolResource = object_create( client, &format!("/v1/system/ip-pools/{mypool_name}/association"), diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index bc463e9b70..1593b62c74 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -741,38 +741,23 @@ pub struct IpPoolUpdate { } #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -#[serde(rename_all = "snake_case")] -pub enum IpPoolResourceType { - Fleet, - Silo, +pub struct IpPoolAssociateSilo { + pub silo: NameOrId, + pub is_default: bool, } -/// Parameters for associating an IP pool with a resource (fleet, silo) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct IpPoolAssociationCreate { - pub resource_id: Uuid, - pub resource_type: IpPoolResourceType, +pub struct IpPoolAssociateFleet { pub is_default: bool, } -// #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -// pub struct IpPoolAssociateSilo { -// pub silo: NameOrId, -// pub is_default: bool, -// } -// -// #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -// pub struct IpPoolAssociateFleet { -// pub is_default: bool, -// } -// -// TODO: IpPoolAssociate as tagged enum -// #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -// #[serde(tag = "resource_type", rename_all = "snake_case")] -// pub enum IpPoolAssociate { -// Fleet(IpPoolAssociateFleet), -// Silo(IpPoolAssociateFleet), -// } +/// Parameters for associating an IP pool with a resource (fleet, silo) +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +#[serde(tag = "resource_type", rename_all = "snake_case")] +pub enum IpPoolAssociationCreate { + Silo(IpPoolAssociateSilo), + Fleet(IpPoolAssociateFleet), +} #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct IpPoolAssociationDelete { diff --git a/openapi/nexus.json b/openapi/nexus.json index a152a9bafa..9c4739cb6c 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -11627,23 +11627,47 @@ }, "IpPoolAssociationCreate": { "description": "Parameters for associating an IP pool with a resource (fleet, silo)", - "type": "object", - "properties": { - "is_default": { - "type": "boolean" - }, - "resource_id": { - "type": "string", - "format": "uuid" + "oneOf": [ + { + "type": "object", + "properties": { + "is_default": { + "type": "boolean" + }, + "resource_type": { + "type": "string", + "enum": [ + "silo" + ] + }, + "silo": { + "$ref": "#/components/schemas/NameOrId" + } + }, + "required": [ + "is_default", + "resource_type", + "silo" + ] }, - "resource_type": { - "$ref": 
"#/components/schemas/IpPoolResourceType" + { + "type": "object", + "properties": { + "is_default": { + "type": "boolean" + }, + "resource_type": { + "type": "string", + "enum": [ + "fleet" + ] + } + }, + "required": [ + "is_default", + "resource_type" + ] } - }, - "required": [ - "is_default", - "resource_id", - "resource_type" ] }, "IpPoolCreate": { @@ -11712,13 +11736,6 @@ "IpPoolResource": { "type": "object" }, - "IpPoolResourceType": { - "type": "string", - "enum": [ - "fleet", - "silo" - ] - }, "IpPoolResultsPage": { "description": "A single page of results", "type": "object", From e2b7600cef9753a2822f084b8845bad5a36d1654 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Mon, 23 Oct 2023 16:02:19 -0500 Subject: [PATCH 19/67] move migration up a number before merging main --- dev-tools/omdb/tests/env.out | 6 +++--- dev-tools/omdb/tests/successes.out | 12 ++++++------ nexus/db-model/src/schema.rs | 2 +- schema/crdb/{8.0.0 => 9.0.0}/up1.sql | 0 schema/crdb/{8.0.0 => 9.0.0}/up2.sql | 0 schema/crdb/{8.0.0 => 9.0.0}/up3.sql | 0 schema/crdb/{8.0.0 => 9.0.0}/up4.sql | 0 schema/crdb/{8.0.0 => 9.0.0}/up5.sql | 0 schema/crdb/{8.0.1 => 9.0.1}/up1.sql | 0 schema/crdb/{8.0.1 => 9.0.1}/up2.sql | 0 schema/crdb/dbinit.sql | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) rename schema/crdb/{8.0.0 => 9.0.0}/up1.sql (100%) rename schema/crdb/{8.0.0 => 9.0.0}/up2.sql (100%) rename schema/crdb/{8.0.0 => 9.0.0}/up3.sql (100%) rename schema/crdb/{8.0.0 => 9.0.0}/up4.sql (100%) rename schema/crdb/{8.0.0 => 9.0.0}/up5.sql (100%) rename schema/crdb/{8.0.1 => 9.0.1}/up1.sql (100%) rename schema/crdb/{8.0.1 => 9.0.1}/up2.sql (100%) diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out index 468432ec58..174d0aa441 100644 --- a/dev-tools/omdb/tests/env.out +++ b/dev-tools/omdb/tests/env.out @@ -7,7 +7,7 @@ sim-b6d65341 [::1]:REDACTED_PORT - REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (8.0.1) +note: database schema version matches expected (9.0.1) ============================================= EXECUTING COMMAND: omdb ["db", "--db-url", "junk", "sleds"] termination: Exited(2) @@ -172,7 +172,7 @@ stderr: note: database URL not specified. Will search DNS. note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (8.0.1) +note: database schema version matches expected (9.0.1) ============================================= EXECUTING COMMAND: omdb ["--dns-server", "[::1]:REDACTED_PORT", "db", "sleds"] termination: Exited(0) @@ -185,5 +185,5 @@ stderr: note: database URL not specified. Will search DNS. 
note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (8.0.1) +note: database schema version matches expected (9.0.1) ============================================= diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 7ba47e7ddb..4780d5f045 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -8,7 +8,7 @@ external oxide-dev.test 2 create silo: "tes --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (8.0.1) +note: database schema version matches expected (9.0.1) ============================================= EXECUTING COMMAND: omdb ["db", "dns", "diff", "external", "2"] termination: Exited(0) @@ -24,7 +24,7 @@ changes: names added: 1, names removed: 0 --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (8.0.1) +note: database schema version matches expected (9.0.1) ============================================= EXECUTING COMMAND: omdb ["db", "dns", "names", "external", "2"] termination: Exited(0) @@ -36,7 +36,7 @@ External zone: oxide-dev.test --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (8.0.1) +note: database schema version matches expected (9.0.1) ============================================= EXECUTING COMMAND: omdb ["db", "services", "list-instances"] termination: Exited(0) @@ -52,7 +52,7 @@ Nexus REDACTED_UUID_REDACTED_UUID_REDACTED [::ffff:127.0.0.1]:REDACTED_ --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (8.0.1) +note: database schema version matches expected (9.0.1) ============================================= EXECUTING COMMAND: omdb ["db", "services", "list-by-sled"] termination: Exited(0) @@ -71,7 +71,7 @@ sled: sim-b6d65341 (id REDACTED_UUID_REDACTED_UUID_REDACTED) --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (8.0.1) +note: database schema version matches expected (9.0.1) ============================================= EXECUTING COMMAND: omdb ["db", "sleds"] termination: Exited(0) @@ -82,7 +82,7 @@ sim-b6d65341 [::1]:REDACTED_PORT - REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (8.0.1) +note: database schema version matches expected (9.0.1) ============================================= EXECUTING COMMAND: omdb ["mgs", "inventory"] termination: Exited(0) diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 91aba441ef..3b26740521 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1149,7 +1149,7 @@ table! { /// /// This should be updated whenever the schema is changed. 
For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(8, 0, 1); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(9, 0, 1); allow_tables_to_appear_in_same_query!( system_update, diff --git a/schema/crdb/8.0.0/up1.sql b/schema/crdb/9.0.0/up1.sql similarity index 100% rename from schema/crdb/8.0.0/up1.sql rename to schema/crdb/9.0.0/up1.sql diff --git a/schema/crdb/8.0.0/up2.sql b/schema/crdb/9.0.0/up2.sql similarity index 100% rename from schema/crdb/8.0.0/up2.sql rename to schema/crdb/9.0.0/up2.sql diff --git a/schema/crdb/8.0.0/up3.sql b/schema/crdb/9.0.0/up3.sql similarity index 100% rename from schema/crdb/8.0.0/up3.sql rename to schema/crdb/9.0.0/up3.sql diff --git a/schema/crdb/8.0.0/up4.sql b/schema/crdb/9.0.0/up4.sql similarity index 100% rename from schema/crdb/8.0.0/up4.sql rename to schema/crdb/9.0.0/up4.sql diff --git a/schema/crdb/8.0.0/up5.sql b/schema/crdb/9.0.0/up5.sql similarity index 100% rename from schema/crdb/8.0.0/up5.sql rename to schema/crdb/9.0.0/up5.sql diff --git a/schema/crdb/8.0.1/up1.sql b/schema/crdb/9.0.1/up1.sql similarity index 100% rename from schema/crdb/8.0.1/up1.sql rename to schema/crdb/9.0.1/up1.sql diff --git a/schema/crdb/8.0.1/up2.sql b/schema/crdb/9.0.1/up2.sql similarity index 100% rename from schema/crdb/8.0.1/up2.sql rename to schema/crdb/9.0.1/up2.sql diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 8bb35f4e13..8ceac06582 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -2589,7 +2589,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '8.0.1', NULL) + ( TRUE, NOW(), NOW(), '9.0.1', NULL) ON CONFLICT DO NOTHING; COMMIT; From 80a4d22c9f7f82dce3cfa1d3d3e5dbf3d76d3fb4 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 25 Oct 2023 15:06:01 -0500 Subject: [PATCH 20/67] get silo ID so we can test association by name or id --- nexus/tests/integration_tests/ip_pools.rs | 59 +++++++++++++++-------- 1 file changed, 39 insertions(+), 20 deletions(-) diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index ee7cb874a4..9e9932157a 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -31,6 +31,7 @@ use nexus_types::external_api::shared::Ipv6Range; use nexus_types::external_api::views::IpPool; use nexus_types::external_api::views::IpPoolRange; use nexus_types::external_api::views::IpPoolResource; +use nexus_types::external_api::views::Silo; use omicron_common::api::external::IdentityMetadataUpdateParams; use omicron_common::api::external::NameOrId; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; @@ -345,17 +346,8 @@ async fn test_ip_pool_service_no_cud(cptestctx: &ControlPlaneTestContext) { async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - // can create a pool with an existing silo by name let _created_pool = create_pool(client, "p0").await; - - // let silo_id = - // created_pool.silo_id.expect("Expected pool to have a silo_id"); - // silo: Some(NameOrId::Name()), - - // now we'll create another IP pool using that silo ID let _created_pool = create_pool(client, "p1").await; - // assert_eq!(created_pool.silo_id.unwrap(), silo_id); - // TODO: confirm a second pool can be assocatied with the same silo ID // expect 404 on association if the specified silo doesn't exist let nonexistent_silo_id = Uuid::new_v4(); @@ 
-364,6 +356,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) {
 silo: NameOrId::Id(nonexistent_silo_id),
 is_default: false,
 });
+
 let error = NexusRequest::new(
 RequestBuilder::new(
 client,
@@ -386,22 +379,48 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) {
 );

 // associate with silo that exists
- // let params = params::IpPoolAssociationCreate::Fleet(params::IpPoolAssociateFleet {
- // is_default: false,
- // });
- // let _: IpPoolResource = object_create(
- // client,
- // &format!("/v1/system/ip-pools/p1/association"),
- // &params,
- // )
- // .await;
+ let params =
+ params::IpPoolAssociationCreate::Silo(params::IpPoolAssociateSilo {
+ // TODO: this is probably not the best silo ID to use
+ silo: NameOrId::Name(cptestctx.silo_name.clone()),
+ is_default: false,
+ });
+ let _: IpPoolResource = object_create(
+ client,
+ &format!("/v1/system/ip-pools/p1/association"),
+ &params,
+ )
+ .await;
+
+ // TODO: test association worked, or at least comes back in association list
+
+ // get silo ID so we can test association by ID as well
+ let silo_url = format!("/v1/system/silos/{}", cptestctx.silo_name);
+ let silo = NexusRequest::object_get(client, &silo_url)
+ .authn_as(AuthnMode::PrivilegedUser)
+ .execute_and_parse_unwrap::<Silo>()
+ .await;
+
+ // TODO: dissociate silo
+ // TODO: confirm dissociation
+
+ // associate same silo by ID
+ let params =
+ params::IpPoolAssociationCreate::Silo(params::IpPoolAssociateSilo {
+ silo: NameOrId::Id(silo.identity.id),
+ is_default: false,
+ });
+ let _: IpPoolResource = object_create(
+ client,
+ &format!("/v1/system/ip-pools/p1/association"),
+ &params,
+ )
+ .await;

 // TODO: associating a resource that is already associated should be a noop
 // and return a success message

 // TODO: trying to set a second default for a resource should fail
-
- // TODO: dissociate silo from pool
 }

 // IP pool list fetch logic includes a join to ip_pool_resource, which is
From 9c101ace1da37167202f1eb2cb8227ac506d49e4 Mon Sep 17 00:00:00 2001
From: David Crespo
Date: Wed, 25 Oct 2023 15:25:24 -0500
Subject: [PATCH 21/67] pluralize /associations for restiness, prep for list
 endpoint

---
 nexus/src/external_api/http_entrypoints.rs | 4 ++--
 nexus/test-utils/src/resource_helpers.rs   | 2 +-
 nexus/tests/integration_tests/endpoints.rs | 2 +-
 nexus/tests/integration_tests/instances.rs | 2 +-
 nexus/tests/integration_tests/ip_pools.rs  | 8 ++++----
 nexus/tests/output/nexus_tags.txt          | 4 ++--
 openapi/nexus.json                         | 2 +-
 7 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index 71f106e1bd..03ad0fbbdc 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -1325,7 +1325,7 @@ async fn ip_pool_update(
 /// Associate an IP Pool with a silo or project
 #[endpoint {
 method = POST,
- path = "/v1/system/ip-pools/{pool}/association",
+ path = "/v1/system/ip-pools/{pool}/associations",
 tags = ["system/networking"],
 }]
 async fn ip_pool_association_create(
@@ -1352,7 +1352,7 @@ async fn ip_pool_association_create(
 /// Remove an IP pool's association with a silo or project
 #[endpoint {
 method = DELETE,
- path = "/v1/system/ip-pools/{pool}/association",
+ path = "/v1/system/ip-pools/{pool}/associations",
 tags = ["system/networking"],
 }]
 async fn ip_pool_association_delete(
diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs
index fb001fcf11..b575e1b378 100644
--- 
a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -147,7 +147,7 @@ pub async fn create_ip_pool( // make pool available for use anywhere in fleet let _assoc: views::IpPoolResource = object_create( client, - &format!("/v1/system/ip-pools/{pool_name}/association"), + &format!("/v1/system/ip-pools/{pool_name}/associations"), ¶ms::IpPoolAssociationCreate::Fleet(params::IpPoolAssociateFleet { is_default: false, }), diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 200f40fa5b..0ad87236d7 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -501,7 +501,7 @@ lazy_static! { description: Some(String::from("a new IP pool")), }, }; - pub static ref DEMO_IP_POOL_ASSOC_URL: String = format!("{}/association", *DEMO_IP_POOL_URL); + pub static ref DEMO_IP_POOL_ASSOC_URL: String = format!("{}/associations", *DEMO_IP_POOL_URL); pub static ref DEMO_IP_POOL_ASSOC_BODY: params::IpPoolAssociationCreate = params::IpPoolAssociationCreate::Silo(params::IpPoolAssociateSilo { silo: NameOrId::Id(DEFAULT_SILO.identity().id), diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 7a46618a13..a6aee0b87e 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -3618,7 +3618,7 @@ async fn test_instance_ephemeral_ip_from_correct_pool( silo: NameOrId::Id(DEFAULT_SILO.id()), is_default: true, }); - let assoc_url = format!("/v1/system/ip-pools/{pool_name}/association"); + let assoc_url = format!("/v1/system/ip-pools/{pool_name}/associations"); let _ = NexusRequest::objects_post(client, &assoc_url, ¶ms) .authn_as(AuthnMode::PrivilegedUser) .execute() diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 9e9932157a..62ce4d2dde 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -361,7 +361,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { RequestBuilder::new( client, Method::POST, - "/v1/system/ip-pools/p1/association", + "/v1/system/ip-pools/p1/associations", ) .body(Some(¶ms)) .expect_status(Some(StatusCode::NOT_FOUND)), @@ -387,7 +387,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { }); let _: IpPoolResource = object_create( client, - &format!("/v1/system/ip-pools/p1/association"), + &format!("/v1/system/ip-pools/p1/associations"), ¶ms, ) .await; @@ -412,7 +412,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { }); let _: IpPoolResource = object_create( client, - &format!("/v1/system/ip-pools/p1/association"), + &format!("/v1/system/ip-pools/p1/associations"), ¶ms, ) .await; @@ -844,7 +844,7 @@ async fn test_ip_pool_list_usable_by_project( }); let _: IpPoolResource = object_create( client, - &format!("/v1/system/ip-pools/{mypool_name}/association"), + &format!("/v1/system/ip-pools/{mypool_name}/associations"), ¶ms, ) .await; diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index f3082cc05b..2039c33812 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -129,8 +129,8 @@ system_metric GET /v1/system/metrics/{metric_nam API operations found with tag "system/networking" OPERATION ID METHOD URL PATH -ip_pool_association_create POST /v1/system/ip-pools/{pool}/association -ip_pool_association_delete DELETE 
/v1/system/ip-pools/{pool}/association +ip_pool_association_create POST /v1/system/ip-pools/{pool}/associations +ip_pool_association_delete DELETE /v1/system/ip-pools/{pool}/associations ip_pool_create POST /v1/system/ip-pools ip_pool_delete DELETE /v1/system/ip-pools/{pool} ip_pool_list GET /v1/system/ip-pools diff --git a/openapi/nexus.json b/openapi/nexus.json index 56e7281b5f..dc0a406496 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -4577,7 +4577,7 @@ } } }, - "/v1/system/ip-pools/{pool}/association": { + "/v1/system/ip-pools/{pool}/associations": { "post": { "tags": [ "system/networking" From 32b2f757896662420c3e57892648467ed4fae6f3 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 25 Oct 2023 16:34:29 -0500 Subject: [PATCH 22/67] endpoint to list associations for IP pool --- nexus/db-model/src/ip_pool.rs | 19 +++ nexus/db-queries/src/db/datastore/ip_pool.rs | 27 +++++ nexus/src/app/ip_pool.rs | 16 ++- nexus/src/external_api/http_entrypoints.rs | 49 +++++++- nexus/tests/integration_tests/ip_pools.rs | 52 ++++++-- nexus/tests/output/nexus_tags.txt | 1 + nexus/types/src/external_api/views.rs | 19 ++- openapi/nexus.json | 120 ++++++++++++++++++- schema/crdb/9.0.0/up4.sql | 2 +- schema/crdb/9.0.0/up5.sql | 2 +- 10 files changed, 286 insertions(+), 21 deletions(-) diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 78bc21c09c..b6f496c99e 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -97,6 +97,25 @@ pub struct IpPoolResource { pub resource_id: Uuid, pub is_default: bool, } +impl From for views::IpPoolResourceType { + fn from(typ: IpPoolResourceType) -> Self { + match typ { + IpPoolResourceType::Fleet => Self::Fleet, + IpPoolResourceType::Silo => Self::Silo, + } + } +} + +impl From for views::IpPoolResource { + fn from(assoc: IpPoolResource) -> Self { + Self { + ip_pool_id: assoc.ip_pool_id, + resource_type: assoc.resource_type.into(), + resource_id: assoc.resource_id, + is_default: assoc.is_default, + } + } +} /// A range of IP addresses for an IP Pool. 
#[derive(Queryable, Insertable, Selectable, Clone, Debug)]
diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs
index aee5a53d5a..c3df60c27a 100644
--- a/nexus/db-queries/src/db/datastore/ip_pool.rs
+++ b/nexus/db-queries/src/db/datastore/ip_pool.rs
@@ -312,6 +312,31 @@ impl DataStore {
 })
 }

+ pub async fn ip_pool_association_list(
+ &self,
+ opctx: &OpContext,
+ authz_pool: &authz::IpPool,
+ pagparams: &DataPageParams<'_, Uuid>,
+ ) -> ListResultVec<IpPoolResource> {
+ use db::schema::ip_pool;
+ use db::schema::ip_pool_resource;
+
+ // resume on resource_id: it's the marker the endpoint hands back
+ paginated(
+ ip_pool_resource::table,
+ ip_pool_resource::resource_id,
+ pagparams,
+ )
+ .inner_join(ip_pool::table)
+ .filter(ip_pool::id.eq(authz_pool.id()))
+ .filter(ip_pool::time_deleted.is_null())
+ .select(IpPoolResource::as_select())
+ .load_async::<IpPoolResource>(
+ &*self.pool_connection_authorized(opctx).await?,
+ )
+ .await
+ .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+ }
+
 pub async fn ip_pool_associate_resource(
 &self,
 opctx: &OpContext,
@@ -559,6 +584,8 @@ mod test {
 use omicron_common::api::external::{Error, IdentityMetadataCreateParams};
 use omicron_test_utils::dev;

+ // TODO: add calls to the list endpoint throughout all this
+
 #[tokio::test]
 async fn test_default_ip_pools() {
 let logctx = dev::test_setup_log("test_default_ip_pools");
diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs
index da115cc972..95b1e3adbb 100644
--- a/nexus/src/app/ip_pool.rs
+++ b/nexus/src/app/ip_pool.rs
@@ -11,7 +11,6 @@ use nexus_db_queries::authz;
 use nexus_db_queries::context::OpContext;
 use nexus_db_queries::db;
 use nexus_db_queries::db::fixed_data::FLEET_ID;
-// use nexus_db_queries::db::fixed_data::silo::INTERNAL_SILO_ID;
 use nexus_db_queries::db::lookup;
 use nexus_db_queries::db::lookup::LookupPath;
 use nexus_db_queries::db::model::Name;
@@ -26,6 +25,7 @@ use omicron_common::api::external::NameOrId;
 use omicron_common::api::external::ResourceType;
 use omicron_common::api::external::UpdateResult;
 use ref_cast::RefCast;
+use uuid::Uuid;

 /// Helper to make it easier to 404 on attempts to manipulate internal pools
 fn not_found_from_lookup(pool_lookup: &lookup::IpPool<'_>) -> Error {
@@ -72,6 +72,20 @@ impl super::Nexus {
 self.db_datastore.ip_pool_create(opctx, pool).await
 }

+ pub(crate) async fn ip_pool_association_list(
+ &self,
+ opctx: &OpContext,
+ pool_lookup: &lookup::IpPool<'_>,
+ pagparams: &DataPageParams<'_, Uuid>,
+ ) -> ListResultVec<db::model::IpPoolResource> {
+ // TODO: is this the right action to check?
+ let (.., authz_pool) =
+ pool_lookup.lookup_for(authz::Action::ListChildren).await?;
+ self.db_datastore
+ .ip_pool_association_list(opctx, &authz_pool, pagparams)
+ .await
+ }
+
 pub(crate) async fn ip_pool_associate_resource(
 &self,
 opctx: &OpContext,
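A note on the pagination scheme used by the new list method: the page token handed back to the client has to carry the same column the query resumes on, or paging silently breaks. A minimal standalone sketch of that keyset-resume idea, using toy types rather than the Diesel query above (assumes only the uuid crate as a dependency):

use uuid::Uuid;

#[derive(Clone, Debug)]
struct Assoc {
    resource_id: Uuid,
}

// One page of rows, resuming strictly after the cursor returned with the
// previous page. Sorting and resuming on the same column (resource_id
// here) is what keeps successive pages consistent.
fn page(mut rows: Vec<Assoc>, after: Option<Uuid>, limit: usize) -> Vec<Assoc> {
    rows.sort_by_key(|a| a.resource_id);
    rows.into_iter()
        .filter(|a| after.map_or(true, |cursor| a.resource_id > cursor))
        .take(limit)
        .collect()
}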
diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index 03ad0fbbdc..1b5bfaf25d 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -121,6 +121,7 @@ pub(crate) fn external_api() -> NexusApiDescription {
 // Operator-Accessible IP Pools API
 api.register(ip_pool_list)?;
 api.register(ip_pool_create)?;
+ api.register(ip_pool_association_list)?;
 api.register(ip_pool_association_create)?;
 api.register(ip_pool_association_delete)?;
 api.register(ip_pool_view)?;
 api.register(ip_pool_delete)?;
 api.register(ip_pool_update)?;
@@ -1322,7 +1323,48 @@ async fn ip_pool_update(
 // across the board. What I really mean is "make available to" or "make available
 // for use in"

-/// Associate an IP Pool with a silo or project
+/// List IP pool resource associations
+#[endpoint {
+ method = GET,
+ path = "/v1/system/ip-pools/{pool}/associations",
+ tags = ["system/networking"],
+}]
+async fn ip_pool_association_list(
+ rqctx: RequestContext<Arc<ServerContext>>,
+ path_params: Path<params::IpPoolPath>,
+ // paginating by resource_id because they're unique per pool. most robust
+ // option would be to paginate by a composite key representing the (pool,
+ // resource_type, resource)
+ query_params: Query<PaginatedById>,
+) -> Result<HttpResponseOk<ResultsPage<views::IpPoolResource>>, HttpError> {
+ let apictx = rqctx.context();
+ let handler = async {
+ let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
+ let nexus = &apictx.nexus;
+
+ let query = query_params.into_inner();
+ let pag_params = data_page_params_for(&rqctx, &query)?;
+
+ let path = path_params.into_inner();
+ let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?;
+
+ let assocs = nexus
+ .ip_pool_association_list(&opctx, &pool_lookup, &pag_params)
+ .await?
+ .into_iter()
+ .map(|assoc| assoc.into())
+ .collect();
+
+ Ok(HttpResponseOk(ScanById::results_page(
+ &query,
+ assocs,
+ &|_, x: &views::IpPoolResource| x.resource_id,
+ )?))
+ };
+ apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+}
+
+/// Associate an IP Pool with a silo or the fleet
 #[endpoint {
 method = POST,
 path = "/v1/system/ip-pools/{pool}/associations",
@@ -1332,7 +1374,6 @@ async fn ip_pool_association_create(
 rqctx: RequestContext<Arc<ServerContext>>,
 path_params: Path<params::IpPoolPath>,
 resource_assoc: TypedBody<params::IpPoolAssociationCreate>,
- // TODO: what does this return? Returning the association record seems silly
 ) -> Result<HttpResponseCreated<views::IpPoolResource>, HttpError> {
 let apictx = rqctx.context();
 let handler = async {
@@ -1341,10 +1382,10 @@ async fn ip_pool_association_create(
 let path = path_params.into_inner();
 let resource_assoc = resource_assoc.into_inner();
 let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?;
- nexus
+ let assoc = nexus
 .ip_pool_associate_resource(&opctx, &pool_lookup, &resource_assoc)
 .await?;
- Ok(HttpResponseCreated(views::IpPoolResource {}))
+ Ok(HttpResponseCreated(assoc.into()))
 };
 apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
 }
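For reference, params::IpPoolAssociationCreate is an internally tagged enum, so the bodies accepted by the create endpoint above carry a resource_type tag alongside the variant's fields. A minimal sketch of the wire format with stand-in types (the real params use NameOrId for the silo field; assumes serde and serde_json as dependencies):

use serde::Serialize;

// Stand-ins for params::IpPoolAssociationCreate and friends, just to show
// the serialized shape; not the real nexus types.
#[derive(Serialize)]
#[serde(tag = "resource_type", rename_all = "snake_case")]
enum IpPoolAssociationCreate {
    Silo { silo: String, is_default: bool },
    Fleet { is_default: bool },
}

fn main() {
    let body = IpPoolAssociationCreate::Silo {
        silo: "my-silo".into(),
        is_default: true,
    };
    // POST /v1/system/ip-pools/{pool}/associations
    // prints {"resource_type":"silo","silo":"my-silo","is_default":true}
    println!("{}", serde_json::to_string(&body).unwrap());
}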
diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs
index 62ce4d2dde..bb62749689 100644
--- a/nexus/tests/integration_tests/ip_pools.rs
+++ b/nexus/tests/integration_tests/ip_pools.rs
@@ -6,6 +6,7 @@
 use dropshot::test_util::ClientTestContext;
 use dropshot::HttpErrorResponseBody;
+use dropshot::ResultsPage;
 use http::method::Method;
 use http::StatusCode;
 use nexus_db_queries::db::datastore::SERVICE_IP_POOL_NAME;
@@ -31,6 +32,7 @@
 use nexus_types::external_api::shared::Ipv6Range;
 use nexus_types::external_api::views::IpPool;
 use nexus_types::external_api::views::IpPoolRange;
 use nexus_types::external_api::views::IpPoolResource;
+use nexus_types::external_api::views::IpPoolResourceType;
 use nexus_types::external_api::views::Silo;
 use omicron_common::api::external::IdentityMetadataUpdateParams;
 use omicron_common::api::external::NameOrId;
@@ -345,8 +348,12 @@ async fn test_ip_pool_service_no_cud(cptestctx: &ControlPlaneTestContext) {
 async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) {
 let client = &cptestctx.external_client;

- let _created_pool = create_pool(client, "p0").await;
- let _created_pool = create_pool(client, "p1").await;
+ let p0 = create_pool(client, "p0").await;
+ let p1 = create_pool(client, "p1").await;
+
+ // there should be no associations
+ let assocs_p0 = get_associations(client, "p0").await;
+ assert_eq!(assocs_p0.items.len(), 0);

 // expect 404 on association if the specified silo doesn't exist
 let nonexistent_silo_id = Uuid::new_v4();
@@ -361,7 +367,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) {
 RequestBuilder::new(
 client,
 Method::POST,
- "/v1/system/ip-pools/p1/associations",
+ "/v1/system/ip-pools/p0/associations",
 )
 .body(Some(&params))
 .expect_status(Some(StatusCode::NOT_FOUND)),
@@ -378,7 +384,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) {
 format!("not found: silo with id \"{nonexistent_silo_id}\"")
 );

- // associate with silo that exists
+ // associate by name with silo that exists
 let params =
 params::IpPoolAssociationCreate::Silo(params::IpPoolAssociateSilo {
 // TODO: this is probably not the best silo ID to use
 silo: NameOrId::Name(cptestctx.silo_name.clone()),
 is_default: false,
 });
 let _: IpPoolResource = object_create(
 client,
- &format!("/v1/system/ip-pools/p1/associations"),
+ &format!("/v1/system/ip-pools/p0/associations"),
 &params,
 )
 .await;

- // TODO: test association worked, or at least comes back in association list
-
- // get silo ID so we can test association by ID as well
+ // get silo ID so we can test association by ID as well
 let silo_url = format!("/v1/system/silos/{}", cptestctx.silo_name);
 let silo = NexusRequest::object_get(client, &silo_url)
 .authn_as(AuthnMode::PrivilegedUser)
 .execute_and_parse_unwrap::<Silo>()
 .await;
+ let silo_id = silo.identity.id;
+
+ let assocs_p0 = get_associations(client, "p0").await;
+ let silo_assoc = IpPoolResource {
+ ip_pool_id: p0.identity.id,
+ resource_type: IpPoolResourceType::Silo,
+ resource_id: silo_id,
+ is_default: false,
+ };
+ assert_eq!(assocs_p0.items.len(), 1);
+ assert_eq!(assocs_p0.items[0], silo_assoc);

 // TODO: dissociate silo
 // TODO: confirm dissociation

- // associate same silo by ID
+ // associate same silo to other pool by ID
 let params =
 params::IpPoolAssociationCreate::Silo(params::IpPoolAssociateSilo {
 silo: NameOrId::Id(silo.identity.id),
 is_default: false,
 });
 let _: IpPoolResource = object_create(
 client,
 &format!("/v1/system/ip-pools/p1/associations"),
 &params,
 )
 .await;

+ // association should look the same as the other one, except different pool ID
+ let assocs_p1 = get_associations(client, "p1").await;
+ assert_eq!(assocs_p1.items.len(), 1);
+ assert_eq!(
+ assocs_p1.items[0],
+ IpPoolResource { ip_pool_id: p1.identity.id, ..silo_assoc }
+ );
+
 // TODO: associating a resource that is already associated should be a noop
 // and return a success message

 // TODO: trying to set a second default for a resource should fail
 }

 fn get_names(pools: Vec<IpPool>) -> Vec<String> {
 pools.iter().map(|p| p.identity.name.to_string()).collect()
 }

+async fn get_associations(
+ client: &ClientTestContext,
+ id: &str,
+) -> ResultsPage<IpPoolResource> {
+ objects_list_page_authz::<IpPoolResource>(
+ client,
+ &format!("/v1/system/ip-pools/{}/associations", id),
+ )
+ .await
+}
+
 async fn create_pool(client: &ClientTestContext, name: &str) -> IpPool {
 let params = IpPoolCreate {
 identity: IdentityMetadataCreateParams {
diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt
index 2039c33812..df2fdeed19 100644
--- a/nexus/tests/output/nexus_tags.txt
+++ b/nexus/tests/output/nexus_tags.txt
@@ -131,6 +131,7 @@ API operations found with tag "system/networking"
 OPERATION ID METHOD URL PATH
 ip_pool_association_create POST /v1/system/ip-pools/{pool}/associations
 ip_pool_association_delete DELETE /v1/system/ip-pools/{pool}/associations
+ip_pool_association_list GET 
/v1/system/ip-pools/{pool}/associations ip_pool_create POST /v1/system/ip-pools ip_pool_delete DELETE /v1/system/ip-pools/{pool} ip_pool_list GET /v1/system/ip-pools diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index c2cba6f3e0..b6dcad8553 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -246,9 +246,22 @@ pub struct IpPool { pub identity: IdentityMetadata, } -// TODO: placeholder response for IP pool associate POST -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct IpPoolResource {} +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum IpPoolResourceType { + Fleet, + Silo, +} + +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +pub struct IpPoolResource { + // TODO: is including the pool ID redundant? it's convenient to have and + // makes this response a cohesive whole + pub ip_pool_id: Uuid, + pub resource_type: IpPoolResourceType, + pub resource_id: Uuid, + pub is_default: bool, +} #[derive(Clone, Copy, Debug, Deserialize, Serialize, JsonSchema)] pub struct IpPoolRange { diff --git a/openapi/nexus.json b/openapi/nexus.json index dc0a406496..eadc31367c 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -4578,11 +4578,77 @@ } }, "/v1/system/ip-pools/{pool}/associations": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "List IP pool resource associations", + "operationId": "ip_pool_association_list", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolResourceResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, "post": { "tags": [ "system/networking" ], - "summary": "Associate an IP Pool with a silo or project", + "summary": "Associate an IP Pool with a silo or the fleet", "operationId": "ip_pool_association_create", "parameters": [ { @@ -12455,7 +12521,57 @@ ] }, "IpPoolResource": { - "type": "object" + "type": "object", + "properties": { + "ip_pool_id": { + "type": "string", + "format": "uuid" + }, + "is_default": { + "type": "boolean" + }, + "resource_id": { + "type": "string", + "format": "uuid" + }, + "resource_type": { + "$ref": "#/components/schemas/IpPoolResourceType" + } + }, + "required": [ + "ip_pool_id", + "is_default", + "resource_id", + "resource_type" + ] + }, + "IpPoolResourceResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": 
"#/components/schemas/IpPoolResource" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "IpPoolResourceType": { + "type": "string", + "enum": [ + "fleet", + "silo" + ] }, "IpPoolResultsPage": { "description": "A single page of results", diff --git a/schema/crdb/9.0.0/up4.sql b/schema/crdb/9.0.0/up4.sql index edcad062a8..52ce081453 100644 --- a/schema/crdb/9.0.0/up4.sql +++ b/schema/crdb/9.0.0/up4.sql @@ -1,4 +1,4 @@ --- copy existing fleet assocations into association table. treat all existing +-- copy existing fleet associations into association table. treat all existing -- pools as fleet-associated because that is the current behavior INSERT INTO ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) SELECT id, 'fleet', '001de000-1334-4000-8000-000000000000', is_default diff --git a/schema/crdb/9.0.0/up5.sql b/schema/crdb/9.0.0/up5.sql index 92f2201b86..656b45027b 100644 --- a/schema/crdb/9.0.0/up5.sql +++ b/schema/crdb/9.0.0/up5.sql @@ -1,4 +1,4 @@ --- copy existing ip_pool-to-silo assocations into association table +-- copy existing ip_pool-to-silo associations into association table INSERT INTO ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) SELECT id, 'silo', silo_id, is_default FROM ip_pool From 9519ed1f3374890f56fb4f2ab112197f774a70d1 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Mon, 6 Nov 2023 17:16:50 -0600 Subject: [PATCH 23/67] don't allow creating IP with not-associated pool, orphan pool test --- .../src/db/datastore/external_ip.rs | 28 +++---- nexus/db-queries/src/db/datastore/ip_pool.rs | 32 ++++++++ nexus/tests/integration_tests/instances.rs | 81 +++++++++++++++++++ nexus/tests/integration_tests/ip_pools.rs | 18 ++--- 4 files changed, 129 insertions(+), 30 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index a205d4d1cf..0ffcc89aae 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -6,6 +6,7 @@ use super::DataStore; use crate::authz; +use crate::authz::ApiResource; use crate::context::OpContext; use crate::db; use crate::db::error::public_error_from_diesel; @@ -56,30 +57,21 @@ impl DataStore { ) -> CreateResult { let pool = match pool_name { Some(name) => { - let (.., _authz_pool, pool) = LookupPath::new(opctx, &self) + let (.., authz_pool, pool) = LookupPath::new(opctx, &self) .ip_pool_name(&name) // any authenticated user can CreateChild on an IP pool. this is // meant to represent allocating an IP .fetch_for(authz::Action::CreateChild) .await?; - // TODO: this logic must change now that pools can be associated - // with many resources. The logic is slightly simpler now (at - // least conceptually, if not in implementation): Is this pool - // associated with either the fleet or the silo? otherwise, 404 - - // If the named pool conflicts with user's current scope, i.e., - // if it has a silo and it's different from the current silo, - // then as far as IP allocation is concerned, that pool doesn't - // exist. If the pool has no silo, it's fleet-scoped and can - // always be used. - - // let authz_silo_id = opctx.authn.silo_required()?.id(); - // if let Some(pool_silo_id) = pool.silo_id { - // if pool_silo_id != authz_silo_id { - // return Err(authz_pool.not_found()); - // } - // } + // Is this pool associated with either the fleet or the silo? 
otherwise, 404 + if self + .ip_pool_fetch_association(opctx, &authz_pool) + .await + .is_err() + { + return Err(authz_pool.not_found()); + } pool } diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index c3df60c27a..9c3b8c100f 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -79,6 +79,38 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + /// Look up whether the given pool is available to users in the given silo, + /// i.e., whether there is an entry in the association table associating the + /// pool with either that silo or the fleet + pub async fn ip_pool_fetch_association( + &self, + opctx: &OpContext, + authz_pool: &authz::IpPool, + ) -> LookupResult { + use db::schema::ip_pool; + use db::schema::ip_pool_resource; + + let authz_silo = opctx.authn.silo_required()?; + + ip_pool::table + .inner_join(ip_pool_resource::table) + .filter( + (ip_pool_resource::resource_type + .eq(IpPoolResourceType::Silo) + .and(ip_pool_resource::resource_id.eq(authz_silo.id()))) + .or(ip_pool_resource::resource_type + .eq(IpPoolResourceType::Fleet)), + ) + .filter(ip_pool::id.eq(authz_pool.id())) + .filter(ip_pool::time_deleted.is_null()) + .select(IpPoolResource::as_select()) + .first_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + /// Look up the default IP pool for the current silo. If there is no default /// at silo scope, fall back to the next level up, namely the fleet default. /// There should always be a default pool at the fleet level, though this diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index a6aee0b87e..346ee32839 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -3654,6 +3654,87 @@ async fn test_instance_ephemeral_ip_from_correct_pool( ); } +#[nexus_test] +async fn test_instance_ephemeral_ip_from_orphan_pool( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + let _ = create_project(&client, PROJECT_NAME).await; + + let pool_name = "orphan-pool"; + let _: views::IpPool = object_create( + client, + "/v1/system/ip-pools", + ¶ms::IpPoolCreate { + identity: IdentityMetadataCreateParams { + name: String::from(pool_name).parse().unwrap(), + description: String::from("an ip pool"), + }, + }, + ) + .await; + + let default_pool_range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(10, 0, 0, 1), + std::net::Ipv4Addr::new(10, 0, 0, 5), + ) + .unwrap(), + ); + let orphan_pool_range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(10, 1, 0, 1), + std::net::Ipv4Addr::new(10, 1, 0, 5), + ) + .unwrap(), + ); + // have to populate default pool or snat IP allocation fails before it can + // get to failing on ephemeral IP allocation + populate_ip_pool(&client, "default", Some(default_pool_range)).await; + populate_ip_pool(client, pool_name, Some(orphan_pool_range)).await; + + // this should 404 + let instance_name = "orphan-pool-inst"; + let body = params::InstanceCreate { + identity: IdentityMetadataCreateParams { + name: instance_name.parse().unwrap(), + description: format!("instance {:?}", instance_name), + }, + ncpus: InstanceCpuCount(4), + memory: ByteCount::from_gibibytes_u32(1), + hostname: String::from("the_host"), + user_data: + b"#cloud-config\nsystem_info:\n default_user:\n name: 
oxide" + .to_vec(), + network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, + external_ips: vec![params::ExternalIpCreate::Ephemeral { + pool_name: Some("orphan-pool".parse().unwrap()), + }], + disks: vec![], + start: true, + }; + + let url = format!("/v1/instances?project={}", PROJECT_NAME); + let error = NexusRequest::new( + RequestBuilder::new(&client, http::Method::POST, &url) + .expect_status(Some(StatusCode::NOT_FOUND)) + .body(Some(&body)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await; + + assert_eq!(error.error_code.unwrap(), "ObjectNotFound".to_string()); + assert_eq!( + error.message, + "not found: ip-pool with name \"orphan-pool\"".to_string() + ); + + // TODO: associate the pool with a different silo and we should get the same + // error on instance create +} + async fn create_instance_with_pool( client: &ClientTestContext, instance_name: &str, diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index bb62749689..13bbf44bc9 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -391,12 +391,9 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { silo: NameOrId::Name(cptestctx.silo_name.clone()), is_default: false, }); - let _: IpPoolResource = object_create( - client, - &format!("/v1/system/ip-pools/p0/associations"), - ¶ms, - ) - .await; + let _: IpPoolResource = + object_create(client, "/v1/system/ip-pools/p0/associations", ¶ms) + .await; // get silo ID so we can test association by ID as well let silo_url = format!("/v1/system/silos/{}", cptestctx.silo_name); @@ -425,12 +422,9 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { silo: NameOrId::Id(silo.identity.id), is_default: false, }); - let _: IpPoolResource = object_create( - client, - &format!("/v1/system/ip-pools/p1/associations"), - ¶ms, - ) - .await; + let _: IpPoolResource = + object_create(client, "/v1/system/ip-pools/p1/associations", ¶ms) + .await; // association should look the same as the other one, except different pool ID let assocs_p1 = get_associations(client, "p1").await; From d7a0bdb5fa46805a8a568d4949d65db180f68684 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Tue, 7 Nov 2023 12:23:28 -0600 Subject: [PATCH 24/67] note that we are relying on DB enum order for most specific, flip order --- nexus/db-queries/src/db/datastore/ip_pool.rs | 14 ++++++-------- schema/crdb/dbinit.sql | 8 ++++++-- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 9c3b8c100f..1d67dd6916 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -147,10 +147,12 @@ impl DataStore { ) .filter(ip_pool_resource::is_default.eq(true)) .filter(ip_pool::time_deleted.is_null()) - // TODO: order by most specific first so we get the most specific - // when we select the first one. alphabetical desc technically - // works but come on. won't work when we have project association - .order(ip_pool_resource::resource_type.desc()) + // Order by most specific first so we get the most specific. + // resource_type is an enum in the DB and therefore gets its order + // from the definition; it's not lexicographic. So correctness here + // relies on the types being most-specific-first in the definition. + // There are tests for this. 
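A quick self-contained illustration of the definition-order point above: like a Postgres enum, a Rust enum's derived `Ord` follows declaration order, so sorting ascending and taking the first match yields the most specific default when both a silo default and a fleet default exist. The types below are illustrative stand-ins, not the Nexus models.

```rust
/// Stand-in for the database enum; `Silo` is declared first, so it sorts
/// first under the derived ordering, just as 'silo' sorts before 'fleet'
/// in a SQL enum defined most-specific-first.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum ResourceType {
    Silo,
    Fleet,
}

/// Pick the most specific default: sort ascending, take the first.
fn most_specific(mut defaults: Vec<ResourceType>) -> Option<ResourceType> {
    defaults.sort();
    defaults.into_iter().next()
}

fn main() {
    // With both defaults present, the silo default wins; with only a
    // fleet default, we fall back to it.
    assert_eq!(
        most_specific(vec![ResourceType::Fleet, ResourceType::Silo]),
        Some(ResourceType::Silo)
    );
    assert_eq!(most_specific(vec![ResourceType::Fleet]), Some(ResourceType::Fleet));
}
```

If a project variant were ever added before `Silo`, the same ascending sort would prefer it automatically, which is the property the accompanying `dbinit.sql` comment calls out.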
+ .order(ip_pool_resource::resource_type.asc()) .select(IpPool::as_select()) .first_async::( &*self.pool_connection_authorized(opctx).await?, @@ -304,10 +306,6 @@ impl DataStore { .and(ip_pool_resource::resource_id.eq(*INTERNAL_SILO_ID)), ) .filter(ip_pool::time_deleted.is_null()) - // TODO: order by most specific first so we get the most specific - // when we select the first one. alphabetical desc technically - // works but come on. won't work when we have project association - .order(ip_pool_resource::resource_type.desc()) .select(IpPool::as_select()) .load_async::( &*self.pool_connection_authorized(opctx).await?, diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index b70df563cb..0a941490e9 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1470,9 +1470,13 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_pool_by_name ON omicron.public.ip_pool ) WHERE time_deleted IS NULL; +-- The order here is most-specific first, and it matters because we use this +-- fact to select the most specific default in the case where there is both a +-- silo default and a fleet default. If we were to add a project type, it should +-- be added before silo. CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_resource_type AS ENUM ( - 'fleet', - 'silo' + 'silo', + 'fleet' ); -- join table associating IP pools with resources like fleet or silo From e79b8627a4bc01eef927e59822b92b5a9d2fe724 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Tue, 7 Nov 2023 14:37:46 -0600 Subject: [PATCH 25/67] fix tests broken by lack of ip association by default --- .../db-queries/src/db/queries/external_ip.rs | 41 +++++++++---------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index d5acee47ca..df45b41012 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -825,6 +825,8 @@ mod tests { use async_bb8_diesel::AsyncRunQueryDsl; use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use dropshot::test_util::LogContext; + use nexus_db_model::IpPoolResource; + use nexus_db_model::IpPoolResourceType; use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::shared::IpRange; use omicron_common::address::NUM_SOURCE_NAT_PORTS; @@ -862,32 +864,29 @@ mod tests { Self { logctx, opctx, db, db_datastore } } - async fn create_ip_pool( - &self, - name: &str, - range: IpRange, - _is_default: bool, - ) { + /// Create pool and associate with current silo + async fn create_ip_pool(&self, name: &str, range: IpRange) { let pool = IpPool::new(&IdentityMetadataCreateParams { name: String::from(name).parse().unwrap(), description: format!("ip pool {}", name), }); - let conn = self - .db_datastore - .pool_connection_authorized(&self.opctx) + self.db_datastore + .ip_pool_create(&self.opctx, pool.clone()) .await - .unwrap(); - - use crate::db::schema::ip_pool::dsl as ip_pool_dsl; - diesel::insert_into(ip_pool_dsl::ip_pool) - .values(pool.clone()) - .execute_async(&*conn) + .expect("Failed to create IP pool"); + + let silo_id = self.opctx.authn.silo_required().unwrap().id(); + let association = IpPoolResource { + resource_id: silo_id, + resource_type: IpPoolResourceType::Silo, + ip_pool_id: pool.id(), + is_default: false, + }; + self.db_datastore + .ip_pool_associate_resource(&self.opctx, association) .await - .expect("Failed to create IP Pool"); - - let _silo_id = self.opctx.authn.silo_required().unwrap().id(); - // TODO: associate with 
silo here to match previous behavior + .expect("Failed to associate IP dool with silo"); self.initialize_ip_pool(name, range).await; } @@ -1713,7 +1712,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 6), )) .unwrap(); - context.create_ip_pool("p1", second_range, /*default*/ false).await; + context.create_ip_pool("p1", second_range).await; // Allocating an address on an instance in the second pool should be // respected, even though there are IPs available in the first. @@ -1756,7 +1755,7 @@ mod tests { let last_address = Ipv4Addr::new(10, 0, 0, 6); let second_range = IpRange::try_from((first_address, last_address)).unwrap(); - context.create_ip_pool("p1", second_range, /* default */ false).await; + context.create_ip_pool("p1", second_range).await; // Allocate all available addresses in the second pool. let instance_id = Uuid::new_v4(); From c60ea2afa867dd74e2c1d385521e6f9a07b351e7 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Tue, 7 Nov 2023 16:35:20 -0600 Subject: [PATCH 26/67] first pass at checking for allocated IPs in associated pool before delete --- nexus/db-model/src/schema.rs | 5 ++ nexus/db-queries/src/db/datastore/ip_pool.rs | 66 ++++++++++++++++++++ 2 files changed, 71 insertions(+) diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 4f79288ac3..421465bd3b 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1221,6 +1221,11 @@ allow_tables_to_appear_in_same_query!( allow_tables_to_appear_in_same_query!(dns_zone, dns_version, dns_name); allow_tables_to_appear_in_same_query!(external_ip, service); +// used for query to check whether an IP pool association has any allocated IPs before deleting +allow_tables_to_appear_in_same_query!(external_ip, instance); +allow_tables_to_appear_in_same_query!(external_ip, project); +allow_tables_to_appear_in_same_query!(external_ip, ip_pool_resource); + allow_tables_to_appear_in_same_query!( switch_port, switch_port_settings_route_config diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 1d67dd6916..07bbfcf5f1 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -26,6 +26,7 @@ use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use ipnetwork::IpNetwork; +use nexus_db_model::ExternalIp; use nexus_db_model::IpPoolResourceType; use nexus_types::external_api::shared::IpRange; use omicron_common::api::external::http_pagination::PaginatedBy; @@ -413,18 +414,83 @@ impl DataStore { }) } + // TODO: move all that horrible ip searching logic in here + // TODO: write a test for this + // async fn ensure_no_ips_outstanding() {} + + /// Delete IP pool assocation with resource unless there are outstanding + /// IPs allocated from the pool in the associated silo (or the fleet, if + /// it's a fleet association). pub async fn ip_pool_dissociate_resource( &self, opctx: &OpContext, // TODO: this could take the authz_pool, it's just more annoying to test that way ip_pool_id: Uuid, + // TODO: we need to know the resource type because it affects the IPs query resource_id: Uuid, ) -> DeleteResult { + use db::schema::external_ip; + use db::schema::instance; use db::schema::ip_pool_resource; + use db::schema::project; opctx .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) .await?; + // We can only delete the association if there are no IPs allocated + // from this pool in the associated resource. 
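Stripped of Diesel and the real schema, the guard introduced here is a check-then-delete: refuse to drop a pool-to-silo link while any live allocation from that pool still exists in the linked scope, and only then delete. The following is a hypothetical miniature of that flow, not the datastore code:

```rust
use std::collections::HashSet;

/// Minimal stand-in for "IPs allocated from pool X inside silo Y".
struct Allocations {
    /// (pool_id, silo_id) pairs that still have at least one live external IP.
    live: HashSet<(u32, u32)>,
}

/// Links between pools and silos; the guard protects deletions here.
struct Links {
    links: HashSet<(u32, u32)>,
}

impl Links {
    /// Delete a pool-silo link unless live allocations still exist,
    /// mirroring the ensure-then-delete shape of the diff.
    fn unlink(&mut self, alloc: &Allocations, pool: u32, silo: u32) -> Result<(), String> {
        if alloc.live.contains(&(pool, silo)) {
            return Err("IP addresses from this pool are in use in the silo".to_string());
        }
        self.links.remove(&(pool, silo));
        Ok(())
    }
}

fn main() {
    let alloc = Allocations { live: HashSet::from([(1, 7)]) };
    let mut links = Links { links: HashSet::from([(1, 7), (2, 7)]) };
    assert!(links.unlink(&alloc, 1, 7).is_err()); // guarded: IPs outstanding
    assert!(links.unlink(&alloc, 2, 7).is_ok()); // no allocations, unlink ok
}
```

As in the diff, the existence check and the delete are separate queries rather than one atomic statement.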
+ + // most of the query is the same between silo and fleet + let base_query = |table: external_ip::table| { + table + .inner_join( + instance::table + .on(external_ip::parent_id.eq(instance::id.nullable())), + ) + .filter(external_ip::is_service.eq(false)) + .filter(external_ip::parent_id.is_not_null()) + .filter(external_ip::time_deleted.is_null()) + .filter(external_ip::ip_pool_id.eq(ip_pool_id)) + .filter(instance::time_deleted.is_not_null()) + .select(ExternalIp::as_select()) + .limit(1) + }; + + let is_silo = true; // TODO obviously this is not how this works + let existing_ips = if is_silo { + // if it's a silo association, we also have to join through IPs to instances + // to projects to get the silo ID + base_query(external_ip::table) + .inner_join( + project::table.on(instance::project_id.eq(project::id)), + ) + .filter(project::silo_id.eq(resource_id)) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + } else { + // If it's a fleet association, we can't delete it if there are any IPs + // allocated from the pool anywhere + base_query(external_ip::table) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + } + .map_err(|e| { + Error::internal_error(&format!( + "error checking for outstanding IPs before deleting IP pool association to resource: {:?}", + e + )) + })?; + + if !existing_ips.is_empty() { + return Err(Error::InvalidRequest { + message: "IP addresses from this pool are in use in the associated silo/fleet".to_string() + }); + } + diesel::delete(ip_pool_resource::table) .filter(ip_pool_resource::ip_pool_id.eq(ip_pool_id)) .filter(ip_pool_resource::resource_id.eq(resource_id)) From fa9317aa23ab981d42efe703956e70ca323115cd Mon Sep 17 00:00:00 2001 From: David Crespo Date: Tue, 7 Nov 2023 20:41:17 -0600 Subject: [PATCH 27/67] move check for outstanding IPs into its own function --- nexus/db-queries/src/db/datastore/ip_pool.rs | 34 ++++++++++++++------ 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 07bbfcf5f1..f674ac78a9 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -414,24 +414,17 @@ impl DataStore { }) } - // TODO: move all that horrible ip searching logic in here // TODO: write a test for this - // async fn ensure_no_ips_outstanding() {} - - /// Delete IP pool assocation with resource unless there are outstanding - /// IPs allocated from the pool in the associated silo (or the fleet, if - /// it's a fleet association). - pub async fn ip_pool_dissociate_resource( + async fn ensure_no_ips_outstanding( &self, opctx: &OpContext, // TODO: this could take the authz_pool, it's just more annoying to test that way ip_pool_id: Uuid, // TODO: we need to know the resource type because it affects the IPs query resource_id: Uuid, - ) -> DeleteResult { + ) -> Result<(), Error> { use db::schema::external_ip; use db::schema::instance; - use db::schema::ip_pool_resource; use db::schema::project; opctx .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) @@ -491,6 +484,29 @@ impl DataStore { }); } + Ok(()) + } + + /// Delete IP pool assocation with resource unless there are outstanding + /// IPs allocated from the pool in the associated silo (or the fleet, if + /// it's a fleet association). 
+ pub async fn ip_pool_dissociate_resource( + &self, + opctx: &OpContext, + // TODO: this could take the authz_pool, it's just more annoying to test that way + ip_pool_id: Uuid, + // TODO: we need to know the resource type because it affects the IPs query + resource_id: Uuid, + ) -> DeleteResult { + use db::schema::ip_pool_resource; + opctx + .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) + .await?; + + // We can only delete the association if there are no IPs allocated + // from this pool in the associated resource. + self.ensure_no_ips_outstanding(opctx, ip_pool_id, resource_id).await?; + diesel::delete(ip_pool_resource::table) .filter(ip_pool_resource::ip_pool_id.eq(ip_pool_id)) .filter(ip_pool_resource::resource_id.eq(resource_id)) From 2345f353843660d928ef030a4bf9f02677669189 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Tue, 7 Nov 2023 22:21:54 -0600 Subject: [PATCH 28/67] correct logic for looking up outstanding IPs --- nexus/db-model/src/ip_pool.rs | 9 ++++ nexus/db-queries/src/db/datastore/ip_pool.rs | 48 +++++++++++--------- nexus/src/app/ip_pool.rs | 24 +++++++++- nexus/types/src/external_api/params.rs | 13 ++++-- 4 files changed, 66 insertions(+), 28 deletions(-) diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index b6f496c99e..432737e57e 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -97,6 +97,7 @@ pub struct IpPoolResource { pub resource_id: Uuid, pub is_default: bool, } + impl From for views::IpPoolResourceType { fn from(typ: IpPoolResourceType) -> Self { match typ { @@ -106,6 +107,14 @@ impl From for views::IpPoolResourceType { } } +/// Information required to delete an IP pool association. Comes from request +/// params -- silo is a NameOrId and must be resolved to ID. 
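An aside on that resolution step: request params accept either a name or an ID, and the app layer resolves to a concrete ID before building the datastore request. Here is a minimal runnable analogue with hypothetical types, not the real `NameOrId` or lookup plumbing:

```rust
use std::collections::HashMap;

/// Illustrative stand-in for the name-or-ID param type.
#[derive(Debug, Clone)]
enum NameOrId {
    Name(String),
    Id(u64),
}

struct SiloDirectory {
    by_name: HashMap<String, u64>,
}

impl SiloDirectory {
    /// Resolve a name to its ID, or pass an ID through; a missing name
    /// errors the way a 404 lookup would.
    fn resolve(&self, silo: &NameOrId) -> Result<u64, String> {
        match silo {
            NameOrId::Id(id) => Ok(*id),
            NameOrId::Name(n) => self
                .by_name
                .get(n)
                .copied()
                .ok_or_else(|| format!("not found: silo with name {n:?}")),
        }
    }
}

fn main() {
    let dir = SiloDirectory { by_name: HashMap::from([("eng".to_string(), 42)]) };
    assert_eq!(dir.resolve(&NameOrId::Name("eng".into())), Ok(42));
    assert_eq!(dir.resolve(&NameOrId::Id(7)), Ok(7));
    assert!(dir.resolve(&NameOrId::Name("nope".into())).is_err());
}
```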
+pub struct IpPoolResourceDelete { + pub ip_pool_id: Uuid, + pub resource_type: IpPoolResourceType, + pub resource_id: Uuid, +} + impl From for views::IpPoolResource { fn from(assoc: IpPoolResource) -> Self { Self { diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index f674ac78a9..9c557c9e9b 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -14,11 +14,10 @@ use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::identity::Resource; -use crate::db::model::IpPool; -use crate::db::model::IpPoolRange; -use crate::db::model::IpPoolResource; -use crate::db::model::IpPoolUpdate; -use crate::db::model::Name; +use crate::db::model::{ + IpPool, IpPoolRange, IpPoolResource, IpPoolResourceDelete, IpPoolUpdate, + Name, +}; use crate::db::pagination::paginated; use crate::db::pool::DbConnection; use crate::db::queries::ip_pool::FilterOverlappingIpRanges; @@ -418,10 +417,7 @@ impl DataStore { async fn ensure_no_ips_outstanding( &self, opctx: &OpContext, - // TODO: this could take the authz_pool, it's just more annoying to test that way - ip_pool_id: Uuid, - // TODO: we need to know the resource type because it affects the IPs query - resource_id: Uuid, + association: &IpPoolResourceDelete, ) -> Result<(), Error> { use db::schema::external_ip; use db::schema::instance; @@ -443,26 +439,28 @@ impl DataStore { .filter(external_ip::is_service.eq(false)) .filter(external_ip::parent_id.is_not_null()) .filter(external_ip::time_deleted.is_null()) - .filter(external_ip::ip_pool_id.eq(ip_pool_id)) + .filter(external_ip::ip_pool_id.eq(association.ip_pool_id)) .filter(instance::time_deleted.is_not_null()) .select(ExternalIp::as_select()) .limit(1) }; - let is_silo = true; // TODO obviously this is not how this works - let existing_ips = if is_silo { + let existing_ips = match association.resource_type { + IpPoolResourceType::Silo => { + // if it's a silo association, we also have to join through IPs to instances // to projects to get the silo ID base_query(external_ip::table) .inner_join( project::table.on(instance::project_id.eq(project::id)), ) - .filter(project::silo_id.eq(resource_id)) + .filter(project::silo_id.eq(association.resource_id)) .load_async::( &*self.pool_connection_authorized(opctx).await?, ) .await - } else { + }, + IpPoolResourceType::Fleet => { // If it's a fleet association, we can't delete it if there are any IPs // allocated from the pool anywhere base_query(external_ip::table) @@ -470,6 +468,7 @@ impl DataStore { &*self.pool_connection_authorized(opctx).await?, ) .await + } } .map_err(|e| { Error::internal_error(&format!( @@ -493,10 +492,7 @@ impl DataStore { pub async fn ip_pool_dissociate_resource( &self, opctx: &OpContext, - // TODO: this could take the authz_pool, it's just more annoying to test that way - ip_pool_id: Uuid, - // TODO: we need to know the resource type because it affects the IPs query - resource_id: Uuid, + association: &IpPoolResourceDelete, ) -> DeleteResult { use db::schema::ip_pool_resource; opctx @@ -505,11 +501,11 @@ impl DataStore { // We can only delete the association if there are no IPs allocated // from this pool in the associated resource. 
- self.ensure_no_ips_outstanding(opctx, ip_pool_id, resource_id).await?; + self.ensure_no_ips_outstanding(opctx, association).await?; diesel::delete(ip_pool_resource::table) - .filter(ip_pool_resource::ip_pool_id.eq(ip_pool_id)) - .filter(ip_pool_resource::resource_id.eq(resource_id)) + .filter(ip_pool_resource::ip_pool_id.eq(association.ip_pool_id)) + .filter(ip_pool_resource::resource_id.eq(association.resource_id)) .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map(|_rows_deleted| ()) @@ -691,6 +687,7 @@ mod test { use crate::db::fixed_data::FLEET_ID; use crate::db::model::{IpPool, IpPoolResource, IpPoolResourceType}; use assert_matches::assert_matches; + use nexus_db_model::IpPoolResourceDelete; use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Resource; use omicron_common::api::external::{Error, IdentityMetadataCreateParams}; @@ -816,7 +813,14 @@ mod test { // now remove the association and we should get the default fleet pool again datastore - .ip_pool_dissociate_resource(&opctx, pool1_for_silo.id(), silo_id) + .ip_pool_dissociate_resource( + &opctx, + &IpPoolResourceDelete { + ip_pool_id: pool1_for_silo.id(), + resource_id: silo_id, + resource_type: IpPoolResourceType::Silo, + }, + ) .await .expect("Failed to dissociate IP pool from silo"); diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 95b1e3adbb..a4b2a5a908 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -7,6 +7,8 @@ use crate::external_api::params; use crate::external_api::shared::IpRange; use ipnetwork::IpNetwork; +use nexus_db_model::IpPoolResourceDelete; +use nexus_db_model::IpPoolResourceType; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; @@ -14,6 +16,7 @@ use nexus_db_queries::db::fixed_data::FLEET_ID; use nexus_db_queries::db::lookup; use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::model::Name; +use nexus_types::identity::Resource; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; @@ -137,11 +140,28 @@ impl super::Nexus { ) -> DeleteResult { let (.., authz_pool) = pool_lookup.lookup_for(authz::Action::Modify).await?; + + let (resource_type, resource_id) = match ip_pool_dissoc { + params::IpPoolAssociationDelete::Silo(assoc) => { + let (.., silo) = self + .silo_lookup(opctx, assoc.silo.clone())? + .fetch() + .await?; + (IpPoolResourceType::Silo, silo.id()) + } + params::IpPoolAssociationDelete::Fleet => { + (IpPoolResourceType::Fleet, *FLEET_ID) + } + }; + self.db_datastore .ip_pool_dissociate_resource( opctx, - authz_pool.id(), - ip_pool_dissoc.resource_id, + &IpPoolResourceDelete { + ip_pool_id: authz_pool.id(), + resource_id, + resource_type, + }, ) .await } diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index db7f14fe45..081c7a594e 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -760,10 +760,15 @@ pub enum IpPoolAssociationCreate { } #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct IpPoolAssociationDelete { - pub resource_id: Uuid, - // TODO: not technically necessary, should we include it for completeness? 
feels user-unfriendly - // pub resource_type: IpPoolResourceType, +pub struct IpPoolSiloAssociationDelete { + pub silo: NameOrId, +} + +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +#[serde(tag = "resource_type", rename_all = "snake_case")] +pub enum IpPoolAssociationDelete { + Silo(IpPoolSiloAssociationDelete), + Fleet, } // INSTANCES From 322686679427c404b3cde55d9ae2ead0722bebbf Mon Sep 17 00:00:00 2001 From: David Crespo Date: Fri, 10 Nov 2023 14:36:56 -0600 Subject: [PATCH 29/67] fix dissociate query params type serialization --- nexus/db-model/src/ip_pool.rs | 4 +-- nexus/src/app/ip_pool.rs | 6 ++-- nexus/src/external_api/http_entrypoints.rs | 14 +++++++-- nexus/tests/integration_tests/ip_pools.rs | 2 +- nexus/types/src/external_api/params.rs | 36 ++++++++++++++++++++-- nexus/types/src/external_api/shared.rs | 7 +++++ nexus/types/src/external_api/views.rs | 9 +----- openapi/nexus.json | 12 ++++++-- 8 files changed, 68 insertions(+), 22 deletions(-) diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 432737e57e..a8bee67bdf 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -16,7 +16,7 @@ use db_macros::Resource; use diesel::Selectable; use ipnetwork::IpNetwork; use nexus_types::external_api::params; -use nexus_types::external_api::shared::IpRange; +use nexus_types::external_api::shared::{self, IpRange}; use nexus_types::external_api::views; use nexus_types::identity::Resource; use omicron_common::api::external; @@ -98,7 +98,7 @@ pub struct IpPoolResource { pub is_default: bool, } -impl From for views::IpPoolResourceType { +impl From for shared::IpPoolResourceType { fn from(typ: IpPoolResourceType) -> Self { match typ { IpPoolResourceType::Fleet => Self::Fleet, diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index a4b2a5a908..95c93e4b1d 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -136,20 +136,20 @@ impl super::Nexus { &self, opctx: &OpContext, pool_lookup: &lookup::IpPool<'_>, - ip_pool_dissoc: ¶ms::IpPoolAssociationDelete, + ip_pool_dissoc: ¶ms::IpPoolAssociationDeleteValidated, ) -> DeleteResult { let (.., authz_pool) = pool_lookup.lookup_for(authz::Action::Modify).await?; let (resource_type, resource_id) = match ip_pool_dissoc { - params::IpPoolAssociationDelete::Silo(assoc) => { + params::IpPoolAssociationDeleteValidated::Silo(assoc) => { let (.., silo) = self .silo_lookup(opctx, assoc.silo.clone())? .fetch() .await?; (IpPoolResourceType::Silo, silo.id()) } - params::IpPoolAssociationDelete::Fleet => { + params::IpPoolAssociationDeleteValidated::Fleet => { (IpPoolResourceType::Fleet, *FLEET_ID) } }; diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 1b5bfaf25d..b4f82d9e7e 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -1399,7 +1399,6 @@ async fn ip_pool_association_create( async fn ip_pool_association_delete( rqctx: RequestContext>, path_params: Path, - // TODO: should this just be a path param? 
we have been trying to avoid that query_params: Query, ) -> Result { let apictx = rqctx.context(); @@ -1408,8 +1407,19 @@ async fn ip_pool_association_delete( let nexus = &apictx.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); + + let validated_params = + params::IpPoolAssociationDeleteValidated::try_from(query) + .map_err(|e| HttpError::for_bad_request(None, e))?; + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; - nexus.ip_pool_dissociate_resource(&opctx, &pool_lookup, &query).await?; + nexus + .ip_pool_dissociate_resource( + &opctx, + &pool_lookup, + &validated_params, + ) + .await?; Ok(HttpResponseUpdatedNoContent()) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 13bbf44bc9..9cf1155337 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -26,13 +26,13 @@ use nexus_types::external_api::params::InstanceDiskAttachment; use nexus_types::external_api::params::InstanceNetworkInterfaceAttachment; use nexus_types::external_api::params::IpPoolCreate; use nexus_types::external_api::params::IpPoolUpdate; +use nexus_types::external_api::shared::IpPoolResourceType; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::Ipv4Range; use nexus_types::external_api::shared::Ipv6Range; use nexus_types::external_api::views::IpPool; use nexus_types::external_api::views::IpPoolRange; use nexus_types::external_api::views::IpPoolResource; -use nexus_types::external_api::views::IpPoolResourceType; use nexus_types::external_api::views::Silo; use omicron_common::api::external::IdentityMetadataUpdateParams; use omicron_common::api::external::NameOrId; diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 081c7a594e..9424e41fc9 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -759,18 +759,48 @@ pub enum IpPoolAssociationCreate { Fleet(IpPoolAssociateFleet), } +// It would be cool if this was an enum like Silo(NameOrId) | Fleet, but +// OpenAPI doesn't support fancy schemas on query params. So instead we +// use this flat struct for the query params directly, and manually parse +// that into the good struct below. 
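Before those definitions, a self-contained miniature of the same parse-then-validate pattern, using hypothetical types rather than the Nexus ones; the real flat struct and its validated counterpart follow the sketch. The flat shape is what the query string can express, and `TryFrom` rejects the two combinations that make no sense:

```rust
/// Flat stand-in for the query params: OpenAPI-friendly, but it can
/// encode invalid combinations, so validation happens after parsing.
struct FlatParams {
    silo: Option<String>,
    resource_type: ResourceType,
}

#[derive(Debug, PartialEq)]
enum ResourceType {
    Silo,
    Fleet,
}

/// The "good" shape: invalid combinations are unrepresentable.
#[derive(Debug, PartialEq)]
enum Validated {
    Silo(String),
    Fleet,
}

impl TryFrom<FlatParams> for Validated {
    type Error = String;

    fn try_from(p: FlatParams) -> Result<Self, Self::Error> {
        match (p.silo, p.resource_type) {
            (Some(silo), ResourceType::Silo) => Ok(Validated::Silo(silo)),
            (None, ResourceType::Fleet) => Ok(Validated::Fleet),
            (Some(_), ResourceType::Fleet) => {
                Err("silo must be omitted when resource_type is fleet".to_string())
            }
            (None, ResourceType::Silo) => {
                Err("silo is required when resource_type is silo".to_string())
            }
        }
    }
}

fn main() {
    let ok = FlatParams { silo: Some("eng".into()), resource_type: ResourceType::Silo };
    assert_eq!(Validated::try_from(ok), Ok(Validated::Silo("eng".into())));

    let bad = FlatParams { silo: Some("eng".into()), resource_type: ResourceType::Fleet };
    assert!(Validated::try_from(bad).is_err());
}
```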
#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct IpPoolAssociationDelete { + pub silo: Option, + pub resource_type: shared::IpPoolResourceType, +} + +// technically these are not params, but they are used with params +#[derive(Clone, Debug)] pub struct IpPoolSiloAssociationDelete { pub silo: NameOrId, } -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -#[serde(tag = "resource_type", rename_all = "snake_case")] -pub enum IpPoolAssociationDelete { +#[derive(Clone, Debug)] +pub enum IpPoolAssociationDeleteValidated { Silo(IpPoolSiloAssociationDelete), Fleet, } +impl TryFrom for IpPoolAssociationDeleteValidated { + type Error = String; + + fn try_from(value: IpPoolAssociationDelete) -> Result { + match (value.silo, value.resource_type) { + (Some(silo), shared::IpPoolResourceType::Silo) => { + Ok(Self::Silo(IpPoolSiloAssociationDelete { silo })) + } + (None, shared::IpPoolResourceType::Fleet) => Ok(Self::Fleet), + (Some(_), shared::IpPoolResourceType::Fleet) => { + Err("Silo must be null if resource_type is fleet".to_string()) + } + (None, shared::IpPoolResourceType::Silo) => { + Err("Silo must be specified if resource_type is silo" + .to_string()) + } + } + } +} + // INSTANCES /// Describes an attachment of an `InstanceNetworkInterface` to an `Instance`, diff --git a/nexus/types/src/external_api/shared.rs b/nexus/types/src/external_api/shared.rs index 48fbb9c10d..e74ed8134e 100644 --- a/nexus/types/src/external_api/shared.rs +++ b/nexus/types/src/external_api/shared.rs @@ -245,6 +245,13 @@ pub enum UpdateableComponentType { HostOmicron, } +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum IpPoolResourceType { + Fleet, + Silo, +} + #[cfg(test)] mod test { use super::Policy; diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index b6dcad8553..c58a99a02f 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -246,19 +246,12 @@ pub struct IpPool { pub identity: IdentityMetadata, } -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] -#[serde(rename_all = "snake_case")] -pub enum IpPoolResourceType { - Fleet, - Silo, -} - #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] pub struct IpPoolResource { // TODO: is including the pool ID redundant? it's convenient to have and // makes this response a cohesive whole pub ip_pool_id: Uuid, - pub resource_type: IpPoolResourceType, + pub resource_type: shared::IpPoolResourceType, pub resource_id: Uuid, pub is_default: bool, } diff --git a/openapi/nexus.json b/openapi/nexus.json index eadc31367c..5d1913dc66 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -4708,11 +4708,17 @@ }, { "in": "query", - "name": "resource_id", + "name": "resource_type", "required": true, "schema": { - "type": "string", - "format": "uuid" + "$ref": "#/components/schemas/IpPoolResourceType" + } + }, + { + "in": "query", + "name": "silo", + "schema": { + "$ref": "#/components/schemas/NameOrId" } } ], From 1dd01c1af85a9dd492ea57822f49c039c0566476 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Fri, 10 Nov 2023 14:54:43 -0600 Subject: [PATCH 30/67] these migrations go to 11. 
avoid conflict --- dev-tools/omdb/tests/env.out | 6 +++--- dev-tools/omdb/tests/successes.out | 12 ++++++------ nexus/db-model/src/schema.rs | 2 +- schema/crdb/{9.0.0 => 11.0.0}/up1.sql | 0 schema/crdb/{9.0.0 => 11.0.0}/up2.sql | 0 schema/crdb/{9.0.0 => 11.0.0}/up3.sql | 0 schema/crdb/{9.0.0 => 11.0.0}/up4.sql | 0 schema/crdb/{9.0.0 => 11.0.0}/up5.sql | 0 schema/crdb/11.0.1/README.md | 1 + schema/crdb/{9.0.1 => 11.0.1}/up1.sql | 0 schema/crdb/{9.0.1 => 11.0.1}/up2.sql | 0 schema/crdb/dbinit.sql | 2 +- 12 files changed, 12 insertions(+), 11 deletions(-) rename schema/crdb/{9.0.0 => 11.0.0}/up1.sql (100%) rename schema/crdb/{9.0.0 => 11.0.0}/up2.sql (100%) rename schema/crdb/{9.0.0 => 11.0.0}/up3.sql (100%) rename schema/crdb/{9.0.0 => 11.0.0}/up4.sql (100%) rename schema/crdb/{9.0.0 => 11.0.0}/up5.sql (100%) create mode 100644 schema/crdb/11.0.1/README.md rename schema/crdb/{9.0.1 => 11.0.1}/up1.sql (100%) rename schema/crdb/{9.0.1 => 11.0.1}/up2.sql (100%) diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out index 7e4d432e5a..7cbac1565d 100644 --- a/dev-tools/omdb/tests/env.out +++ b/dev-tools/omdb/tests/env.out @@ -7,7 +7,7 @@ sim-b6d65341 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (9.0.1) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "--db-url", "junk", "sleds"] termination: Exited(2) @@ -172,7 +172,7 @@ stderr: note: database URL not specified. Will search DNS. note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (9.0.1) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["--dns-server", "[::1]:REDACTED_PORT", "db", "sleds"] termination: Exited(0) @@ -185,5 +185,5 @@ stderr: note: database URL not specified. Will search DNS. 
note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (9.0.1) +note: database schema version matches expected (8.0.0) ============================================= diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 8a22c33bfd..3ebf7046d4 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -8,7 +8,7 @@ external oxide-dev.test 2 create silo: "tes --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (9.0.1) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "dns", "diff", "external", "2"] termination: Exited(0) @@ -24,7 +24,7 @@ changes: names added: 1, names removed: 0 --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (9.0.1) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "dns", "names", "external", "2"] termination: Exited(0) @@ -36,7 +36,7 @@ External zone: oxide-dev.test --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (9.0.1) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "services", "list-instances"] termination: Exited(0) @@ -54,7 +54,7 @@ Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (9.0.1) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "services", "list-by-sled"] termination: Exited(0) @@ -75,7 +75,7 @@ sled: sim-b6d65341 (id REDACTED_UUID_REDACTED_UUID_REDACTED) --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (9.0.1) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "sleds"] termination: Exited(0) @@ -86,7 +86,7 @@ sim-b6d65341 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (9.0.1) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["mgs", "inventory"] termination: Exited(0) diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 421465bd3b..22c0998294 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1169,7 +1169,7 @@ table! { /// /// This should be updated whenever the schema is changed. 
For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(9, 0, 1); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(11, 0, 1); allow_tables_to_appear_in_same_query!( system_update, diff --git a/schema/crdb/9.0.0/up1.sql b/schema/crdb/11.0.0/up1.sql similarity index 100% rename from schema/crdb/9.0.0/up1.sql rename to schema/crdb/11.0.0/up1.sql diff --git a/schema/crdb/9.0.0/up2.sql b/schema/crdb/11.0.0/up2.sql similarity index 100% rename from schema/crdb/9.0.0/up2.sql rename to schema/crdb/11.0.0/up2.sql diff --git a/schema/crdb/9.0.0/up3.sql b/schema/crdb/11.0.0/up3.sql similarity index 100% rename from schema/crdb/9.0.0/up3.sql rename to schema/crdb/11.0.0/up3.sql diff --git a/schema/crdb/9.0.0/up4.sql b/schema/crdb/11.0.0/up4.sql similarity index 100% rename from schema/crdb/9.0.0/up4.sql rename to schema/crdb/11.0.0/up4.sql diff --git a/schema/crdb/9.0.0/up5.sql b/schema/crdb/11.0.0/up5.sql similarity index 100% rename from schema/crdb/9.0.0/up5.sql rename to schema/crdb/11.0.0/up5.sql diff --git a/schema/crdb/11.0.1/README.md b/schema/crdb/11.0.1/README.md new file mode 100644 index 0000000000..bd12f9883b --- /dev/null +++ b/schema/crdb/11.0.1/README.md @@ -0,0 +1 @@ +These steps are separated from 11.0.0 because they drop things that are used in previous steps, which causes the idempotence test to fail when it runs each migration multiple times — the things earlier steps rely on are not there. diff --git a/schema/crdb/9.0.1/up1.sql b/schema/crdb/11.0.1/up1.sql similarity index 100% rename from schema/crdb/9.0.1/up1.sql rename to schema/crdb/11.0.1/up1.sql diff --git a/schema/crdb/9.0.1/up2.sql b/schema/crdb/11.0.1/up2.sql similarity index 100% rename from schema/crdb/9.0.1/up2.sql rename to schema/crdb/11.0.1/up2.sql diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 0a941490e9..47f9f24bd8 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -2628,7 +2628,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '9.0.1', NULL) + ( TRUE, NOW(), NOW(), '11.0.1', NULL) ON CONFLICT DO NOTHING; COMMIT; From a47a9a3ac57d41bb0cf5025d0abd39d1dde8dcad Mon Sep 17 00:00:00 2001 From: David Crespo Date: Fri, 10 Nov 2023 15:29:06 -0600 Subject: [PATCH 31/67] git chose to delete something important --- nexus/db-model/src/schema.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 9626a882fd..147701038f 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1263,6 +1263,10 @@ allow_tables_to_appear_in_same_query!(ip_pool_range, ip_pool, ip_pool_resource); joinable!(ip_pool_range -> ip_pool (ip_pool_id)); joinable!(ip_pool_resource -> ip_pool (ip_pool_id)); +allow_tables_to_appear_in_same_query!(inv_collection, inv_collection_error); +joinable!(inv_collection_error -> inv_collection (inv_collection_id)); +allow_tables_to_appear_in_same_query!(hw_baseboard_id, sw_caboose, inv_caboose); + allow_tables_to_appear_in_same_query!( dataset, disk, From 6ea14ad7c43b7b1338fee1b547bc149c87045e25 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Mon, 4 Dec 2023 12:38:16 -0600 Subject: [PATCH 32/67] move migration to 19 --- nexus/db-model/src/schema.rs | 2 +- schema/crdb/{11.0.0 => 19.0.0}/up1.sql | 0 schema/crdb/{11.0.0 => 19.0.0}/up2.sql | 0 schema/crdb/{11.0.0 => 19.0.0}/up3.sql | 0 schema/crdb/{11.0.0 => 19.0.0}/up4.sql | 0 schema/crdb/{11.0.0 => 19.0.0}/up5.sql | 0 
schema/crdb/{11.0.1 => 19.0.1}/README.md | 0 schema/crdb/{11.0.1 => 19.0.1}/up1.sql | 0 schema/crdb/{11.0.1 => 19.0.1}/up2.sql | 0 schema/crdb/dbinit.sql | 2 +- 10 files changed, 2 insertions(+), 2 deletions(-) rename schema/crdb/{11.0.0 => 19.0.0}/up1.sql (100%) rename schema/crdb/{11.0.0 => 19.0.0}/up2.sql (100%) rename schema/crdb/{11.0.0 => 19.0.0}/up3.sql (100%) rename schema/crdb/{11.0.0 => 19.0.0}/up4.sql (100%) rename schema/crdb/{11.0.0 => 19.0.0}/up5.sql (100%) rename schema/crdb/{11.0.1 => 19.0.1}/README.md (100%) rename schema/crdb/{11.0.1 => 19.0.1}/up1.sql (100%) rename schema/crdb/{11.0.1 => 19.0.1}/up2.sql (100%) diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 147701038f..52568f8299 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1250,7 +1250,7 @@ table! { /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(11, 0, 1); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(19, 0, 1); allow_tables_to_appear_in_same_query!( system_update, diff --git a/schema/crdb/11.0.0/up1.sql b/schema/crdb/19.0.0/up1.sql similarity index 100% rename from schema/crdb/11.0.0/up1.sql rename to schema/crdb/19.0.0/up1.sql diff --git a/schema/crdb/11.0.0/up2.sql b/schema/crdb/19.0.0/up2.sql similarity index 100% rename from schema/crdb/11.0.0/up2.sql rename to schema/crdb/19.0.0/up2.sql diff --git a/schema/crdb/11.0.0/up3.sql b/schema/crdb/19.0.0/up3.sql similarity index 100% rename from schema/crdb/11.0.0/up3.sql rename to schema/crdb/19.0.0/up3.sql diff --git a/schema/crdb/11.0.0/up4.sql b/schema/crdb/19.0.0/up4.sql similarity index 100% rename from schema/crdb/11.0.0/up4.sql rename to schema/crdb/19.0.0/up4.sql diff --git a/schema/crdb/11.0.0/up5.sql b/schema/crdb/19.0.0/up5.sql similarity index 100% rename from schema/crdb/11.0.0/up5.sql rename to schema/crdb/19.0.0/up5.sql diff --git a/schema/crdb/11.0.1/README.md b/schema/crdb/19.0.1/README.md similarity index 100% rename from schema/crdb/11.0.1/README.md rename to schema/crdb/19.0.1/README.md diff --git a/schema/crdb/11.0.1/up1.sql b/schema/crdb/19.0.1/up1.sql similarity index 100% rename from schema/crdb/11.0.1/up1.sql rename to schema/crdb/19.0.1/up1.sql diff --git a/schema/crdb/11.0.1/up2.sql b/schema/crdb/19.0.1/up2.sql similarity index 100% rename from schema/crdb/11.0.1/up2.sql rename to schema/crdb/19.0.1/up2.sql diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 88104c1742..38e54af36f 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -2846,7 +2846,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '11.0.1', NULL) + ( TRUE, NOW(), NOW(), '19.0.1', NULL) ON CONFLICT DO NOTHING; COMMIT; From b871acf98e292f078fb3d45ef3d7a4e9d6c1b76d Mon Sep 17 00:00:00 2001 From: David Crespo Date: Mon, 4 Dec 2023 16:33:20 -0600 Subject: [PATCH 33/67] not sure how that became wrong again but whatever --- schema/crdb/19.0.0/up1.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/schema/crdb/19.0.0/up1.sql b/schema/crdb/19.0.0/up1.sql index 210c7c83ed..5ccd8f51b9 100644 --- a/schema/crdb/19.0.0/up1.sql +++ b/schema/crdb/19.0.0/up1.sql @@ -1,4 +1,4 @@ CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_resource_type AS ENUM ( - 'fleet', - 'silo' + 'silo', + 'fleet' ); From 2094a8b770057fdffec48e0040d885960430a909 Mon Sep 17 00:00:00 2001 From: David Crespo 
Date: Tue, 5 Dec 2023 11:08:08 -0600 Subject: [PATCH 34/67] IP pools can't be fleet-wide (#4616) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Built on #4261 — seeing how @zephraph's radical suggestion works out. --------- Co-authored-by: Justin Bennett --- nexus/db-model/src/ip_pool.rs | 17 +- nexus/db-queries/src/db/datastore/ip_pool.rs | 136 +++++------ nexus/db-queries/src/db/datastore/project.rs | 6 +- nexus/db-queries/src/db/datastore/rack.rs | 33 --- nexus/src/app/ip_pool.rs | 57 ++--- nexus/src/external_api/http_entrypoints.rs | 47 ++-- nexus/test-utils/src/resource_helpers.rs | 16 +- nexus/tests/integration_tests/endpoints.rs | 8 +- nexus/tests/integration_tests/instances.rs | 11 +- nexus/tests/integration_tests/ip_pools.rs | 80 +++---- nexus/tests/output/nexus_tags.txt | 6 +- nexus/types/src/external_api/params.rs | 54 +---- nexus/types/src/external_api/shared.rs | 7 - nexus/types/src/external_api/views.rs | 7 +- openapi/nexus.json | 224 ++++++++----------- schema/crdb/19.0.0/up1.sql | 3 +- schema/crdb/dbinit.sql | 3 +- 17 files changed, 239 insertions(+), 476 deletions(-) diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index a8bee67bdf..8211287248 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -16,7 +16,7 @@ use db_macros::Resource; use diesel::Selectable; use ipnetwork::IpNetwork; use nexus_types::external_api::params; -use nexus_types::external_api::shared::{self, IpRange}; +use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::views; use nexus_types::identity::Resource; use omicron_common::api::external; @@ -85,7 +85,6 @@ impl_enum_type!( #[diesel(sql_type = IpPoolResourceTypeEnum)] pub enum IpPoolResourceType; - Fleet => b"fleet" Silo => b"silo" ); @@ -98,15 +97,6 @@ pub struct IpPoolResource { pub is_default: bool, } -impl From for shared::IpPoolResourceType { - fn from(typ: IpPoolResourceType) -> Self { - match typ { - IpPoolResourceType::Fleet => Self::Fleet, - IpPoolResourceType::Silo => Self::Silo, - } - } -} - /// Information required to delete an IP pool association. Comes from request /// params -- silo is a NameOrId and must be resolved to ID. 
pub struct IpPoolResourceDelete { @@ -115,12 +105,11 @@ pub struct IpPoolResourceDelete { pub resource_id: Uuid, } -impl From for views::IpPoolResource { +impl From for views::IpPoolSilo { fn from(assoc: IpPoolResource) -> Self { Self { ip_pool_id: assoc.ip_pool_id, - resource_type: assoc.resource_type.into(), - resource_id: assoc.resource_id, + silo_id: assoc.resource_id, is_default: assoc.is_default, } } diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 9c557c9e9b..6a54d72b12 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -95,11 +95,9 @@ impl DataStore { ip_pool::table .inner_join(ip_pool_resource::table) .filter( - (ip_pool_resource::resource_type + ip_pool_resource::resource_type .eq(IpPoolResourceType::Silo) - .and(ip_pool_resource::resource_id.eq(authz_silo.id()))) - .or(ip_pool_resource::resource_type - .eq(IpPoolResourceType::Fleet)), + .and(ip_pool_resource::resource_id.eq(authz_silo.id())), ) .filter(ip_pool::id.eq(authz_pool.id())) .filter(ip_pool::time_deleted.is_null()) @@ -139,11 +137,9 @@ impl DataStore { ip_pool::table .inner_join(ip_pool_resource::table) .filter( - (ip_pool_resource::resource_type + ip_pool_resource::resource_type .eq(IpPoolResourceType::Silo) - .and(ip_pool_resource::resource_id.eq(authz_silo_id))) - .or(ip_pool_resource::resource_type - .eq(IpPoolResourceType::Fleet)), + .and(ip_pool_resource::resource_id.eq(authz_silo_id)), ) .filter(ip_pool_resource::is_default.eq(true)) .filter(ip_pool::time_deleted.is_null()) @@ -429,57 +425,35 @@ impl DataStore { // We can only delete the association if there are no IPs allocated // from this pool in the associated resource. - // most of the query is the same between silo and fleet - let base_query = |table: external_ip::table| { - table - .inner_join( - instance::table - .on(external_ip::parent_id.eq(instance::id.nullable())), - ) - .filter(external_ip::is_service.eq(false)) - .filter(external_ip::parent_id.is_not_null()) - .filter(external_ip::time_deleted.is_null()) - .filter(external_ip::ip_pool_id.eq(association.ip_pool_id)) - .filter(instance::time_deleted.is_not_null()) - .select(ExternalIp::as_select()) - .limit(1) - }; - - let existing_ips = match association.resource_type { - IpPoolResourceType::Silo => { - - // if it's a silo association, we also have to join through IPs to instances - // to projects to get the silo ID - base_query(external_ip::table) - .inner_join( - project::table.on(instance::project_id.eq(project::id)), - ) - .filter(project::silo_id.eq(association.resource_id)) - .load_async::( - &*self.pool_connection_authorized(opctx).await?, - ) - .await - }, - IpPoolResourceType::Fleet => { - // If it's a fleet association, we can't delete it if there are any IPs - // allocated from the pool anywhere - base_query(external_ip::table) - .load_async::( - &*self.pool_connection_authorized(opctx).await?, - ) - .await - } - } - .map_err(|e| { - Error::internal_error(&format!( - "error checking for outstanding IPs before deleting IP pool association to resource: {:?}", - e - )) - })?; + let existing_ips = external_ip::table + .inner_join( + instance::table + .on(external_ip::parent_id.eq(instance::id.nullable())), + ) + .filter(external_ip::is_service.eq(false)) + .filter(external_ip::parent_id.is_not_null()) + .filter(external_ip::time_deleted.is_null()) + .filter(external_ip::ip_pool_id.eq(association.ip_pool_id)) + .filter(instance::time_deleted.is_not_null()) + 
.select(ExternalIp::as_select()) + .limit(1) + // we have to join through IPs to instances to projects to get the silo ID + .inner_join(project::table.on(instance::project_id.eq(project::id))) + .filter(project::silo_id.eq(association.resource_id)) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| { + Error::internal_error(&format!( + "error checking for outstanding IPs before deleting IP pool association to resource: {:?}", + e + )) + })?; if !existing_ips.is_empty() { return Err(Error::InvalidRequest { - message: "IP addresses from this pool are in use in the associated silo/fleet".to_string() + message: "IP addresses from this pool are in use in the associated silo".to_string() }); } @@ -487,8 +461,7 @@ impl DataStore { } /// Delete IP pool assocation with resource unless there are outstanding - /// IPs allocated from the pool in the associated silo (or the fleet, if - /// it's a fleet association). + /// IPs allocated from the pool in the associated silo pub async fn ip_pool_dissociate_resource( &self, opctx: &OpContext, @@ -684,7 +657,6 @@ impl DataStore { #[cfg(test)] mod test { use crate::db::datastore::datastore_test; - use crate::db::fixed_data::FLEET_ID; use crate::db::model::{IpPool, IpPoolResource, IpPoolResourceType}; use assert_matches::assert_matches; use nexus_db_model::IpPoolResourceDelete; @@ -708,29 +680,29 @@ mod test { assert_eq!(fleet_default_pool.identity.name.as_str(), "default"); - // unique index prevents second fleet-level default - let identity = IdentityMetadataCreateParams { - name: "another-fleet-default".parse().unwrap(), - description: "".to_string(), - }; - let second_default = datastore - .ip_pool_create(&opctx, IpPool::new(&identity)) - .await - .expect("Failed to create pool"); - let err = datastore - .ip_pool_associate_resource( - &opctx, - IpPoolResource { - ip_pool_id: second_default.id(), - resource_type: IpPoolResourceType::Fleet, - resource_id: *FLEET_ID, - is_default: true, - }, - ) - .await - .expect_err("Failed to fail to make IP pool fleet default"); - - assert_matches!(err, Error::ObjectAlreadyExists { .. }); + // // unique index prevents second fleet-level default + // let identity = IdentityMetadataCreateParams { + // name: "another-fleet-default".parse().unwrap(), + // description: "".to_string(), + // }; + // let second_default = datastore + // .ip_pool_create(&opctx, IpPool::new(&identity)) + // .await + // .expect("Failed to create pool"); + // let err = datastore + // .ip_pool_associate_resource( + // &opctx, + // IpPoolResource { + // ip_pool_id: second_default.id(), + // resource_type: IpPoolResourceType::Fleet, + // resource_id: *FLEET_ID, + // is_default: true, + // }, + // ) + // .await + // .expect_err("Failed to fail to make IP pool fleet default"); + + // assert_matches!(err, Error::ObjectAlreadyExists { .. 
}); // now test logic preferring most specific available default diff --git a/nexus/db-queries/src/db/datastore/project.rs b/nexus/db-queries/src/db/datastore/project.rs index 762fecfa43..783a3a952c 100644 --- a/nexus/db-queries/src/db/datastore/project.rs +++ b/nexus/db-queries/src/db/datastore/project.rs @@ -353,13 +353,11 @@ impl DataStore { &pagparams.map_name(|n| Name::ref_cast(n)), ), } - // TODO: make sure this join is compatible with pagination logic .inner_join(ip_pool_resource::table) .filter( - (ip_pool_resource::resource_type + ip_pool_resource::resource_type .eq(IpPoolResourceType::Silo) - .and(ip_pool_resource::resource_id.eq(silo_id))) - .or(ip_pool_resource::resource_type.eq(IpPoolResourceType::Fleet)), + .and(ip_pool_resource::resource_id.eq(silo_id)), ) .filter(ip_pool::time_deleted.is_null()) .select(db::model::IpPool::as_select()) diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 27211c4d82..1f4a53ab12 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -19,7 +19,6 @@ use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::fixed_data::vpc_subnet::DNS_VPC_SUBNET; use crate::db::fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; use crate::db::fixed_data::vpc_subnet::NTP_VPC_SUBNET; -use crate::db::fixed_data::FLEET_ID; use crate::db::identity::Asset; use crate::db::model::Dataset; use crate::db::model::IncompleteExternalIp; @@ -736,38 +735,6 @@ impl DataStore { .await?; } - let default_pool = - db::model::IpPool::new(&IdentityMetadataCreateParams { - name: "default".parse::<Name>().unwrap(), - description: String::from("default IP pool"), - }); - - let default_pool_id = default_pool.id(); - - let default_created = self - .ip_pool_create(opctx, default_pool) - .await - .map(|_| true) - .or_else(|e| match e { - Error::ObjectAlreadyExists { .. } => Ok(false), - _ => Err(e), - })?; - - // make pool default for fleet. only need to do this if the create went - // through, i.e., if it wasn't already there - if default_created { - self.ip_pool_associate_resource( - opctx, - db::model::IpPoolResource { - ip_pool_id: default_pool_id, - resource_type: db::model::IpPoolResourceType::Fleet, - resource_id: *FLEET_ID, - is_default: true, - }, - ) - .await?; - } - Ok(()) } diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 95c93e4b1d..7284facf72 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -12,11 +12,9 @@ use nexus_db_model::IpPoolResourceType; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; -use nexus_db_queries::db::fixed_data::FLEET_ID; use nexus_db_queries::db::lookup; use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::model::Name; -use nexus_types::identity::Resource; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; @@ -93,40 +91,22 @@ impl super::Nexus { &self, opctx: &OpContext, pool_lookup: &lookup::IpPool<'_>, - assoc_create: &params::IpPoolAssociationCreate, + silo_link: &params::IpPoolSiloLink, ) -> CreateResult<db::model::IpPoolResource> { - // TODO: check for perms on specified resource? or unnecessary because this is an operator action? 
let (.., authz_pool) = pool_lookup.lookup_for(authz::Action::Modify).await?; - let (resource_type, resource_id, is_default) = match assoc_create { - params::IpPoolAssociationCreate::Silo(assoc_silo) => { - let (silo,) = self - .silo_lookup(&opctx, assoc_silo.silo.clone())? - .lookup_for(authz::Action::Read) - .await?; - ( - db::model::IpPoolResourceType::Silo, - silo.id(), - assoc_silo.is_default, - ) - } - params::IpPoolAssociationCreate::Fleet(assoc_fleet) => { - // we don't need to be assured of the fleet's existence - ( - db::model::IpPoolResourceType::Fleet, - *FLEET_ID, - assoc_fleet.is_default, - ) - } - }; + let (silo,) = self + .silo_lookup(&opctx, silo_link.silo.clone())? + .lookup_for(authz::Action::Read) + .await?; self.db_datastore .ip_pool_associate_resource( opctx, db::model::IpPoolResource { ip_pool_id: authz_pool.id(), - resource_type, - resource_id, - is_default, + resource_type: db::model::IpPoolResourceType::Silo, + resource_id: silo.id(), + is_default: silo_link.is_default, }, ) .await @@ -136,31 +116,20 @@ impl super::Nexus { &self, opctx: &OpContext, pool_lookup: &lookup::IpPool<'_>, - ip_pool_dissoc: &params::IpPoolAssociationDeleteValidated, + silo_lookup: &lookup::Silo<'_>, ) -> DeleteResult { let (.., authz_pool) = pool_lookup.lookup_for(authz::Action::Modify).await?; - - let (resource_type, resource_id) = match ip_pool_dissoc { - params::IpPoolAssociationDeleteValidated::Silo(assoc) => { - let (.., silo) = self - .silo_lookup(opctx, assoc.silo.clone())? - .fetch() - .await?; - (IpPoolResourceType::Silo, silo.id()) - } - params::IpPoolAssociationDeleteValidated::Fleet => { - (IpPoolResourceType::Fleet, *FLEET_ID) - } - }; + let (.., authz_silo) = + silo_lookup.lookup_for(authz::Action::Modify).await?; self.db_datastore .ip_pool_dissociate_resource( opctx, &IpPoolResourceDelete { ip_pool_id: authz_pool.id(), - resource_id, - resource_type, + resource_id: authz_silo.id(), + resource_type: IpPoolResourceType::Silo, }, ) .await diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 11bdfcaf8b..25e4039d24 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -122,9 +122,9 @@ pub(crate) fn external_api() -> NexusApiDescription { // Operator-Accessible IP Pools API api.register(ip_pool_list)?; api.register(ip_pool_create)?; - api.register(ip_pool_association_list)?; - api.register(ip_pool_association_create)?; - api.register(ip_pool_association_delete)?; + api.register(ip_pool_silo_list)?; + api.register(ip_pool_silo_link)?; + api.register(ip_pool_silo_unlink)?; api.register(ip_pool_view)?; api.register(ip_pool_delete)?; api.register(ip_pool_update)?; @@ -1323,24 +1323,20 @@ async fn ip_pool_update( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -// TODO: associate just seems like the wrong word and I'd like to change it -// across the board. What I really mean is "make available to" or "make availale -// for use in" - -/// List IP pool resource associations +/// List an IP pool's linked silos #[endpoint { method = GET, - path = "/v1/system/ip-pools/{pool}/associations", + path = "/v1/system/ip-pools/{pool}/silos", tags = ["system/networking"], }] -async fn ip_pool_association_list( +async fn ip_pool_silo_list( rqctx: RequestContext<Arc<ServerContext>>, path_params: Path<params::IpPoolPath>, // paginating by resource_id because they're unique per pool. 
most robust // option would be to paginate by a composite key representing the (pool, // resource_type, resource) query_params: Query<PaginatedById>, -) -> Result<HttpResponseOk<ResultsPage<views::IpPoolResource>>, HttpError> { +) -> Result<HttpResponseOk<ResultsPage<views::IpPoolSilo>>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -1362,23 +1358,23 @@ async fn ip_pool_association_list( Ok(HttpResponseOk(ScanById::results_page( &query, assocs, - &|_, x: &views::IpPoolResource| x.resource_id, + &|_, x: &views::IpPoolSilo| x.silo_id, )?)) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/// Associate an IP Pool with a silo or the fleet +/// Make an IP pool available within a silo #[endpoint { method = POST, - path = "/v1/system/ip-pools/{pool}/associations", + path = "/v1/system/ip-pools/{pool}/silos", tags = ["system/networking"], }] -async fn ip_pool_association_create( +async fn ip_pool_silo_link( rqctx: RequestContext<Arc<ServerContext>>, path_params: Path<params::IpPoolPath>, - resource_assoc: TypedBody<params::IpPoolAssociationCreate>, -) -> Result<HttpResponseCreated<views::IpPoolResource>, HttpError> { + resource_assoc: TypedBody<params::IpPoolSiloLink>, +) -> Result<HttpResponseCreated<views::IpPoolSilo>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -1397,13 +1393,13 @@ async fn ip_pool_association_create( /// Remove an IP pool's association with a silo #[endpoint { method = DELETE, - path = "/v1/system/ip-pools/{pool}/associations", + path = "/v1/system/ip-pools/{pool}/silos", tags = ["system/networking"], }] -async fn ip_pool_association_delete( +async fn ip_pool_silo_unlink( rqctx: RequestContext<Arc<ServerContext>>, path_params: Path<params::IpPoolPath>, - query_params: Query<params::IpPoolAssociationDelete>, + query_params: Query<params::IpPoolSiloUnlink>, ) -> Result<HttpResponseUpdatedNoContent, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let nexus = &apictx.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); - let validated_params = - params::IpPoolAssociationDeleteValidated::try_from(query) - .map_err(|e| HttpError::for_bad_request(None, e))?; - let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; nexus - .ip_pool_dissociate_resource( - &opctx, - &pool_lookup, - &validated_params, - ) + .ip_pool_dissociate_resource(&opctx, &pool_lookup, &silo_lookup) .await?; Ok(HttpResponseUpdatedNoContent()) }; diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index b575e1b378..ea2ccc1259 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -145,14 +145,14 @@ pub async fn create_ip_pool( .await; // make pool available for use anywhere in fleet - let _assoc: views::IpPoolResource = object_create( - client, - &format!("/v1/system/ip-pools/{pool_name}/associations"), - &params::IpPoolAssociationCreate::Fleet(params::IpPoolAssociateFleet { - is_default: false, - }), - ) - .await; + // let _assoc: views::IpPoolSilo = object_create( + // client, + // &format!("/v1/system/ip-pools/{pool_name}/associations"), + // &params::IpPoolAssociationCreate::Fleet(params::IpPoolAssociateFleet { + // is_default: false, + // }), + // ) + // .await; // TODO: associate with fleet as a non-default like before? let range = populate_ip_pool(client, pool_name, ip_range).await; diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index e52d04a591..ff40d56019 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -522,12 +522,12 @@ lazy_static!
{ description: Some(String::from("a new IP pool")), }, }; - pub static ref DEMO_IP_POOL_ASSOC_URL: String = format!("{}/associations", *DEMO_IP_POOL_URL); - pub static ref DEMO_IP_POOL_ASSOC_BODY: params::IpPoolAssociationCreate = - params::IpPoolAssociationCreate::Silo(params::IpPoolAssociateSilo { + pub static ref DEMO_IP_POOL_ASSOC_URL: String = format!("{}/silos", *DEMO_IP_POOL_URL); + pub static ref DEMO_IP_POOL_ASSOC_BODY: params::IpPoolSiloLink = + params::IpPoolSiloLink { silo: NameOrId::Id(DEFAULT_SILO.identity().id), is_default: false, - }); + }; pub static ref DEMO_IP_POOL_RANGE: IpRange = IpRange::V4(Ipv4Range::new( std::net::Ipv4Addr::new(10, 0, 0, 0), std::net::Ipv4Addr::new(10, 0, 0, 255), diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 346ee32839..5c0fda03da 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -3613,12 +3613,11 @@ async fn test_instance_ephemeral_ip_from_correct_pool( }, ) .await; - let params = - params::IpPoolAssociationCreate::Silo(params::IpPoolAssociateSilo { - silo: NameOrId::Id(DEFAULT_SILO.id()), - is_default: true, - }); - let assoc_url = format!("/v1/system/ip-pools/{pool_name}/associations"); + let params = params::IpPoolSiloLink { + silo: NameOrId::Id(DEFAULT_SILO.id()), + is_default: true, + }; + let assoc_url = format!("/v1/system/ip-pools/{pool_name}/silo"); let _ = NexusRequest::objects_post(client, &assoc_url, &params) .authn_as(AuthnMode::PrivilegedUser) .execute() diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 9cf1155337..bd7fe6033b 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -26,13 +26,12 @@ use nexus_types::external_api::params::InstanceDiskAttachment; use nexus_types::external_api::params::InstanceNetworkInterfaceAttachment; use nexus_types::external_api::params::IpPoolCreate; use nexus_types::external_api::params::IpPoolUpdate; -use nexus_types::external_api::shared::IpPoolResourceType; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::Ipv4Range; use nexus_types::external_api::shared::Ipv6Range; use nexus_types::external_api::views::IpPool; use nexus_types::external_api::views::IpPoolRange; -use nexus_types::external_api::views::IpPoolResource; +use nexus_types::external_api::views::IpPoolSilo; use nexus_types::external_api::views::Silo; use omicron_common::api::external::IdentityMetadataUpdateParams; use omicron_common::api::external::NameOrId; @@ -357,17 +356,16 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { // expect 404 on association if the specified silo doesn't exist let nonexistent_silo_id = Uuid::new_v4(); - let params = - params::IpPoolAssociationCreate::Silo(params::IpPoolAssociateSilo { - silo: NameOrId::Id(nonexistent_silo_id), - is_default: false, - }); + let params = params::IpPoolSiloLink { + silo: NameOrId::Id(nonexistent_silo_id), + is_default: false, + }; let error = NexusRequest::new( RequestBuilder::new( client, Method::POST, - "/v1/system/ip-pools/p0/associations", + "/v1/system/ip-pools/p0/silos", ) .body(Some(&params)) .expect_status(Some(StatusCode::NOT_FOUND)), ) .authn_as(AuthnMode::PrivilegedUser) .execute() .await .unwrap() .parsed_body() .unwrap(); assert_eq!( error.message, format!("not found: silo with id \"{nonexistent_silo_id}\""), ); // associate by name with silo that exists - let params = - params::IpPoolAssociationCreate::Silo(params::IpPoolAssociateSilo { - // TODO: this is probably not the best 
silo ID to use - silo: NameOrId::Name(cptestctx.silo_name.clone()), - is_default: false, - }); - let _: IpPoolResource = - object_create(client, "/v1/system/ip-pools/p0/associations", &params) - .await; + let params = params::IpPoolSiloLink { + // TODO: this is probably not the best silo ID to use + silo: NameOrId::Name(cptestctx.silo_name.clone()), + is_default: false, + }; + let _: IpPoolSilo = + object_create(client, "/v1/system/ip-pools/p0/silos", &params).await; // get silo ID so we can test association by ID as well let silo_url = format!("/v1/system/silos/{}", cptestctx.silo_name); let silo_id = silo.identity.id; let assocs_p0 = get_associations(client, "p0").await; - let silo_assoc = IpPoolResource { - ip_pool_id: p0.identity.id, - resource_type: IpPoolResourceType::Silo, - resource_id: silo_id, - is_default: false, - }; + let silo_assoc = + IpPoolSilo { ip_pool_id: p0.identity.id, silo_id, is_default: false }; assert_eq!(assocs_p0.items.len(), 1); assert_eq!(assocs_p0.items[0], silo_assoc); // TODO: confirm dissociation // associate same silo to other pool by ID - let params = - params::IpPoolAssociationCreate::Silo(params::IpPoolAssociateSilo { - silo: NameOrId::Id(silo.identity.id), - is_default: false, - }); - let _: IpPoolResource = - object_create(client, "/v1/system/ip-pools/p1/associations", &params) - .await; + let params = params::IpPoolSiloLink { + silo: NameOrId::Id(silo.identity.id), + is_default: false, + }; + let _: IpPoolSilo = + object_create(client, "/v1/system/ip-pools/p1/silo", &params).await; // association should look the same as the other one, except different pool ID let assocs_p1 = get_associations(client, "p1").await; assert_eq!(assocs_p1.items.len(), 1); assert_eq!( assocs_p1.items[0], - IpPoolResource { ip_pool_id: p1.identity.id, ..silo_assoc } + IpPoolSilo { ip_pool_id: p1.identity.id, ..silo_assoc } ); // TODO: associating a resource that is already associated should be a noop @@ -486,10 +476,10 @@ fn get_names(pools: Vec<IpPool>) -> Vec<String> { async fn get_associations( client: &ClientTestContext, id: &str, -) -> ResultsPage<IpPoolResource> { - objects_list_page_authz::<IpPoolResource>( +) -> ResultsPage<IpPoolSilo> { + objects_list_page_authz::<IpPoolSilo>( client, - &format!("/v1/system/ip-pools/{}/associations", id), + &format!("/v1/system/ip-pools/{}/silos", id), ) .await } @@ -866,16 +856,16 @@ async fn test_ip_pool_list_usable_by_project( // add to fleet since we can't add to project yet // TODO: could do silo, might as well? need the ID, though. 
at least // until I make it so you can specify the resource by name - let params = - params::IpPoolAssociationCreate::Fleet(params::IpPoolAssociateFleet { - is_default: false, - }); - let _: IpPoolResource = object_create( - client, - &format!("/v1/system/ip-pools/{mypool_name}/associations"), - &params, - ) - .await; + // let params = + // params::IpPoolAssociationCreate::Fleet(params::IpPoolAssociateFleet { + // is_default: false, + // }); + // let _: IpPoolResource = object_create( + // client, + // &format!("/v1/system/ip-pools/{mypool_name}/associations"), + // &params, + // ) + // .await; // Add an IP range to mypool let mypool_range = IpRange::V4( diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 1a58f30514..4e17b77713 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -132,9 +132,6 @@ system_metric GET /v1/system/metrics/{metric_nam API operations found with tag "system/networking" OPERATION ID METHOD URL PATH -ip_pool_association_create POST /v1/system/ip-pools/{pool}/associations -ip_pool_association_delete DELETE /v1/system/ip-pools/{pool}/associations -ip_pool_association_list GET /v1/system/ip-pools/{pool}/associations ip_pool_create POST /v1/system/ip-pools ip_pool_delete DELETE /v1/system/ip-pools/{pool} ip_pool_list GET /v1/system/ip-pools @@ -145,6 +142,9 @@ ip_pool_service_range_add POST /v1/system/ip-pools-service/ra ip_pool_service_range_list GET /v1/system/ip-pools-service/ranges ip_pool_service_range_remove POST /v1/system/ip-pools-service/ranges/remove ip_pool_service_view GET /v1/system/ip-pools-service +ip_pool_silo_link POST /v1/system/ip-pools/{pool}/silos +ip_pool_silo_list GET /v1/system/ip-pools/{pool}/silos +ip_pool_silo_unlink DELETE /v1/system/ip-pools/{pool}/silos ip_pool_update PUT /v1/system/ip-pools/{pool} ip_pool_view GET /v1/system/ip-pools/{pool} networking_address_lot_block_list GET /v1/system/networking/address-lot/{address_lot}/blocks diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index c04a8cd111..9c91cf3e82 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -758,66 +758,16 @@ pub struct IpPoolUpdate { } #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct IpPoolAssociateSilo { +pub struct IpPoolSiloLink { pub silo: NameOrId, pub is_default: bool, } #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct IpPoolAssociateFleet { - pub is_default: bool, -} - -/// Parameters for associating an IP pool with a resource (fleet, silo) -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -#[serde(tag = "resource_type", rename_all = "snake_case")] -pub enum IpPoolAssociationCreate { - Silo(IpPoolAssociateSilo), - Fleet(IpPoolAssociateFleet), -} - -// It would be cool if this was an enum like Silo(NameOrId) | Fleet, but -// OpenAPI doesn't support fancy schemas on query params. So instead we -// use this flat struct for the query params directly, and manually parse -// that into the good struct below. 
-#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct IpPoolAssociationDelete { - pub silo: Option, - pub resource_type: shared::IpPoolResourceType, -} - -// technically these are not params, but they are used with params -#[derive(Clone, Debug)] -pub struct IpPoolSiloAssociationDelete { +pub struct IpPoolSiloUnlink { pub silo: NameOrId, } -#[derive(Clone, Debug)] -pub enum IpPoolAssociationDeleteValidated { - Silo(IpPoolSiloAssociationDelete), - Fleet, -} - -impl TryFrom for IpPoolAssociationDeleteValidated { - type Error = String; - - fn try_from(value: IpPoolAssociationDelete) -> Result { - match (value.silo, value.resource_type) { - (Some(silo), shared::IpPoolResourceType::Silo) => { - Ok(Self::Silo(IpPoolSiloAssociationDelete { silo })) - } - (None, shared::IpPoolResourceType::Fleet) => Ok(Self::Fleet), - (Some(_), shared::IpPoolResourceType::Fleet) => { - Err("Silo must be null if resource_type is fleet".to_string()) - } - (None, shared::IpPoolResourceType::Silo) => { - Err("Silo must be specified if resource_type is silo" - .to_string()) - } - } - } -} - // INSTANCES /// Describes an attachment of an `InstanceNetworkInterface` to an `Instance`, diff --git a/nexus/types/src/external_api/shared.rs b/nexus/types/src/external_api/shared.rs index c7a8cda4ab..a4c5ae1e62 100644 --- a/nexus/types/src/external_api/shared.rs +++ b/nexus/types/src/external_api/shared.rs @@ -245,13 +245,6 @@ pub enum UpdateableComponentType { HostOmicron, } -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] -#[serde(rename_all = "snake_case")] -pub enum IpPoolResourceType { - Fleet, - Silo, -} - /// Properties that uniquely identify an Oxide hardware component #[derive( Clone, diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index 29df547e2c..8dca604e7b 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -248,12 +248,9 @@ pub struct IpPool { } #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] -pub struct IpPoolResource { - // TODO: is including the pool ID redundant? it's convenient to have and - // makes this response a cohesive whole +pub struct IpPoolSilo { pub ip_pool_id: Uuid, - pub resource_type: shared::IpPoolResourceType, - pub resource_id: Uuid, + pub silo_id: Uuid, pub is_default: bool, } diff --git a/openapi/nexus.json b/openapi/nexus.json index 3f0611abad..71e9533483 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -4685,13 +4685,14 @@ } } }, - "/v1/system/ip-pools/{pool}/associations": { + "/v1/system/ip-pools/{pool}/ranges": { "get": { "tags": [ "system/networking" ], - "summary": "List IP pool resource associations", - "operationId": "ip_pool_association_list", + "summary": "List ranges for an IP pool", + "description": "List ranges for an IP pool. 
Ranges are ordered by their first address.", + "operationId": "ip_pool_range_list", "parameters": [ { "in": "path", @@ -4721,13 +4722,6 @@ "nullable": true, "type": "string" } - }, - { - "in": "query", - "name": "sort_by", - "schema": { - "$ref": "#/components/schemas/IdSortMode" - } } ], "responses": { @@ -4736,7 +4730,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/IpPoolResourceResultsPage" + "$ref": "#/components/schemas/IpPoolRangeResultsPage" } } } @@ -4751,13 +4745,15 @@ "x-dropshot-pagination": { "required": [] } - }, + } + }, + "/v1/system/ip-pools/{pool}/ranges/add": { "post": { "tags": [ "system/networking" ], - "summary": "Associate an IP Pool with a silo or the fleet", - "operationId": "ip_pool_association_create", + "summary": "Add a range to an IP pool", + "operationId": "ip_pool_range_add", "parameters": [ { "in": "path", @@ -4773,7 +4769,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/IpPoolAssociationCreate" + "$ref": "#/components/schemas/IpRange" } } }, @@ -4785,7 +4781,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/IpPoolResource" + "$ref": "#/components/schemas/IpPoolRange" } } } @@ -4797,13 +4793,15 @@ "$ref": "#/components/responses/Error" } } - }, - "delete": { + } + }, + "/v1/system/ip-pools/{pool}/ranges/remove": { + "post": { "tags": [ "system/networking" ], - "summary": "Remove an IP pool's association with a silo or project", - "operationId": "ip_pool_association_delete", + "summary": "Remove a range from an IP pool", + "operationId": "ip_pool_range_remove", "parameters": [ { "in": "path", @@ -4813,23 +4811,18 @@ "schema": { "$ref": "#/components/schemas/NameOrId" } - }, - { - "in": "query", - "name": "resource_type", - "required": true, - "schema": { - "$ref": "#/components/schemas/IpPoolResourceType" - } - }, - { - "in": "query", - "name": "silo", - "schema": { - "$ref": "#/components/schemas/NameOrId" - } } ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpRange" + } + } + }, + "required": true + }, "responses": { "204": { "description": "resource updated" @@ -4843,14 +4836,13 @@ } } }, - "/v1/system/ip-pools/{pool}/ranges": { + "/v1/system/ip-pools/{pool}/silos": { "get": { "tags": [ "system/networking" ], - "summary": "List ranges for an IP pool", - "description": "List ranges for an IP pool. 
Ranges are ordered by their first address.", -      "operationId": "ip_pool_range_list", +      "summary": "List an IP pool's linked silos", +      "operationId": "ip_pool_silo_list", "parameters": [ { "in": "path", "name": "pool", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" } }, { "in": "query", "name": "limit", "schema": { "nullable": true, "type": "integer", "format": "uint32", "minimum": 1 } }, { "in": "query", "name": "page_token", "schema": { "nullable": true, "type": "string" } +      }, +      { +        "in": "query", +        "name": "sort_by", +        "schema": { +          "$ref": "#/components/schemas/IdSortMode" +        } } ], "responses": { "200": { "description": "successful operation", "content": { "application/json": { "schema": { -                  "$ref": "#/components/schemas/IpPoolRangeResultsPage" +                  "$ref": "#/components/schemas/IpPoolSiloResultsPage" } } } }, "4XX": { "$ref": "#/components/responses/Error" }, "5XX": { "$ref": "#/components/responses/Error" } }, "x-dropshot-pagination": { "required": [] } -    } -  }, -  "/v1/system/ip-pools/{pool}/ranges/add": { +    }, "post": { "tags": [ "system/networking" ], -      "summary": "Add a range to an IP pool", -      "operationId": "ip_pool_range_add", +      "summary": "Make an IP pool available within a silo", +      "operationId": "ip_pool_silo_link", "parameters": [ { "in": "path", "name": "pool", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" } } ], "requestBody": { "content": { "application/json": { "schema": { -              "$ref": "#/components/schemas/IpRange" +              "$ref": "#/components/schemas/IpPoolSiloLink" } } }, "required": true }, "responses": { "201": { "description": "successful creation", "content": { "application/json": { "schema": { -                  "$ref": "#/components/schemas/IpPoolRange" +                  "$ref": "#/components/schemas/IpPoolSilo" } } } }, "4XX": { "$ref": "#/components/responses/Error" }, "5XX": { "$ref": "#/components/responses/Error" } } -  } -}, -"/v1/system/ip-pools/{pool}/ranges/remove": { -    "post": { +    }, +    "delete": { "tags": [ "system/networking" ], -      "summary": "Remove a range from an IP pool", -      "operationId": "ip_pool_range_remove", +      "summary": "Remove an IP pool's association with a silo", +      "operationId": "ip_pool_silo_unlink", "parameters": [ { "in": "path", "name": "pool", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" } +      }, +      { +        "in": "query", +        "name": "silo", +        "required": true, +        "schema": { +          "$ref": "#/components/schemas/NameOrId" +        } } ], -      "requestBody": { -        "content": { -          "application/json": { -            "schema": { -              "$ref": "#/components/schemas/IpRange" -            } -          } -        }, -        "required": true -      }, "responses": { "204": { "description": "resource updated" }, "4XX": { "$ref": "#/components/responses/Error" }, "5XX": { "$ref": "#/components/responses/Error" } } } }, @@ -11902,51 +11895,6 @@ "time_modified" ] }, -    "IpPoolAssociationCreate": { -      "description": "Parameters for associating an IP pool with a resource (fleet, silo)", -      "oneOf": [ -        { -          "type": "object", -          "properties": { -            "is_default": { -              "type": "boolean" -            }, -            "resource_type": { -              "type": "string", -              "enum": [ -                "silo" -              ] -            }, -            "silo": { -              "$ref": "#/components/schemas/NameOrId" -            } -          }, -          "required": [ -            "is_default", -            "resource_type", -            "silo" -          ] -        }, -        { -          "type": "object", -          "properties": { -            "is_default": { -              "type": "boolean" -            }, -            "resource_type": { -              "type": "string", -              "enum": [ -                "fleet" -              ] -            } -          }, -          "required": [ -            "is_default", -            "resource_type" -          ] -        } -      ] -    }, "IpPoolCreate": { "description": "Create-time parameters for an `IpPool`", "type": "object", @@ -12010,7 +11958,28 @@ "items" ] }, -  "IpPoolResource": { +  "IpPoolResultsPage": { +      "description": "A single page of results", +      "type": "object", +      "properties": { +        "items": { +          "description": "list of items on this page of results", +          "type": "array", +          "items": { +            "$ref": "#/components/schemas/IpPool" +          } +        }, +        "next_page": { +          "nullable": true, +          "description": "token used to fetch the next page of results (if any)", +          "type": "string" +        } +      }, +      "required": [ +        "items" +      ] +    }, +    "IpPoolSilo": { "type": "object", "properties": { "ip_pool_id": { "type": "string", "format": "uuid" }, "is_default": { "type": "boolean" }, -        "resource_id": { + 
"silo_id": { "type": "string", "format": "uuid" - }, - "resource_type": { - "$ref": "#/components/schemas/IpPoolResourceType" } }, "required": [ "ip_pool_id", "is_default", - "resource_id", - "resource_type" + "silo_id" ] }, - "IpPoolResourceResultsPage": { - "description": "A single page of results", + "IpPoolSiloLink": { "type": "object", "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/IpPoolResource" - } + "is_default": { + "type": "boolean" }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" + "silo": { + "$ref": "#/components/schemas/NameOrId" } }, "required": [ - "items" - ] - }, - "IpPoolResourceType": { - "type": "string", - "enum": [ - "fleet", + "is_default", "silo" ] }, - "IpPoolResultsPage": { + "IpPoolSiloResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -12071,7 +12023,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/IpPool" + "$ref": "#/components/schemas/IpPoolSilo" } }, "next_page": { diff --git a/schema/crdb/19.0.0/up1.sql b/schema/crdb/19.0.0/up1.sql index 5ccd8f51b9..28204a4d3b 100644 --- a/schema/crdb/19.0.0/up1.sql +++ b/schema/crdb/19.0.0/up1.sql @@ -1,4 +1,3 @@ CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_resource_type AS ENUM ( - 'silo', - 'fleet' + 'silo' ); diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 79e43aaf93..45abda3665 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1543,8 +1543,7 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_pool_by_name ON omicron.public.ip_pool -- silo default and a fleet default. If we were to add a project type, it should -- be added before silo. CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_resource_type AS ENUM ( - 'silo', - 'fleet' + 'silo' ); -- join table associating IP pools with resources like fleet or silo From 9da8783021ea888bdfbb6fbdc32efec7f7a1a054 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Tue, 5 Dec 2023 16:03:21 -0600 Subject: [PATCH 35/67] integration_tests::ip_pools all pass --- nexus/db-queries/src/db/datastore/ip_pool.rs | 7 +- nexus/test-utils/src/resource_helpers.rs | 9 +- nexus/tests/integration_tests/instances.rs | 2 +- nexus/tests/integration_tests/ip_pools.rs | 127 ++++++++----------- 4 files changed, 65 insertions(+), 80 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 6a54d72b12..ca87722668 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -137,10 +137,9 @@ impl DataStore { ip_pool::table .inner_join(ip_pool_resource::table) .filter( - ip_pool_resource::resource_type - .eq(IpPoolResourceType::Silo) - .and(ip_pool_resource::resource_id.eq(authz_silo_id)), + ip_pool_resource::resource_type.eq(IpPoolResourceType::Silo), ) + .filter(ip_pool_resource::resource_id.eq(authz_silo_id)) .filter(ip_pool_resource::is_default.eq(true)) .filter(ip_pool::time_deleted.is_null()) // Order by most specific first so we get the most specific. 
@@ -443,7 +442,7 @@ impl DataStore { .load_async::( &*self.pool_connection_authorized(opctx).await?, ) - .await + .await .map_err(|e| { Error::internal_error(&format!( "error checking for outstanding IPs before deleting IP pool association to resource: {:?}", diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index ea2ccc1259..f861aaaa9a 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -32,6 +32,8 @@ use omicron_common::api::external::Disk; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Instance; use omicron_common::api::external::InstanceCpuCount; +use omicron_common::api::external::Name; +use omicron_common::api::external::NameOrId; use omicron_sled_agent::sim::SledAgent; use std::sync::Arc; use uuid::Uuid; @@ -144,17 +146,16 @@ pub async fn create_ip_pool( ) .await; - // make pool available for use anywhere in fleet // let _assoc: views::IpPoolSilo = object_create( // client, // &format!("/v1/system/ip-pools/{pool_name}/associations"), - // ¶ms::IpPoolAssociationCreate::Fleet(params::IpPoolAssociateFleet { + // ¶ms::IpPoolSiloLink { + // silo: NameOrId::Id(DEFAULT_SILO.id()), // is_default: false, - // }), + // }, // ) // .await; - // TODO: associate with fleet as a non-default like before? let range = populate_ip_pool(client, pool_name, ip_range).await; (pool, range) } diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 5c0fda03da..bfc0285c42 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -3617,7 +3617,7 @@ async fn test_instance_ephemeral_ip_from_correct_pool( silo: NameOrId::Id(DEFAULT_SILO.id()), is_default: true, }; - let assoc_url = format!("/v1/system/ip-pools/{pool_name}/silo"); + let assoc_url = format!("/v1/system/ip-pools/{pool_name}/silos"); let _ = NexusRequest::objects_post(client, &assoc_url, ¶ms) .authn_as(AuthnMode::PrivilegedUser) .execute() diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index bd7fe6033b..515184dbb9 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -4,12 +4,15 @@ //! 
Integration tests for operating on IP Pools +use std::collections::HashSet; + use dropshot::test_util::ClientTestContext; use dropshot::HttpErrorResponseBody; use dropshot::ResultsPage; use http::method::Method; use http::StatusCode; use nexus_db_queries::db::datastore::SERVICE_IP_POOL_NAME; +use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; @@ -25,6 +28,7 @@ use nexus_types::external_api::params::ExternalIpCreate; use nexus_types::external_api::params::InstanceDiskAttachment; use nexus_types::external_api::params::InstanceNetworkInterfaceAttachment; use nexus_types::external_api::params::IpPoolCreate; +use nexus_types::external_api::params::IpPoolSiloLink; use nexus_types::external_api::params::IpPoolUpdate; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::Ipv4Range; @@ -33,12 +37,12 @@ use nexus_types::external_api::views::IpPool; use nexus_types::external_api::views::IpPoolRange; use nexus_types::external_api::views::IpPoolSilo; use nexus_types::external_api::views::Silo; +use nexus_types::identity::Resource; use omicron_common::api::external::IdentityMetadataUpdateParams; use omicron_common::api::external::NameOrId; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; use omicron_nexus::TestInterfaces; use sled_agent_client::TestInterfaces as SledTestInterfaces; -use std::collections::HashSet; use uuid::Uuid; type ControlPlaneTestContext = @@ -65,10 +69,7 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { .await .expect("Failed to list IP Pools") .all_items; - assert_eq!(ip_pools.len(), 1, "Expected to see default IP pool"); - assert_eq!(ip_pools[0].identity.name, "default"); - // assert_eq!(ip_pools[0].silo_id, None); - // assert!(ip_pools[0].is_default); + assert_eq!(ip_pools.len(), 0, "Expected empty list of IP pools"); // Verify 404 if the pool doesn't exist yet, both for creating or deleting let error: HttpErrorResponseBody = NexusRequest::expect_failure( @@ -135,8 +136,8 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { .await .expect("Failed to list IP Pools") .all_items; - assert_eq!(list.len(), 2, "Expected exactly two IP pools"); - assert_pools_eq(&created_pool, &list[1]); + assert_eq!(list.len(), 1, "Expected exactly 1 IP pool"); + assert_pools_eq(&created_pool, &list[0]); let fetched_pool: IpPool = NexusRequest::object_get(client, &ip_pool_url) .authn_as(AuthnMode::PrivilegedUser) @@ -304,11 +305,10 @@ async fn test_ip_pool_service_no_cud(cptestctx: &ControlPlaneTestContext) { .execute_and_parse_unwrap::() .await; - // but it does not come back in the list. there's one in there and it's the default + // but it does not come back in the list. 
there are none in the list let pools = objects_list_page_authz::(client, "/v1/system/ip-pools").await; - assert_eq!(pools.items.len(), 1); - assert_ne!(pools.items[0].identity.id, pool.identity.id); + assert_eq!(pools.items.len(), 0); // deletes fail @@ -384,7 +384,6 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { // associate by name with silo that exists let params = params::IpPoolSiloLink { - // TODO: this is probably not the best silo ID to use silo: NameOrId::Name(cptestctx.silo_name.clone()), is_default: false, }; @@ -414,7 +413,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { is_default: false, }; let _: IpPoolSilo = - object_create(client, "/v1/system/ip-pools/p1/silo", ¶ms).await; + object_create(client, "/v1/system/ip-pools/p1/silos", ¶ms).await; // association should look the same as the other one, except different pool ID let assocs_p1 = get_associations(client, "p1").await; @@ -438,11 +437,10 @@ async fn test_ip_pool_pagination(cptestctx: &ControlPlaneTestContext) { let base_url = "/v1/system/ip-pools"; let first_page = objects_list_page_authz::(client, &base_url).await; - // we start out with one pool, and it's the default pool - assert_eq!(first_page.items.len(), 1); - assert_eq!(first_page.items[0].identity.name, "default"); + // we start out with no pools + assert_eq!(first_page.items.len(), 0); - let mut pool_names = vec!["default".to_string()]; + let mut pool_names = vec![]; // create more pools to work with, adding their names to the list so we // can use it to check order @@ -465,7 +463,7 @@ async fn test_ip_pool_pagination(cptestctx: &ControlPlaneTestContext) { ); let next_page = objects_list_page_authz::(client, &next_page_url).await; - assert_eq!(get_names(next_page.items), &pool_names[5..9]); + assert_eq!(get_names(next_page.items), &pool_names[5..8]); } /// helper to make tests less ugly @@ -799,41 +797,12 @@ async fn test_ip_pool_list_usable_by_project( let scoped_ip_pools_url = "/v1/ip-pools"; let ip_pools_url = "/v1/system/ip-pools"; let mypool_name = "mypool"; - let default_ip_pool_add_range_url = - format!("{}/default/ranges/add", ip_pools_url); + let mypool_silos_url = format!("{}/{}/silos", ip_pools_url, mypool_name); let mypool_ip_pool_add_range_url = format!("{}/{}/ranges/add", ip_pools_url, mypool_name); let service_ip_pool_add_range_url = "/v1/system/ip-pools-service/ranges/add".to_string(); - // Add an IP range to the default pool - let default_range = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 1), - std::net::Ipv4Addr::new(10, 0, 0, 2), - ) - .unwrap(), - ); - let created_range: IpPoolRange = NexusRequest::objects_post( - client, - &default_ip_pool_add_range_url, - &default_range, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - assert_eq!( - default_range.first_address(), - created_range.range.first_address() - ); - assert_eq!( - default_range.last_address(), - created_range.range.last_address() - ); - // Create an org and project, and then try to make an instance with an IP from // each range to which the project is expected have access. 
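For orientation while reading these test changes, the link/unlink pair the tests converge on looks roughly like the sketch below. It reuses helpers visible in this patch (object_create, NexusRequest, params::IpPoolSiloLink); the pool name, silo query value, and the NexusRequest::object_delete call are illustrative assumptions rather than lines taken from this diff.

// Link: POST /v1/system/ip-pools/{pool}/silos with an IpPoolSiloLink body.
let link = params::IpPoolSiloLink {
    silo: NameOrId::Id(DEFAULT_SILO.id()),
    is_default: true,
};
let _: views::IpPoolSilo =
    object_create(client, "/v1/system/ip-pools/mypool/silos", &link).await;

// Unlink: DELETE /v1/system/ip-pools/{pool}/silos?silo={silo}. Per the
// datastore check earlier in this section, this fails while IPs from the
// pool are still allocated in that silo.
NexusRequest::object_delete(
    client,
    "/v1/system/ip-pools/mypool/silos?silo=test-silo",
)
.authn_as(AuthnMode::PrivilegedUser)
.execute()
.await
.expect("failed to unlink IP pool from silo");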
@@ -841,7 +810,6 @@ async fn test_ip_pool_list_usable_by_project( const INSTANCE_NAME: &str = "myinst"; create_project(client, PROJECT_NAME).await; - // TODO: give this project explicit access when such functionality exists let params = IpPoolCreate { identity: IdentityMetadataCreateParams { name: String::from(mypool_name).parse().unwrap(), @@ -853,19 +821,15 @@ async fn test_ip_pool_list_usable_by_project( .execute_and_parse_unwrap::() .await; - // add to fleet since we can't add to project yet - // TODO: could do silo, might as well? need the ID, though. at least - // until I make it so you can specify the resource by name - // let params = - // params::IpPoolAssociationCreate::Fleet(params::IpPoolAssociateFleet { - // is_default: false, - // }); - // let _: IpPoolResource = object_create( - // client, - // &format!("/v1/system/ip-pools/{mypool_name}/associations"), - // ¶ms, - // ) - // .await; + // associate pool with default silo, which is the privileged user's silo + let params = IpPoolSiloLink { + silo: NameOrId::Id(DEFAULT_SILO.id()), + is_default: true, + }; + NexusRequest::objects_post(client, &mypool_silos_url, ¶ms) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await; // Add an IP range to mypool let mypool_range = IpRange::V4( @@ -921,8 +885,7 @@ async fn test_ip_pool_list_usable_by_project( created_range.range.last_address() ); - // TODO: add non-service, ip pools that the project *can't* use, when that - // functionality is implemented in the future (i.e. a "notmypool") + // TODO: add non-service ip pools that the project *can't* use let list_url = format!("{}?project={}", scoped_ip_pools_url, PROJECT_NAME); let list = NexusRequest::iter_collection_authn::( @@ -932,13 +895,12 @@ async fn test_ip_pool_list_usable_by_project( .expect("Failed to list IP Pools") .all_items; - // default and mypool - assert_eq!(list.len(), 2); + assert_eq!(list.len(), 1); + assert_eq!(list[0].identity.name.to_string(), mypool_name); + + // currently there's only one in the list, so this is overkill. 
But there might be more let pool_names: HashSet = list.iter().map(|pool| pool.identity.name.to_string()).collect(); - let expected_names: HashSet = - ["default", "mypool"].into_iter().map(|s| s.to_string()).collect(); - assert_eq!(pool_names, expected_names); // ensure we can view each pool returned for pool_name in &pool_names { @@ -957,7 +919,7 @@ async fn test_ip_pool_list_usable_by_project( // should be able to access for pool_name in pool_names { let instance_name = format!("{}-{}", INSTANCE_NAME, pool_name); - let pool_name = Some(Name::try_from(pool_name).unwrap()); + let pool_name = Some(Name::try_from(pool_name.to_string()).unwrap()); create_instance_with( client, PROJECT_NAME, @@ -978,13 +940,36 @@ async fn test_ip_range_delete_with_allocated_external_ip_fails( let apictx = &cptestctx.server.apictx(); let nexus = &apictx.nexus; let ip_pools_url = "/v1/system/ip-pools"; - let pool_name = "default"; + let pool_name = "mypool"; let ip_pool_url = format!("{}/{}", ip_pools_url, pool_name); + let ip_pool_silos_url = format!("{}/{}/silos", ip_pools_url, pool_name); let ip_pool_ranges_url = format!("{}/ranges", ip_pool_url); let ip_pool_add_range_url = format!("{}/add", ip_pool_ranges_url); let ip_pool_rem_range_url = format!("{}/remove", ip_pool_ranges_url); - // Add an IP range to the default pool + // create pool + let params = IpPoolCreate { + identity: IdentityMetadataCreateParams { + name: String::from(pool_name).parse().unwrap(), + description: String::from("right on cue"), + }, + }; + NexusRequest::objects_post(client, ip_pools_url, ¶ms) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await; + + // associate pool with default silo, which is the privileged user's silo + let params = IpPoolSiloLink { + silo: NameOrId::Id(DEFAULT_SILO.id()), + is_default: true, + }; + NexusRequest::objects_post(client, &ip_pool_silos_url, ¶ms) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await; + + // Add an IP range to the pool let range = IpRange::V4( Ipv4Range::new( std::net::Ipv4Addr::new(10, 0, 0, 1), From a678c83c4449e53d927a2f33138ab6afac1f7801 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Tue, 5 Dec 2023 21:44:00 -0600 Subject: [PATCH 36/67] make instance integration tests pass --- nexus/src/app/sagas/disk_create.rs | 2 +- nexus/src/app/sagas/disk_delete.rs | 2 +- nexus/src/app/sagas/snapshot_create.rs | 2 +- nexus/test-utils/src/resource_helpers.rs | 23 ++- nexus/tests/integration_tests/instances.rs | 228 ++++++++++----------- 5 files changed, 128 insertions(+), 129 deletions(-) diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index fe403a7d41..d8213eaea2 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -856,7 +856,7 @@ pub(crate) mod test { const PROJECT_NAME: &str = "springfield-squidport"; async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None).await; + create_ip_pool(&client, "p0", None, None).await; let project = create_project(client, PROJECT_NAME).await; project.identity.id } diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index f2d80d64f5..f46847b808 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -189,7 +189,7 @@ pub(crate) mod test { const PROJECT_NAME: &str = "springfield-squidport"; async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None).await; + 
create_ip_pool(&client, "p0", None, None).await; let project = create_project(client, PROJECT_NAME).await; project.identity.id } diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 3b4dfc0043..c3fe6fc327 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -1786,7 +1786,7 @@ mod test { const INSTANCE_NAME: &str = "base-instance"; async fn create_org_project_and_disk(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None).await; + create_ip_pool(&client, "p0", None, None).await; create_project(client, PROJECT_NAME).await; create_disk(client, PROJECT_NAME, DISK_NAME).await.identity.id } diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index f861aaaa9a..20450a51a8 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -32,8 +32,8 @@ use omicron_common::api::external::Disk; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Instance; use omicron_common::api::external::InstanceCpuCount; -use omicron_common::api::external::Name; -use omicron_common::api::external::NameOrId; +// use omicron_common::api::external::Name; +// use omicron_common::api::external::NameOrId; use omicron_sled_agent::sim::SledAgent; use std::sync::Arc; use uuid::Uuid; @@ -133,6 +133,9 @@ pub async fn create_ip_pool( client: &ClientTestContext, pool_name: &str, ip_range: Option, + // TODO: could change this to is_default -- always associate with default + // silo, let caller decide whether it's the default + silo_link: Option, ) -> (IpPool, IpPoolRange) { let pool = object_create( client, @@ -146,15 +149,13 @@ pub async fn create_ip_pool( ) .await; - // let _assoc: views::IpPoolSilo = object_create( - // client, - // &format!("/v1/system/ip-pools/{pool_name}/associations"), - // ¶ms::IpPoolSiloLink { - // silo: NameOrId::Id(DEFAULT_SILO.id()), - // is_default: false, - // }, - // ) - // .await; + if let Some(silo_link) = silo_link { + let url = format!("/v1/system/ip-pools/{pool_name}/silos"); + object_create::( + client, &url, &silo_link, + ) + .await; + } let range = populate_ip_pool(client, pool_name, ip_range).await; (pool, range) diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index bfc0285c42..85b6f16317 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -99,10 +99,18 @@ fn default_vpc_subnets_url() -> String { format!("/v1/vpc-subnets?{}&vpc=default", get_project_selector()) } -async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; - let project = create_project(client, PROJECT_NAME).await; - project.identity.id +async fn create_default_ip_pool(client: &ClientTestContext) -> views::IpPool { + let link = params::IpPoolSiloLink { + silo: NameOrId::Id(DEFAULT_SILO.id()), + is_default: true, + }; + let (pool, ..) 
= create_ip_pool(&client, "default", None, Some(link)).await; + pool +} + +async fn create_project_and_pool(client: &ClientTestContext) -> views::Project { + create_default_ip_pool(client).await; + create_project(client, PROJECT_NAME).await } #[nexus_test] @@ -160,8 +168,7 @@ async fn test_instances_access_before_create_returns_not_found( async fn test_instance_access(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; - let project = create_project(client, PROJECT_NAME).await; + let project = create_project_and_pool(client).await; // Create an instance. let instance_name = "test-instance"; @@ -209,7 +216,7 @@ async fn test_instances_create_reboot_halt( let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create an instance. let instance_url = get_instance_url(instance_name); @@ -535,7 +542,7 @@ async fn test_instance_start_creates_networking_state( ); } - create_org_and_project(&client).await; + create_project_and_pool(&client).await; let instance_url = get_instance_url(instance_name); let instance = create_instance(client, PROJECT_NAME, instance_name).await; @@ -636,7 +643,7 @@ async fn test_instance_migrate(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - create_org_and_project(&client).await; + create_project_and_pool(&client).await; let instance_url = get_instance_url(instance_name); // Explicitly create an instance with no disks. Simulated sled agent assumes @@ -740,8 +747,8 @@ async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { } // Set up the project and test instance. - populate_ip_pool(&client, "default", None).await; - create_project(client, PROJECT_NAME).await; + create_project_and_pool(client).await; + let instance = nexus_test_utils::resource_helpers::create_instance_with( client, PROJECT_NAME, @@ -927,8 +934,8 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { let datastore = nexus.datastore(); // Create an IP pool and project that we'll use for testing. - populate_ip_pool(&client, "default", None).await; - let project_id = create_project(&client, PROJECT_NAME).await.identity.id; + let project = create_project_and_pool(&client).await; + let project_id = project.identity.id; // Query the view of these metrics stored within CRDB let opctx = @@ -1018,7 +1025,8 @@ async fn test_instance_metrics_with_migration( .await .unwrap(); - let project_id = create_org_and_project(&client).await; + let project = create_project_and_pool(&client).await; + let project_id = project.identity.id; let instance_url = get_instance_url(instance_name); // Explicitly create an instance with no disks. Simulated sled agent assumes @@ -1127,7 +1135,7 @@ async fn test_instances_create_stopped_start( let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create an instance in a stopped state. let instance: Instance = object_create( @@ -1177,7 +1185,7 @@ async fn test_instances_delete_fails_when_running_succeeds_when_stopped( let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create an instance. 
let instance_url = get_instance_url(instance_name); @@ -1273,7 +1281,7 @@ async fn test_instance_using_image_from_other_project_fails( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; let server = ServerBuilder::new().run().unwrap(); server.expect( @@ -1369,7 +1377,7 @@ async fn test_instance_create_saga_removes_instance_database_record( let client = &cptestctx.external_client; // Create test IP pool, organization and project - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // The network interface parameters. let default_name = "default".parse::().unwrap(); @@ -1484,7 +1492,7 @@ async fn test_instance_with_single_explicit_ip_address( ) { let client = &cptestctx.external_client; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create the parameters for the interface. let default_name = "default".parse::().unwrap(); @@ -1558,7 +1566,7 @@ async fn test_instance_with_new_custom_network_interfaces( ) { let client = &cptestctx.external_client; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create a VPC Subnet other than the default. // // We'll create one interface in the default VPC Subnet and one in this new @@ -1708,7 +1716,7 @@ async fn test_instance_create_delete_network_interface( let nexus = &cptestctx.server.apictx().nexus; let instance_name = "nic-attach-test-inst"; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create the VPC Subnet for the secondary interface let secondary_subnet = params::VpcSubnetCreate { @@ -1948,7 +1956,7 @@ async fn test_instance_update_network_interfaces( let nexus = &cptestctx.server.apictx().nexus; let instance_name = "nic-update-test-inst"; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create the VPC Subnet for the secondary interface let secondary_subnet = params::VpcSubnetCreate { @@ -2412,7 +2420,7 @@ async fn test_attach_one_disk_to_instance(cptestctx: &ControlPlaneTestContext) { // Test pre-reqs DiskTest::new(&cptestctx).await; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Create the "probablydata" disk create_disk(&client, PROJECT_NAME, "probablydata").await; @@ -2482,7 +2490,7 @@ async fn test_instance_create_attach_disks( // Test pre-reqs DiskTest::new(&cptestctx).await; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; let attachable_disk = create_disk(&client, PROJECT_NAME, "attachable-disk").await; @@ -2556,7 +2564,7 @@ async fn test_instance_create_attach_disks_undo( // Test pre-reqs DiskTest::new(&cptestctx).await; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; let regular_disk = create_disk(&client, PROJECT_NAME, "a-reg-disk").await; let faulted_disk = create_disk(&client, PROJECT_NAME, "faulted-disk").await; @@ -2649,7 +2657,7 @@ async fn test_attach_eight_disks_to_instance( // Test pre-reqs DiskTest::new(&cptestctx).await; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Make 8 disks for i in 0..8 { @@ -2802,7 +2810,7 @@ async fn test_cannot_attach_faulted_disks(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; // Test pre-reqs DiskTest::new(&cptestctx).await; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Make 8 disks for i in 0..8 
{ @@ -2902,7 +2910,7 @@ async fn test_disks_detached_when_instance_destroyed( // Test pre-reqs DiskTest::new(&cptestctx).await; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; // Make 8 disks for i in 0..8 { @@ -3068,7 +3076,7 @@ async fn test_instances_memory_rejected_less_than_min_memory_size( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Attempt to create the instance, observe a server error. let instance_name = "just-rainsticks"; @@ -3117,7 +3125,7 @@ async fn test_instances_memory_not_divisible_by_min_memory_size( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Attempt to create the instance, observe a server error. let instance_name = "just-rainsticks"; @@ -3165,7 +3173,7 @@ async fn test_instances_memory_greater_than_max_size( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Attempt to create the instance, observe a server error. let instance_name = "just-rainsticks"; @@ -3259,8 +3267,7 @@ async fn test_cannot_provision_instance_beyond_cpu_capacity( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_project(client, PROJECT_NAME).await; - populate_ip_pool(&client, "default", None).await; + create_project_and_pool(client).await; // The third item in each tuple specifies whether instance start should // succeed or fail if all these configs are visited in order and started in @@ -3328,8 +3335,7 @@ async fn test_cannot_provision_instance_beyond_cpu_limit( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_project(client, PROJECT_NAME).await; - populate_ip_pool(&client, "default", None).await; + create_project_and_pool(client).await; let too_many_cpus = InstanceCpuCount::try_from(i64::from(MAX_VCPU_PER_INSTANCE + 1)) @@ -3370,8 +3376,7 @@ async fn test_cannot_provision_instance_beyond_ram_capacity( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_project(client, PROJECT_NAME).await; - populate_ip_pool(&client, "default", None).await; + create_project_and_pool(client).await; let configs = vec![ ( @@ -3445,7 +3450,7 @@ async fn test_instance_serial(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - create_org_and_project(&client).await; + create_project_and_pool(&client).await; let instance_url = get_instance_url(instance_name); // Make sure we get a 404 if we try to access the serial console before creation. @@ -3562,97 +3567,56 @@ async fn test_instance_ephemeral_ip_from_correct_pool( // // The first is given to the "default" pool, the provided to a distinct // explicit pool. 
- let default_pool_range = IpRange::V4( + let range1 = IpRange::V4( Ipv4Range::new( std::net::Ipv4Addr::new(10, 0, 0, 1), std::net::Ipv4Addr::new(10, 0, 0, 5), ) .unwrap(), ); - let other_pool_range = IpRange::V4( + let range2 = IpRange::V4( Ipv4Range::new( std::net::Ipv4Addr::new(10, 1, 0, 1), std::net::Ipv4Addr::new(10, 1, 0, 5), ) .unwrap(), ); - populate_ip_pool(&client, "default", Some(default_pool_range)).await; - create_ip_pool(&client, "other-pool", Some(other_pool_range)).await; + + // make first pool the default for the priv user's silo + let link = params::IpPoolSiloLink { + silo: NameOrId::Id(DEFAULT_SILO.id()), + is_default: true, + }; + create_ip_pool(&client, "default", Some(range1), Some(link)).await; + + // second pool is associated with the silo but not default + let link = params::IpPoolSiloLink { + silo: NameOrId::Id(DEFAULT_SILO.id()), + is_default: false, + }; + create_ip_pool(&client, "other-pool", Some(range2), Some(link)).await; // Create an instance with pool name blank, expect IP from default pool create_instance_with_pool(client, "default-pool-inst", None).await; let ip = fetch_instance_ephemeral_ip(client, "default-pool-inst").await; assert!( - ip.ip >= default_pool_range.first_address() - && ip.ip <= default_pool_range.last_address(), + ip.ip >= range1.first_address() && ip.ip <= range1.last_address(), "Expected ephemeral IP to come from default pool" ); - // Create an instance explicitly using the "other-pool". + // Create an instance explicitly using the non-default "other-pool". create_instance_with_pool(client, "other-pool-inst", Some("other-pool")) .await; let ip = fetch_instance_ephemeral_ip(client, "other-pool-inst").await; assert!( - ip.ip >= other_pool_range.first_address() - && ip.ip <= other_pool_range.last_address(), + ip.ip >= range2.first_address() && ip.ip <= range2.last_address(), "Expected ephemeral IP to come from other pool" ); - - // now create a third pool, a silo default, to confirm it gets used. 
not - // using create_ip_pool because we need to specify a silo and default: true - let pool_name = "silo-pool"; - let _silo_pool: views::IpPool = object_create( - client, - "/v1/system/ip-pools", - ¶ms::IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: pool_name.parse().unwrap(), - description: String::from("an ip pool"), - }, - }, - ) - .await; - let params = params::IpPoolSiloLink { - silo: NameOrId::Id(DEFAULT_SILO.id()), - is_default: true, - }; - let assoc_url = format!("/v1/system/ip-pools/{pool_name}/silos"); - let _ = NexusRequest::objects_post(client, &assoc_url, ¶ms) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await; - - let silo_pool_range = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(10, 2, 0, 1), - std::net::Ipv4Addr::new(10, 2, 0, 5), - ) - .unwrap(), - ); - populate_ip_pool(client, pool_name, Some(silo_pool_range)).await; - - create_instance_with_pool(client, "silo-pool-inst", Some("silo-pool")) - .await; - let ip = fetch_instance_ephemeral_ip(client, "silo-pool-inst").await; - assert!( - ip.ip >= silo_pool_range.first_address() - && ip.ip <= silo_pool_range.last_address(), - "Expected ephemeral IP to come from the silo default pool" - ); - - // we can still specify other pool even though we now have a silo default - create_instance_with_pool(client, "other-pool-inst-2", Some("other-pool")) - .await; - - let ip = fetch_instance_ephemeral_ip(client, "other-pool-inst-2").await; - assert!( - ip.ip >= other_pool_range.first_address() - && ip.ip <= other_pool_range.last_address(), - "Expected ephemeral IP to come from the other pool" - ); } +// IP pool that exists but is not associated with any silo (or with a silo other +// than the current user's) cannot be used to get IPs #[nexus_test] async fn test_instance_ephemeral_ip_from_orphan_pool( cptestctx: &ControlPlaneTestContext, @@ -3661,6 +3625,25 @@ async fn test_instance_ephemeral_ip_from_orphan_pool( let _ = create_project(&client, PROJECT_NAME).await; + // have to give default pool a range or snat IP allocation fails before it + // can get to failing on ephemeral IP allocation + let default_pool_range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(10, 0, 0, 1), + std::net::Ipv4Addr::new(10, 0, 0, 5), + ) + .unwrap(), + ); + + // make first pool the default for the priv user's silo + let link = params::IpPoolSiloLink { + silo: NameOrId::Id(DEFAULT_SILO.id()), + is_default: true, + }; + create_ip_pool(&client, "default", Some(default_pool_range), Some(link)) + .await; + + // don't use create_ip_pool because it automatically associates with a pool let pool_name = "orphan-pool"; let _: views::IpPool = object_create( client, @@ -3674,13 +3657,6 @@ async fn test_instance_ephemeral_ip_from_orphan_pool( ) .await; - let default_pool_range = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 1), - std::net::Ipv4Addr::new(10, 0, 0, 5), - ) - .unwrap(), - ); let orphan_pool_range = IpRange::V4( Ipv4Range::new( std::net::Ipv4Addr::new(10, 1, 0, 1), @@ -3688,9 +3664,6 @@ async fn test_instance_ephemeral_ip_from_orphan_pool( ) .unwrap(), ); - // have to populate default pool or snat IP allocation fails before it can - // get to failing on ephemeral IP allocation - populate_ip_pool(&client, "default", Some(default_pool_range)).await; populate_ip_pool(client, pool_name, Some(orphan_pool_range)).await; // this should 404 @@ -3730,8 +3703,30 @@ async fn test_instance_ephemeral_ip_from_orphan_pool( "not found: ip-pool with name \"orphan-pool\"".to_string() ); - // TODO: 
associate the pool with a different silo and we should get the same + // associate the pool with a different silo and we should get the same // error on instance create + let params = params::IpPoolSiloLink { + silo: NameOrId::Name(cptestctx.silo_name.clone()), + is_default: false, + }; + let _: views::IpPoolSilo = + object_create(client, "/v1/system/ip-pools/orphan-pool/silos", ¶ms) + .await; + + let error = NexusRequest::new( + RequestBuilder::new(&client, http::Method::POST, &url) + .expect_status(Some(StatusCode::NOT_FOUND)) + .body(Some(&body)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await; + + assert_eq!(error.error_code.unwrap(), "ObjectNotFound".to_string()); + assert_eq!( + error.message, + "not found: ip-pool with name \"orphan-pool\"".to_string() + ); } async fn create_instance_with_pool( @@ -3796,8 +3791,12 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { ) .await; - // Populate IP Pool - populate_ip_pool(&client, "default", None).await; + // can't use create_default_ip_pool because we need to link to the silo we just made + let link = params::IpPoolSiloLink { + silo: NameOrId::Id(silo.identity.id), + is_default: true, + }; + create_ip_pool(&client, "default", None, Some(link)).await; // Create test projects NexusRequest::objects_post( @@ -3879,8 +3878,7 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; - create_project(client, PROJECT_NAME).await; + create_project_and_pool(client).await; // Add a few more sleds let nsleds = 3; From 31b476f1598b0a6cd20606005beef94854152637 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 6 Dec 2023 00:44:36 -0600 Subject: [PATCH 37/67] fix nexus-db-queries tests --- nexus/db-queries/src/db/datastore/ip_pool.rs | 55 ++++--------------- .../db-queries/src/db/queries/external_ip.rs | 16 +++--- 2 files changed, 18 insertions(+), 53 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index ca87722668..4f221d86f3 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -672,38 +672,9 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - // we start out with the default fleet-level pool already created, - // so when we ask for a default silo, we get it back - let fleet_default_pool = - datastore.ip_pools_fetch_default(&opctx).await.unwrap(); - - assert_eq!(fleet_default_pool.identity.name.as_str(), "default"); - - // // unique index prevents second fleet-level default - // let identity = IdentityMetadataCreateParams { - // name: "another-fleet-default".parse().unwrap(), - // description: "".to_string(), - // }; - // let second_default = datastore - // .ip_pool_create(&opctx, IpPool::new(&identity)) - // .await - // .expect("Failed to create pool"); - // let err = datastore - // .ip_pool_associate_resource( - // &opctx, - // IpPoolResource { - // ip_pool_id: second_default.id(), - // resource_type: IpPoolResourceType::Fleet, - // resource_id: *FLEET_ID, - // is_default: true, - // }, - // ) - // .await - // .expect_err("Failed to fail to make IP pool fleet default"); - - // assert_matches!(err, Error::ObjectAlreadyExists { .. 
}); - - // now test logic preferring most specific available default + // we start out with no default pool, so we expect not found + let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); + assert_matches!(error, Error::InternalError { .. }); let silo_id = opctx.authn.silo_required().unwrap().id(); @@ -730,12 +701,9 @@ mod test { .expect("Failed to associate IP pool with silo"); // because that one was not a default, when we ask for the silo default - // pool, we still get the fleet default - let ip_pool = datastore - .ip_pools_fetch_default(&opctx) - .await - .expect("Failed to get default IP pool"); - assert_eq!(ip_pool.id(), fleet_default_pool.id()); + // pool, we still get nothing + let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); + assert_matches!(error, Error::InternalError { .. }); // now we can change that association to is_default=true and // it should update rather than erroring out @@ -752,7 +720,7 @@ mod test { .await .expect("Failed to make IP pool default for silo"); - // now when we ask for the default pool again, we get the one we just changed + // now when we ask for the default pool again, we get that one let ip_pool = datastore .ip_pools_fetch_default(&opctx) .await @@ -782,7 +750,7 @@ mod test { .expect_err("Failed to fail to set a second default pool for silo"); assert_matches!(err, Error::ObjectAlreadyExists { .. }); - // now remove the association and we should get the default fleet pool again + // now remove the association and we should get nothing again datastore .ip_pool_dissociate_resource( &opctx, @@ -795,11 +763,8 @@ mod test { .await .expect("Failed to dissociate IP pool from silo"); - let ip_pool = datastore - .ip_pools_fetch_default(&opctx) - .await - .expect("Failed to get default IP pool"); - assert_eq!(ip_pool.id(), fleet_default_pool.id()); + let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); + assert_matches!(error, Error::InternalError { .. }); db.cleanup().await.unwrap(); logctx.cleanup_successful(); diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index df45b41012..cb05a910a2 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -864,7 +864,7 @@ mod tests { Self { logctx, opctx, db, db_datastore } } - /// Create pool and associate with current silo + /// Create pool, associate with current silo async fn create_ip_pool(&self, name: &str, range: IpRange) { let pool = IpPool::new(&IdentityMetadataCreateParams { name: String::from(name).parse().unwrap(), @@ -949,7 +949,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 1), )) .unwrap(); - context.initialize_ip_pool("default", range).await; + context.create_ip_pool("default", range).await; for first_port in (0..super::MAX_PORT).step_by(NUM_SOURCE_NAT_PORTS.into()) { @@ -1003,7 +1003,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 1), )) .unwrap(); - context.initialize_ip_pool("default", range).await; + context.create_ip_pool("default", range).await; // Allocate an Ephemeral IP, which should take the entire port range of // the only address in the pool. @@ -1084,7 +1084,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.initialize_ip_pool("default", range).await; + context.create_ip_pool("default", range).await; // TODO-completeness: Implementing Iterator for IpRange would be nice. 
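The TODO just above is a fair one; a rough sketch of what an Iterator over an inclusive IPv4 range could look like, stepping through the u32 representation of Ipv4Addr (hypothetical code, not part of this patch):

    use std::net::Ipv4Addr;

    // Hypothetical: yields every address from `first` to `last` inclusive.
    struct Ipv4RangeIter {
        next: u32,
        last: u32,
        done: bool,
    }

    impl Ipv4RangeIter {
        fn new(first: Ipv4Addr, last: Ipv4Addr) -> Self {
            Self { next: u32::from(first), last: u32::from(last), done: false }
        }
    }

    impl Iterator for Ipv4RangeIter {
        type Item = Ipv4Addr;

        fn next(&mut self) -> Option<Ipv4Addr> {
            if self.done {
                return None;
            }
            let addr = Ipv4Addr::from(self.next);
            if self.next == self.last {
                // Inclusive upper bound; this also avoids overflow when the
                // range ends at 255.255.255.255.
                self.done = true;
            } else {
                self.next += 1;
            }
            Some(addr)
        }
    }

With something like this, the hand-maintained address list that follows could be derived from the range instead.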
let addresses = [ @@ -1185,7 +1185,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.initialize_ip_pool("default", range).await; + context.create_ip_pool("default", range).await; let instance_id = Uuid::new_v4(); let id = Uuid::new_v4(); @@ -1644,7 +1644,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.initialize_ip_pool("default", range).await; + context.create_ip_pool("default", range).await; // Create one SNAT IP address. let instance_id = Uuid::new_v4(); @@ -1706,7 +1706,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.initialize_ip_pool("default", first_range).await; + context.create_ip_pool("default", first_range).await; let second_range = IpRange::try_from(( Ipv4Addr::new(10, 0, 0, 4), Ipv4Addr::new(10, 0, 0, 6), @@ -1750,7 +1750,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.initialize_ip_pool("default", first_range).await; + context.create_ip_pool("default", first_range).await; let first_address = Ipv4Addr::new(10, 0, 0, 4); let last_address = Ipv4Addr::new(10, 0, 0, 6); let second_range = From 6a55e8966c0cd4708674b40e46fe253032fa5e28 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 6 Dec 2023 13:11:07 -0600 Subject: [PATCH 38/67] add make_default endpoint and give it one lil test --- nexus/db-queries/src/db/datastore/ip_pool.rs | 84 ++++++++++++++++++- .../db-queries/src/db/queries/external_ip.rs | 4 +- nexus/src/app/ip_pool.rs | 15 ++++ nexus/src/external_api/http_entrypoints.rs | 33 +++++++- nexus/tests/integration_tests/instances.rs | 44 ++++++---- nexus/tests/integration_tests/ip_pools.rs | 58 ++++++++----- nexus/types/src/external_api/params.rs | 5 -- 7 files changed, 196 insertions(+), 47 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 4f221d86f3..4696ed2070 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -21,9 +21,11 @@ use crate::db::model::{ use crate::db::pagination::paginated; use crate::db::pool::DbConnection; use crate::db::queries::ip_pool::FilterOverlappingIpRanges; -use async_bb8_diesel::AsyncRunQueryDsl; +use crate::db::TransactionError; +use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; use chrono::Utc; use diesel::prelude::*; +use diesel::result::Error as DieselError; use ipnetwork::IpNetwork; use nexus_db_model::ExternalIp; use nexus_db_model::IpPoolResourceType; @@ -362,6 +364,8 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + // TODO: separate this operation from update so that we can have /link 409 + // or whatever when the association already exists? pub async fn ip_pool_associate_resource( &self, opctx: &OpContext, @@ -408,6 +412,84 @@ impl DataStore { }) } + // TODO: make default should fail when the association doesn't exist. + // should it also fail when it's already default? probably not? + pub async fn ip_pool_make_default( + &self, + opctx: &OpContext, + authz_ip_pool: &authz::IpPool, + authz_silo: &authz::Silo, + ) -> UpdateResult { + use db::schema::ip_pool_resource::dsl; + + // TODO: correct auth check + opctx + .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) + .await?; + + let ip_pool_id = authz_ip_pool.id(); + let silo_id = authz_silo.id(); + + // Errors returned from the below transactions. 
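The comment above introduces the transaction that follows. Its contract, stripped of the Diesel details: the link being promoted must already exist, any existing default for the silo is cleared first, and promoting the current default is a no-op. A toy sketch of that invariant over an in-memory map (illustrative only; the real code below has to do this inside a single database transaction):

    use std::collections::HashMap;
    use uuid::Uuid;

    // Stand-in for ip_pool_resource rows: (silo_id, pool_id) -> is_default.
    type Links = HashMap<(Uuid, Uuid), bool>;

    fn make_default(links: &mut Links, silo_id: Uuid, pool_id: Uuid) -> Result<(), String> {
        if !links.contains_key(&(silo_id, pool_id)) {
            return Err(String::from("pool is not linked to this silo"));
        }
        // Step 1: clear any existing default for the silo (a no-op when the
        // requested pool is already the default).
        for (&(s, _), is_default) in links.iter_mut() {
            if s == silo_id {
                *is_default = false;
            }
        }
        // Step 2: mark the requested link as the default.
        links.insert((silo_id, pool_id), true);
        Ok(())
    }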
+ #[derive(Debug)] + enum IpPoolResourceUpdateError { + FailedToUnsetDefault(DieselError), + } + type TxnError = TransactionError; + + let conn = self.pool_connection_authorized(opctx).await?; + conn.transaction_async(|conn| async move { + // note this is matching the specified silo, but could be any pool + let existing_default_for_silo = dsl::ip_pool_resource + .filter(dsl::resource_type.eq(IpPoolResourceType::Silo)) + .filter(dsl::resource_id.eq(silo_id)) + .filter(dsl::is_default.eq(true)) + .select(IpPoolResource::as_select()) + .get_result_async(&conn) + .await; + + // if there is an existing default, we need to unset it before we can + // set the new default + if let Ok(existing_default) = existing_default_for_silo { + // if the pool we're making default is already default for this + // silo, don't error: just noop + if existing_default.ip_pool_id == ip_pool_id { + return Ok(existing_default); + } + + let unset_default = diesel::update(dsl::ip_pool_resource) + .filter(dsl::resource_id.eq(existing_default.resource_id)) + .filter(dsl::ip_pool_id.eq(existing_default.ip_pool_id)) + .filter( + dsl::resource_type.eq(existing_default.resource_type), + ) + .set(dsl::is_default.eq(false)) + .execute_async(&conn) + .await; + if let Err(e) = unset_default { + return Err(TxnError::CustomError( + IpPoolResourceUpdateError::FailedToUnsetDefault(e), + )); + } + } + + // TODO: test that this errors if the link doesn't exist already + let updated_link = diesel::update(dsl::ip_pool_resource) + .filter(dsl::resource_id.eq(silo_id)) + .filter(dsl::ip_pool_id.eq(ip_pool_id)) + .filter(dsl::resource_type.eq(IpPoolResourceType::Silo)) + .set(dsl::is_default.eq(true)) + .returning(IpPoolResource::as_returning()) + .get_result_async(&conn) + .await?; + Ok(updated_link) + }) + .await + .map_err(|e| { + Error::internal_error(&format!("Transaction error: {:?}", e)) + }) + } + // TODO: write a test for this async fn ensure_no_ips_outstanding( &self, diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index cb05a910a2..baf27a8b1b 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -886,7 +886,7 @@ mod tests { self.db_datastore .ip_pool_associate_resource(&self.opctx, association) .await - .expect("Failed to associate IP dool with silo"); + .expect("Failed to associate IP pool with silo"); self.initialize_ip_pool(name, range).await; } @@ -1712,6 +1712,8 @@ mod tests { Ipv4Addr::new(10, 0, 0, 6), )) .unwrap(); + // TODO: failing because I changed create_ip_pool to make it + // default for the silo, and there is already a default context.create_ip_pool("p1", second_range).await; // Allocating an address on an instance in the second pool should be diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 7284facf72..e97fd2b9df 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -135,6 +135,21 @@ impl super::Nexus { .await } + pub(crate) async fn ip_pool_make_default( + &self, + opctx: &OpContext, + pool_lookup: &lookup::IpPool<'_>, + silo_lookup: &lookup::Silo<'_>, + ) -> CreateResult { + let (.., authz_pool) = + pool_lookup.lookup_for(authz::Action::Modify).await?; + let (.., authz_silo) = + silo_lookup.lookup_for(authz::Action::Read).await?; + self.db_datastore + .ip_pool_make_default(opctx, &authz_pool, &authz_silo) + .await + } + pub(crate) async fn ip_pools_list( &self, opctx: &OpContext, diff --git a/nexus/src/external_api/http_entrypoints.rs 
b/nexus/src/external_api/http_entrypoints.rs index 25e4039d24..b842c81c7d 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -125,6 +125,7 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(ip_pool_silo_list)?; api.register(ip_pool_silo_link)?; api.register(ip_pool_silo_unlink)?; + api.register(ip_pool_make_default)?; api.register(ip_pool_view)?; api.register(ip_pool_delete)?; api.register(ip_pool_update)?; @@ -1399,7 +1400,7 @@ async fn ip_pool_silo_link( async fn ip_pool_silo_unlink( rqctx: RequestContext<Arc<ServerContext>>, path_params: Path<params::IpPoolPath>, - query_params: Query<params::IpPoolSiloUnlink>, + query_params: Query<params::SiloSelector>, ) -> Result<HttpResponseUpdatedNoContent, HttpError> { let apictx = rqctx.context(); let handler = async { @@ -1418,6 +1419,36 @@ async fn ip_pool_silo_unlink( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } +// TODO: change this to PUT /ip-pools/{pool}/silos/{silo} so +// it can be used for both set default true and false + +/// Make an IP pool the default for a silo +#[endpoint { + method = POST, + path = "/v1/system/ip-pools/{pool}/make_default", + tags = ["system/networking"], +}] +async fn ip_pool_make_default( + rqctx: RequestContext<Arc<ServerContext>>, + path_params: Path<params::IpPoolPath>, + silo_selector: TypedBody<params::SiloSelector>, +) -> Result<HttpResponseCreated<views::IpPoolSilo>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let silo_selector = silo_selector.into_inner(); + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + let silo_lookup = nexus.silo_lookup(&opctx, silo_selector.silo)?; + let assoc = nexus + .ip_pool_make_default(&opctx, &pool_lookup, &silo_lookup) + .await?; + Ok(HttpResponseCreated(assoc.into())) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + /// Fetch the IP pool used for Oxide services #[endpoint { method = GET, diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 85b6f16317..c5d4abc23b 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -3583,35 +3583,45 @@ async fn test_instance_ephemeral_ip_from_correct_pool( ); // make first pool the default for the priv user's silo - let link = params::IpPoolSiloLink { - silo: NameOrId::Id(DEFAULT_SILO.id()), - is_default: true, - }; - create_ip_pool(&client, "default", Some(range1), Some(link)).await; + dbg!(DEFAULT_SILO.id()); + let silo = NameOrId::Id(DEFAULT_SILO.id()); + let link = params::IpPoolSiloLink { silo: silo.clone(), is_default: true }; + create_ip_pool(&client, "pool1", Some(range1), Some(link)).await; // second pool is associated with the silo but not default - let link = params::IpPoolSiloLink { - silo: NameOrId::Id(DEFAULT_SILO.id()), - is_default: false, - }; - create_ip_pool(&client, "other-pool", Some(range2), Some(link)).await; + let link = params::IpPoolSiloLink { silo: silo.clone(), is_default: false }; + create_ip_pool(&client, "pool2", Some(range2), Some(link)).await; // Create an instance with pool name blank, expect IP from default pool - create_instance_with_pool(client, "default-pool-inst", None).await; + create_instance_with_pool(client, "pool1-inst", None).await; - let ip = fetch_instance_ephemeral_ip(client, "default-pool-inst").await; + let ip = fetch_instance_ephemeral_ip(client, "pool1-inst").await; assert!( ip.ip >= range1.first_address() && ip.ip <= range1.last_address(), - "Expected ephemeral IP to come from
default pool" + "Expected ephemeral IP to come from pool1" ); // Create an instance explicitly using the non-default "other-pool". - create_instance_with_pool(client, "other-pool-inst", Some("other-pool")) - .await; - let ip = fetch_instance_ephemeral_ip(client, "other-pool-inst").await; + create_instance_with_pool(client, "pool2-inst", Some("pool2")).await; + let ip = fetch_instance_ephemeral_ip(client, "pool2-inst").await; + assert!( + ip.ip >= range2.first_address() && ip.ip <= range2.last_address(), + "Expected ephemeral IP to come from pool2" + ); + + // make pool2 default and create instance with default pool. check that it now it comes from pool2 + let _: views::IpPoolSilo = object_create( + client, + "/v1/system/ip-pools/pool2/make_default", + ¶ms::SiloSelector { silo: silo.clone() }, + ) + .await; + + create_instance_with_pool(client, "pool2-inst2", None).await; + let ip = fetch_instance_ephemeral_ip(client, "pool2-inst2").await; assert!( ip.ip >= range2.first_address() && ip.ip <= range2.last_address(), - "Expected ephemeral IP to come from other pool" + "Expected ephemeral IP to come from pool2" ); } diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 515184dbb9..ca8308a441 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -30,6 +30,7 @@ use nexus_types::external_api::params::InstanceNetworkInterfaceAttachment; use nexus_types::external_api::params::IpPoolCreate; use nexus_types::external_api::params::IpPoolSiloLink; use nexus_types::external_api::params::IpPoolUpdate; +use nexus_types::external_api::params::SiloSelector; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::Ipv4Range; use nexus_types::external_api::shared::Ipv6Range; @@ -351,7 +352,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { let p1 = create_pool(client, "p1").await; // there should be no associations - let assocs_p0 = get_associations(client, "p0").await; + let assocs_p0 = silos_for_pool(client, "p0").await; assert_eq!(assocs_p0.items.len(), 0); // expect 404 on association if the specified silo doesn't exist @@ -383,46 +384,62 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { ); // associate by name with silo that exists - let params = params::IpPoolSiloLink { - silo: NameOrId::Name(cptestctx.silo_name.clone()), - is_default: false, - }; + let silo = NameOrId::Name(cptestctx.silo_name.clone()); + let params = + params::IpPoolSiloLink { silo: silo.clone(), is_default: false }; let _: IpPoolSilo = object_create(client, "/v1/system/ip-pools/p0/silos", ¶ms).await; // get silo ID so we can test association by ID as well let silo_url = format!("/v1/system/silos/{}", cptestctx.silo_name); - let silo = NexusRequest::object_get(client, &silo_url) + let silo_id = NexusRequest::object_get(client, &silo_url) .authn_as(AuthnMode::PrivilegedUser) .execute_and_parse_unwrap::() - .await; - let silo_id = silo.identity.id; + .await + .identity + .id; - let assocs_p0 = get_associations(client, "p0").await; - let silo_assoc = + let assocs_p0 = silos_for_pool(client, "p0").await; + let silo_link = IpPoolSilo { ip_pool_id: p0.identity.id, silo_id, is_default: false }; assert_eq!(assocs_p0.items.len(), 1); - assert_eq!(assocs_p0.items[0], silo_assoc); + assert_eq!(assocs_p0.items[0], silo_link); // TODO: dissociate silo // TODO: confirm dissociation // associate same silo to other pool by ID let params = params::IpPoolSiloLink { - silo: 
NameOrId::Id(silo.identity.id), + silo: NameOrId::Id(silo_id), is_default: false, }; let _: IpPoolSilo = object_create(client, "/v1/system/ip-pools/p1/silos", &params).await; // association should look the same as the other one, except different pool ID - let assocs_p1 = get_associations(client, "p1").await; - assert_eq!(assocs_p1.items.len(), 1); + let silos_p1 = silos_for_pool(client, "p1").await; + assert_eq!(silos_p1.items.len(), 1); assert_eq!( - assocs_p1.items[0], - IpPoolSilo { ip_pool_id: p1.identity.id, ..silo_assoc } + silos_p1.items[0], + IpPoolSilo { ip_pool_id: p1.identity.id, ..silo_link } ); + // make p0's pool default and show that it changes + let params = SiloSelector { silo: silo.clone() }; + let _: IpPoolSilo = + object_create(client, "/v1/system/ip-pools/p0/make_default", &params) + .await; + // making the same one default again is not an error + let _: IpPoolSilo = + object_create(client, "/v1/system/ip-pools/p0/make_default", &params) + .await; + + let silos_p0 = silos_for_pool(client, "p0").await; + assert_eq!(silos_p0.items.len(), 1); + assert_eq!(silos_p0.items[0].is_default, true); + + // TODO: unset default + // TODO: associating a resource that is already associated should be a noop // and return a success message @@ -471,15 +488,12 @@ fn get_names(pools: Vec<IpPool>) -> Vec<String> { pools.iter().map(|p| p.identity.name.to_string()).collect() } -async fn get_associations( +async fn silos_for_pool( client: &ClientTestContext, id: &str, -) -> ResultsPage<IpPoolSilo> { - objects_list_page_authz::<IpPoolSilo>( - client, - &format!("/v1/system/ip-pools/{}/silos", id), - ) - .await +) -> ResultsPage<IpPoolSilo> { + let url = format!("/v1/system/ip-pools/{}/silos", id); + objects_list_page_authz::<IpPoolSilo>(client, &url).await } async fn create_pool(client: &ClientTestContext, name: &str) -> IpPool { diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 9c91cf3e82..ebd8fe86b1 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -763,11 +763,6 @@ pub struct IpPoolSiloLink { pub is_default: bool, } -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct IpPoolSiloUnlink { - pub silo: NameOrId, -} - // INSTANCES /// Describes an attachment of an `InstanceNetworkInterface` to an `Instance`, From 1b718186e62cda7e5b220dc76df94bf53b6d58d5 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 6 Dec 2023 14:42:40 -0600 Subject: [PATCH 39/67] pacify a million angry integration tests --- nexus/src/app/sagas/instance_create.rs | 4 +- nexus/src/app/sagas/instance_delete.rs | 4 +- nexus/src/app/sagas/instance_migrate.rs | 8 +-- nexus/src/app/sagas/instance_start.rs | 4 +- nexus/src/app/sagas/snapshot_create.rs | 40 ++++++------ nexus/src/app/sagas/vpc_create.rs | 4 +- nexus/src/external_api/http_entrypoints.rs | 2 +- nexus/test-utils/src/resource_helpers.rs | 41 +++++++----- nexus/tests/integration_tests/disks.rs | 48 +++++++------- nexus/tests/integration_tests/instances.rs | 14 +--- nexus/tests/integration_tests/ip_pools.rs | 4 +- nexus/tests/integration_tests/metrics.rs | 6 +- nexus/tests/integration_tests/pantry.rs | 38 +++++------ nexus/tests/integration_tests/projects.rs | 6 +- nexus/tests/integration_tests/sleds.rs | 4 +- nexus/tests/integration_tests/snapshots.rs | 31 ++++----- .../integration_tests/subnet_allocation.rs | 4 +- .../integration_tests/volume_management.rs | 32 ++++----- nexus/tests/integration_tests/vpc_subnets.rs | 4 +- nexus/tests/output/nexus_tags.txt | 1 + openapi/nexus.json | 65 +++++++++++++++++++ 21 files changed, 208 insertions(+),
156 deletions(-) diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 153e0323e7..0850bfe30a 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -877,9 +877,9 @@ pub mod test { use nexus_db_queries::authn::saga::Serialized; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::datastore::DataStore; + use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_disk; use nexus_test_utils::resource_helpers::create_project; - use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::{ @@ -898,7 +898,7 @@ pub mod test { const DISK_NAME: &str = "my-disk"; async fn create_org_project_and_disk(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; create_disk(&client, PROJECT_NAME, DISK_NAME).await; project.identity.id diff --git a/nexus/src/app/sagas/instance_delete.rs b/nexus/src/app/sagas/instance_delete.rs index 1605465c74..16a1026d46 100644 --- a/nexus/src/app/sagas/instance_delete.rs +++ b/nexus/src/app/sagas/instance_delete.rs @@ -142,9 +142,9 @@ mod test { use nexus_db_queries::{ authn::saga::Serialized, context::OpContext, db, db::lookup::LookupPath, }; + use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_disk; use nexus_test_utils::resource_helpers::create_project; - use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::identity::Resource; @@ -163,7 +163,7 @@ mod test { const DISK_NAME: &str = "my-disk"; async fn create_org_project_and_disk(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; create_disk(&client, PROJECT_NAME, DISK_NAME).await; project.identity.id diff --git a/nexus/src/app/sagas/instance_migrate.rs b/nexus/src/app/sagas/instance_migrate.rs index d32a20bc40..8022cf81b7 100644 --- a/nexus/src/app/sagas/instance_migrate.rs +++ b/nexus/src/app/sagas/instance_migrate.rs @@ -480,10 +480,10 @@ mod tests { use camino::Utf8Path; use dropshot::test_util::ClientTestContext; use nexus_test_interface::NexusServer; - use nexus_test_utils::{ - resource_helpers::{create_project, object_create, populate_ip_pool}, - start_sled_agent, + use nexus_test_utils::resource_helpers::{ + create_default_ip_pool, create_project, object_create, }; + use nexus_test_utils::start_sled_agent; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::{ ByteCount, IdentityMetadataCreateParams, InstanceCpuCount, @@ -499,7 +499,7 @@ mod tests { const INSTANCE_NAME: &str = "test-instance"; async fn setup_test_project(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(&client, PROJECT_NAME).await; project.identity.id } diff --git a/nexus/src/app/sagas/instance_start.rs b/nexus/src/app/sagas/instance_start.rs index 76773d6369..0dc502c7b9 100644 --- a/nexus/src/app/sagas/instance_start.rs +++ b/nexus/src/app/sagas/instance_start.rs @@ -637,7 +637,7 @@ mod test { use 
dropshot::test_util::ClientTestContext; use nexus_db_queries::authn; use nexus_test_utils::resource_helpers::{ - create_project, object_create, populate_ip_pool, + create_default_ip_pool, create_project, object_create, }; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::{ @@ -654,7 +654,7 @@ mod test { const INSTANCE_NAME: &str = "test-instance"; async fn setup_test_project(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(&client, PROJECT_NAME).await; project.identity.id } diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index c3fe6fc327..c523b9b1b6 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -1563,8 +1563,8 @@ mod test { use nexus_db_queries::context::OpContext; use nexus_db_queries::db::datastore::InstanceAndActiveVmm; use nexus_db_queries::db::DataStore; + use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_disk; - use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::delete_disk; use nexus_test_utils::resource_helpers::object_create; @@ -1785,8 +1785,10 @@ mod test { const DISK_NAME: &str = "disky-mcdiskface"; const INSTANCE_NAME: &str = "base-instance"; - async fn create_org_project_and_disk(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None, None).await; + async fn create_project_and_disk_and_pool( + client: &ClientTestContext, + ) -> Uuid { + create_default_ip_pool(&client).await; create_project(client, PROJECT_NAME).await; create_disk(client, PROJECT_NAME, DISK_NAME).await.identity.id } @@ -1833,7 +1835,7 @@ mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx().nexus; - let disk_id = create_org_project_and_disk(&client).await; + let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters let opctx = test_opctx(cptestctx); @@ -2022,7 +2024,7 @@ mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx().nexus; - let disk_id = create_org_project_and_disk(&client).await; + let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters let opctx = test_opctx(&cptestctx); @@ -2047,13 +2049,11 @@ mod test { populate_ip_pool( &client, "default", - Some( - IpRange::try_from(( - Ipv4Addr::new(10, 1, 0, 0), - Ipv4Addr::new(10, 1, 255, 255), - )) - .unwrap(), - ), + IpRange::try_from(( + Ipv4Addr::new(10, 1, 0, 0), + Ipv4Addr::new(10, 1, 255, 255), + )) + .unwrap(), ) .await; } @@ -2182,7 +2182,7 @@ mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx().nexus; - let disk_id = create_org_project_and_disk(&client).await; + let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters let opctx = test_opctx(cptestctx); @@ -2291,7 +2291,7 @@ mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx().nexus; - let disk_id = create_org_project_and_disk(&client).await; + let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters let opctx = test_opctx(cptestctx); @@ -2355,13 +2355,11 @@ mod test { populate_ip_pool( &client, 
"default", - Some( - IpRange::try_from(( - Ipv4Addr::new(10, 1, 0, 0), - Ipv4Addr::new(10, 1, 255, 255), - )) - .unwrap(), - ), + IpRange::try_from(( + Ipv4Addr::new(10, 1, 0, 0), + Ipv4Addr::new(10, 1, 255, 255), + )) + .unwrap(), ) .await; diff --git a/nexus/src/app/sagas/vpc_create.rs b/nexus/src/app/sagas/vpc_create.rs index 4b5bedf41e..6b48e4087a 100644 --- a/nexus/src/app/sagas/vpc_create.rs +++ b/nexus/src/app/sagas/vpc_create.rs @@ -455,8 +455,8 @@ pub(crate) mod test { db::datastore::DataStore, db::fixed_data::vpc::SERVICES_VPC_ID, db::lookup::LookupPath, }; + use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_project; - use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Name; @@ -469,7 +469,7 @@ pub(crate) mod test { const PROJECT_NAME: &str = "springfield-squidport"; async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; project.identity.id } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index b842c81c7d..bf725e2c57 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -1425,7 +1425,7 @@ async fn ip_pool_silo_unlink( /// Make an IP pool the default for a silo #[endpoint { method = POST, - path = "/v1/system/ip-pools/{pool}/make_default", + path = "/v1/system/ip-pools/{pool}/make-default", tags = ["system/networking"], }] async fn ip_pool_make_default( diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 20450a51a8..482ce9f8fb 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -12,6 +12,7 @@ use dropshot::test_util::ClientTestContext; use dropshot::HttpErrorResponseBody; use dropshot::Method; use http::StatusCode; +use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_test_interface::NexusServer; use nexus_types::external_api::params; use nexus_types::external_api::params::PhysicalDiskKind; @@ -25,6 +26,7 @@ use nexus_types::external_api::views::IpPool; use nexus_types::external_api::views::IpPoolRange; use nexus_types::external_api::views::User; use nexus_types::external_api::views::{Project, Silo, Vpc, VpcRouter}; +use nexus_types::identity::Resource; use nexus_types::internal_api::params as internal_params; use nexus_types::internal_api::params::Baseboard; use omicron_common::api::external::ByteCount; @@ -32,6 +34,7 @@ use omicron_common::api::external::Disk; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Instance; use omicron_common::api::external::InstanceCpuCount; +use omicron_common::api::external::NameOrId; // use omicron_common::api::external::Name; // use omicron_common::api::external::NameOrId; use omicron_sled_agent::sim::SledAgent; @@ -105,23 +108,10 @@ pub async fn object_delete(client: &ClientTestContext, path: &str) { pub async fn populate_ip_pool( client: &ClientTestContext, pool_name: &str, - ip_range: Option, + ip_range: IpRange, ) -> IpPoolRange { - let ip_range = ip_range.unwrap_or_else(|| { - use std::net::Ipv4Addr; - IpRange::try_from(( - Ipv4Addr::new(10, 0, 0, 0), - Ipv4Addr::new(10, 0, 255, 255), - )) - .unwrap() 
- }); - let range = object_create( - client, - format!("/v1/system/ip-pools/{}/ranges/add", pool_name).as_str(), - &ip_range, - ) - .await; - range + let url = format!("/v1/system/ip-pools/{}/ranges/add", pool_name); + object_create(client, &url, &ip_range).await } /// Create an IP pool with a single range for testing. @@ -157,10 +147,29 @@ pub async fn create_ip_pool( .await; } + let ip_range = ip_range.unwrap_or_else(|| { + use std::net::Ipv4Addr; + IpRange::try_from(( + Ipv4Addr::new(10, 0, 0, 0), + Ipv4Addr::new(10, 0, 255, 255), + )) + .unwrap() + }); let range = populate_ip_pool(client, pool_name, ip_range).await; (pool, range) } +pub async fn create_default_ip_pool( + client: &ClientTestContext, +) -> views::IpPool { + let link = params::IpPoolSiloLink { + silo: NameOrId::Id(DEFAULT_SILO.id()), + is_default: true, + }; + let (pool, ..) = create_ip_pool(&client, "default", None, Some(link)).await; + pool +} + pub async fn create_certificate( client: &ClientTestContext, cert_name: &str, diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index a5a8339c34..3c2a631709 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -18,12 +18,12 @@ use nexus_test_utils::http_testing::Collection; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::identity_eq; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_disk; use nexus_test_utils::resource_helpers::create_instance; use nexus_test_utils::resource_helpers::create_instance_with; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::objects_list_page_authz; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; @@ -95,8 +95,8 @@ fn get_disk_detach_url(instance: &NameOrId) -> String { } } -async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; +async fn create_project_and_pool(client: &ClientTestContext) -> Uuid { + create_default_ip_pool(client).await; let project = create_project(client, PROJECT_NAME).await; project.identity.id } @@ -107,7 +107,7 @@ async fn test_disk_not_found_before_creation( ) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // List disks. There aren't any yet. @@ -186,7 +186,7 @@ async fn test_disk_create_attach_detach_delete( ) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - let project_id = create_org_and_project(client).await; + let project_id = create_project_and_pool(client).await; let nexus = &cptestctx.server.apictx().nexus; let disks_url = get_disks_url(); @@ -315,7 +315,7 @@ async fn test_disk_create_disk_that_already_exists_fails( ) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // Create a disk. 
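Taken together, the resource_helpers changes above reshape test setup: populate_ip_pool now requires an explicit range, create_ip_pool takes an optional silo link, and create_default_ip_pool wraps the common case. A sketch of how a test would now stand up a default pool plus a second, non-default pool (range values here are illustrative):

    // Default pool linked to the default silo (falls back to the stock
    // 10.0.0.0/16 range when no range is given).
    create_default_ip_pool(&client).await;

    // A second pool linked to the same silo, but not the default.
    let link = params::IpPoolSiloLink {
        silo: NameOrId::Id(DEFAULT_SILO.id()),
        is_default: false,
    };
    let range = IpRange::try_from((
        std::net::Ipv4Addr::new(10, 1, 0, 1),
        std::net::Ipv4Addr::new(10, 1, 0, 5),
    ))
    .unwrap();
    create_ip_pool(&client, "other-pool", Some(range), Some(link)).await;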
@@ -360,7 +360,7 @@ async fn test_disk_create_disk_that_already_exists_fails( async fn test_disk_slot_assignment(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let nexus = &cptestctx.server.apictx().nexus; let disk_names = ["a", "b", "c", "d"]; @@ -467,7 +467,7 @@ async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(&client).await; + create_project_and_pool(&client).await; let disks_url = get_disks_url(); // Create a disk. @@ -670,7 +670,7 @@ async fn test_disk_creation_region_requested_then_started( ) { let client = &cptestctx.external_client; let test = DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Before we create a disk, set the response from the Crucible Agent: // no matter what regions get requested, they'll always *start* as @@ -689,7 +689,7 @@ async fn test_disk_region_creation_failure( ) { let client = &cptestctx.external_client; let test = DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Before we create a disk, set the response from the Crucible Agent: // no matter what regions get requested, they'll always fail. @@ -745,7 +745,7 @@ async fn test_disk_invalid_block_size_rejected( ) { let client = &cptestctx.external_client; let _test = DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Attempt to allocate the disk, observe a server error. let disk_size = ByteCount::from_gibibytes_u32(3); @@ -788,7 +788,7 @@ async fn test_disk_reject_total_size_not_divisible_by_block_size( ) { let client = &cptestctx.external_client; let _test = DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Attempt to allocate the disk, observe a server error. let disk_size = ByteCount::from(3 * 1024 * 1024 * 1024 + 256); @@ -829,7 +829,7 @@ async fn test_disk_reject_total_size_less_than_min_disk_size_bytes( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disk_size = ByteCount::from(MIN_DISK_SIZE_BYTES / 2); @@ -871,7 +871,7 @@ async fn test_disk_reject_total_size_greater_than_max_disk_size_bytes( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disk_size = ByteCount::try_from(MAX_DISK_SIZE_BYTES + (1 << 30)).unwrap(); @@ -916,7 +916,7 @@ async fn test_disk_reject_total_size_not_divisible_by_min_disk_size( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disk_size = ByteCount::from(1024 * 1024 * 1024 + 512); @@ -971,7 +971,7 @@ async fn test_disk_backed_by_multiple_region_sets( test.add_zpool_with_dataset(cptestctx, 10).await; test.add_zpool_with_dataset(cptestctx, 10).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Ask for a 20 gibibyte disk. 
let disk_size = ByteCount::from_gibibytes_u32(20); @@ -1004,7 +1004,7 @@ async fn test_disk_backed_by_multiple_region_sets( async fn test_disk_too_big(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Assert default is still 10 GiB assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB); @@ -1044,7 +1044,7 @@ async fn test_disk_virtual_provisioning_collection( let _test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(client).await; let project_id1 = create_project(client, PROJECT_NAME).await.identity.id; let project_id2 = create_project(client, PROJECT_NAME_2).await.identity.id; @@ -1254,7 +1254,7 @@ async fn test_disk_size_accounting(cptestctx: &ControlPlaneTestContext) { // Assert default is still 10 GiB assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB); - create_org_and_project(client).await; + create_project_and_pool(client).await; // Total occupied size should start at 0 for zpool in &test.zpools { @@ -1423,7 +1423,7 @@ async fn test_multiple_disks_multiple_zpools( test.add_zpool_with_dataset(cptestctx, 10).await; test.add_zpool_with_dataset(cptestctx, 10).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Ask for a 10 gibibyte disk, this should succeed let disk_size = ByteCount::from_gibibytes_u32(10); @@ -1500,7 +1500,7 @@ async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) { let oximeter = &cptestctx.oximeter; let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - let project_id = create_org_and_project(client).await; + let project_id = create_project_and_pool(client).await; let disk = create_disk(&client, PROJECT_NAME, DISK_NAME).await; oximeter.force_collect().await; @@ -1573,7 +1573,7 @@ async fn test_disk_metrics_paginated(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk(&client, PROJECT_NAME, DISK_NAME).await; create_instance_with_disk(client).await; @@ -1635,7 +1635,7 @@ async fn test_disk_metrics_paginated(cptestctx: &ControlPlaneTestContext) { async fn test_disk_create_for_importing(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); let new_disk = params::DiskCreate { @@ -1678,7 +1678,7 @@ async fn test_project_delete_disk_no_auth_idempotent( ) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Create a disk let disks_url = get_disks_url(); diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index c5d4abc23b..cb1250c198 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -17,6 +17,7 @@ use nexus_test_interface::NexusServer; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_disk; use nexus_test_utils::resource_helpers::create_ip_pool; use 
nexus_test_utils::resource_helpers::create_local_user; @@ -99,15 +100,6 @@ fn default_vpc_subnets_url() -> String { format!("/v1/vpc-subnets?{}&vpc=default", get_project_selector()) } -async fn create_default_ip_pool(client: &ClientTestContext) -> views::IpPool { - let link = params::IpPoolSiloLink { - silo: NameOrId::Id(DEFAULT_SILO.id()), - is_default: true, - }; - let (pool, ..) = create_ip_pool(&client, "default", None, Some(link)).await; - pool -} - async fn create_project_and_pool(client: &ClientTestContext) -> views::Project { create_default_ip_pool(client).await; create_project(client, PROJECT_NAME).await @@ -3612,7 +3604,7 @@ async fn test_instance_ephemeral_ip_from_correct_pool( // make pool2 default and create instance with default pool. check that it now it comes from pool2 let _: views::IpPoolSilo = object_create( client, - "/v1/system/ip-pools/pool2/make_default", + "/v1/system/ip-pools/pool2/make-default", ¶ms::SiloSelector { silo: silo.clone() }, ) .await; @@ -3674,7 +3666,7 @@ async fn test_instance_ephemeral_ip_from_orphan_pool( ) .unwrap(), ); - populate_ip_pool(client, pool_name, Some(orphan_pool_range)).await; + populate_ip_pool(client, pool_name, orphan_pool_range).await; // this should 404 let instance_name = "orphan-pool-inst"; diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index ca8308a441..6b6c573f93 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -427,11 +427,11 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { // make p0's pool default and show that it changes let params = SiloSelector { silo: silo.clone() }; let _: IpPoolSilo = - object_create(client, "/v1/system/ip-pools/p0/make_default", ¶ms) + object_create(client, "/v1/system/ip-pools/p0/make-default", ¶ms) .await; // making the same one default again is not an error let _: IpPoolSilo = - object_create(client, "/v1/system/ip-pools/p0/make_default", ¶ms) + object_create(client, "/v1/system/ip-pools/p0/make-default", ¶ms) .await; let silos_p0 = silos_for_pool(client, "p0").await; diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs index 89dd2e3cc6..5f517d49e0 100644 --- a/nexus/tests/integration_tests/metrics.rs +++ b/nexus/tests/integration_tests/metrics.rs @@ -9,8 +9,8 @@ use http::{Method, StatusCode}; use nexus_db_queries::db::fixed_data::silo::SILO_ID; use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils::resource_helpers::{ - create_disk, create_instance, create_project, objects_list_page_authz, - populate_ip_pool, DiskTest, + create_default_ip_pool, create_disk, create_instance, create_project, + objects_list_page_authz, DiskTest, }; use nexus_test_utils::ControlPlaneTestContext; use nexus_test_utils_macros::nexus_test; @@ -168,7 +168,7 @@ async fn test_metrics( let client = &cptestctx.external_client; cptestctx.server.register_as_producer().await; // needed for oximeter metrics to work - populate_ip_pool(&client, "default", None).await; // needed for instance create to work + create_default_ip_pool(&client).await; // needed for instance create to work DiskTest::new(cptestctx).await; // needed for disk create to work // silo metrics start out zero diff --git a/nexus/tests/integration_tests/pantry.rs b/nexus/tests/integration_tests/pantry.rs index 26e27e92ee..a762f4dcf1 100644 --- a/nexus/tests/integration_tests/pantry.rs +++ b/nexus/tests/integration_tests/pantry.rs @@ -11,10 
+11,10 @@ use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::identity_eq; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_instance; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::object_create; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; @@ -54,8 +54,8 @@ fn get_disk_attach_url(instance_name: &str) -> String { ) } -async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - populate_ip_pool(&client, "default", None).await; +async fn create_project_and_pool(client: &ClientTestContext) -> Uuid { + create_default_ip_pool(client).await; let project = create_project(client, PROJECT_NAME).await; project.identity.id } @@ -369,7 +369,7 @@ async fn validate_disk_state(client: &ClientTestContext, state: DiskState) { async fn test_disk_create_for_importing(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); let new_disk = params::DiskCreate { @@ -415,7 +415,7 @@ async fn test_cannot_mount_import_ready_disk( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -446,7 +446,7 @@ async fn test_cannot_mount_import_from_bulk_writes_disk( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -468,7 +468,7 @@ async fn test_import_blocks_from_url(cptestctx: &ControlPlaneTestContext) { let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -497,7 +497,7 @@ async fn test_import_blocks_with_bulk_write( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -538,7 +538,7 @@ async fn test_import_blocks_with_bulk_write_with_snapshot( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -589,7 +589,7 @@ async fn test_cannot_finalize_without_stopping_bulk_writes( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -618,7 +618,7 @@ async fn test_cannot_bulk_write_to_unaligned_offset( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -651,7 +651,7 @@ async fn test_cannot_bulk_write_data_not_block_size_multiple( let client = 
&cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -683,7 +683,7 @@ async fn test_cannot_bulk_write_data_past_end_of_disk( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -715,7 +715,7 @@ async fn test_cannot_bulk_write_data_non_base64( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -753,7 +753,7 @@ async fn test_can_stop_start_import_from_bulk_write( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -781,7 +781,7 @@ async fn test_cannot_bulk_write_start_attached_disk( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -811,7 +811,7 @@ async fn test_cannot_bulk_write_attached_disk( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -841,7 +841,7 @@ async fn test_cannot_bulk_write_stop_attached_disk( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; @@ -870,7 +870,7 @@ async fn test_cannot_finalize_attached_disk( let nexus = &cptestctx.server.apictx().nexus; DiskTest::new(&cptestctx).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; create_disk_with_state_importing_blocks(client).await; diff --git a/nexus/tests/integration_tests/projects.rs b/nexus/tests/integration_tests/projects.rs index 24b2721a1d..d9d6ceef5b 100644 --- a/nexus/tests/integration_tests/projects.rs +++ b/nexus/tests/integration_tests/projects.rs @@ -10,8 +10,8 @@ use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::resource_helpers::{ - create_disk, create_project, create_vpc, object_create, populate_ip_pool, - project_get, projects_list, DiskTest, + create_default_ip_pool, create_disk, create_project, create_vpc, + object_create, project_get, projects_list, DiskTest, }; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; @@ -134,7 +134,7 @@ async fn test_project_deletion_with_instance( ) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; // Create a project that we'll use for testing. 
let name = "springfield-squidport"; diff --git a/nexus/tests/integration_tests/sleds.rs b/nexus/tests/integration_tests/sleds.rs index a166280ead..5e399cbe84 100644 --- a/nexus/tests/integration_tests/sleds.rs +++ b/nexus/tests/integration_tests/sleds.rs @@ -7,12 +7,12 @@ use camino::Utf8Path; use dropshot::test_util::ClientTestContext; use nexus_test_interface::NexusServer; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_instance; use nexus_test_utils::resource_helpers::create_physical_disk; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::delete_physical_disk; use nexus_test_utils::resource_helpers::objects_list_page_authz; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::start_sled_agent; use nexus_test_utils::SLED_AGENT_UUID; use nexus_test_utils_macros::nexus_test; @@ -144,7 +144,7 @@ async fn test_sled_instance_list(cptestctx: &ControlPlaneTestContext) { .is_empty()); // Create an IP pool and project that we'll use for testing. - populate_ip_pool(&external_client, "default", None).await; + create_default_ip_pool(&external_client).await; let project = create_project(&external_client, "test-project").await; let instance = create_instance(&external_client, "test-project", "test-instance") diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index 1dd32e6769..de56b4da75 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -17,9 +17,9 @@ use nexus_db_queries::db::lookup::LookupPath; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::object_create; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; @@ -50,7 +50,8 @@ fn get_disk_url(name: &str) -> String { format!("/v1/disks/{}?project={}", name, PROJECT_NAME) } -async fn create_org_and_project(client: &ClientTestContext) -> Uuid { +async fn create_project_and_pool(client: &ClientTestContext) -> Uuid { + create_default_ip_pool(client).await; let project = create_project(client, PROJECT_NAME).await; project.identity.id } @@ -59,8 +60,7 @@ async fn create_org_and_project(client: &ClientTestContext) -> Uuid { async fn test_snapshot_basic(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // Define a global image @@ -179,8 +179,7 @@ async fn test_snapshot_basic(cptestctx: &ControlPlaneTestContext) { async fn test_snapshot_without_instance(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // Define a global image @@ -294,8 +293,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { let nexus = &cptestctx.server.apictx().nexus; let datastore = 
nexus.datastore(); DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - let project_id = create_org_and_project(client).await; + let project_id = create_project_and_pool(client).await; let disks_url = get_disks_url(); // Create a blank disk @@ -454,7 +452,7 @@ async fn test_reject_creating_disk_from_snapshot( let nexus = &cptestctx.server.apictx().nexus; let datastore = nexus.datastore(); - let project_id = create_org_and_project(&client).await; + let project_id = create_project_and_pool(&client).await; let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -607,7 +605,7 @@ async fn test_reject_creating_disk_from_illegal_snapshot( let nexus = &cptestctx.server.apictx().nexus; let datastore = nexus.datastore(); - let project_id = create_org_and_project(&client).await; + let project_id = create_project_and_pool(&client).await; let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -703,7 +701,7 @@ async fn test_reject_creating_disk_from_other_project_snapshot( let nexus = &cptestctx.server.apictx().nexus; let datastore = nexus.datastore(); - let project_id = create_org_and_project(&client).await; + let project_id = create_project_and_pool(&client).await; let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -783,8 +781,7 @@ async fn test_cannot_snapshot_if_no_space(cptestctx: &ControlPlaneTestContext) { // Test that snapshots cannot be created if there is no space for the blocks let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // Create a disk at just over half the capacity of what DiskTest allocates @@ -837,8 +834,7 @@ async fn test_cannot_snapshot_if_no_space(cptestctx: &ControlPlaneTestContext) { async fn test_snapshot_unwind(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // Define a global image @@ -952,7 +948,7 @@ async fn test_create_snapshot_record_idempotent( let nexus = &cptestctx.server.apictx().nexus; let datastore = nexus.datastore(); - let project_id = create_org_and_project(&client).await; + let project_id = create_project_and_pool(&client).await; let disk_id = Uuid::new_v4(); let snapshot = db::model::Snapshot { @@ -1140,8 +1136,7 @@ async fn test_multiple_deletes_not_sent(cptestctx: &ControlPlaneTestContext) { let nexus = &cptestctx.server.apictx().nexus; let datastore = nexus.datastore(); DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - let _project_id = create_org_and_project(client).await; + let _project_id = create_project_and_pool(client).await; let disks_url = get_disks_url(); // Create a blank disk diff --git a/nexus/tests/integration_tests/subnet_allocation.rs b/nexus/tests/integration_tests/subnet_allocation.rs index 7f5c27384c..91a933754c 100644 --- a/nexus/tests/integration_tests/subnet_allocation.rs +++ b/nexus/tests/integration_tests/subnet_allocation.rs @@ -13,10 +13,10 @@ use ipnetwork::Ipv4Network; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; +use 
nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_instance_with; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::objects_list_page_authz; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use omicron_common::api::external::{ @@ -84,7 +84,7 @@ async fn test_subnet_allocation(cptestctx: &ControlPlaneTestContext) { let project_name = "springfield-squidport"; // Create a project that we'll use for testing. - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; create_project(&client, project_name).await; let url_instances = format!("/v1/instances?project={}", project_name); diff --git a/nexus/tests/integration_tests/volume_management.rs b/nexus/tests/integration_tests/volume_management.rs index 24a0e5591b..b5355631bd 100644 --- a/nexus/tests/integration_tests/volume_management.rs +++ b/nexus/tests/integration_tests/volume_management.rs @@ -12,9 +12,9 @@ use nexus_db_queries::db::DataStore; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::object_create; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; @@ -53,14 +53,14 @@ fn get_snapshot_url(snapshot: &str) -> String { format!("/v1/snapshots/{}?project={}", snapshot, PROJECT_NAME) } -async fn create_org_and_project(client: &ClientTestContext) -> Uuid { +async fn create_project_and_pool(client: &ClientTestContext) -> Uuid { + create_default_ip_pool(client).await; let project = create_project(client, PROJECT_NAME).await; project.identity.id } async fn create_image(client: &ClientTestContext) -> views::Image { - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; // Define a global image let server = ServerBuilder::new().run().unwrap(); @@ -427,8 +427,7 @@ async fn test_multiple_disks_multiple_snapshots_order_1( // Test multiple disks with multiple snapshots let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // Create a blank disk @@ -563,8 +562,7 @@ async fn test_multiple_disks_multiple_snapshots_order_2( // Test multiple disks with multiple snapshots, varying the delete order let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); // Create a blank disk @@ -833,8 +831,7 @@ async fn test_multiple_layers_of_snapshots_delete_all_disks_first( // delete all disks, then delete all snapshots let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; 
prepare_for_test_multiple_layers_of_snapshots(&client).await; @@ -872,8 +869,7 @@ async fn test_multiple_layers_of_snapshots_delete_all_snapshots_first( // delete all snapshots, then delete all disks let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; prepare_for_test_multiple_layers_of_snapshots(&client).await; @@ -911,8 +907,7 @@ async fn test_multiple_layers_of_snapshots_random_delete_order( // delete snapshots and disks in a random order let client = &cptestctx.external_client; let disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; prepare_for_test_multiple_layers_of_snapshots(&client).await; @@ -1132,8 +1127,7 @@ async fn delete_image_test( let disk_test = DiskTest::new(&cptestctx).await; let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disks_url = get_disks_url(); @@ -2361,8 +2355,7 @@ async fn test_disk_create_saga_unwinds_correctly( // created. let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disk_test = DiskTest::new(&cptestctx).await; let disks_url = get_disks_url(); @@ -2414,8 +2407,7 @@ async fn test_snapshot_create_saga_unwinds_correctly( // created. let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; - create_org_and_project(client).await; + create_project_and_pool(client).await; let disk_test = DiskTest::new(&cptestctx).await; let disks_url = get_disks_url(); diff --git a/nexus/tests/integration_tests/vpc_subnets.rs b/nexus/tests/integration_tests/vpc_subnets.rs index 3067300e19..76cff9ac79 100644 --- a/nexus/tests/integration_tests/vpc_subnets.rs +++ b/nexus/tests/integration_tests/vpc_subnets.rs @@ -14,7 +14,7 @@ use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::identity_eq; use nexus_test_utils::resource_helpers::objects_list_page_authz; use nexus_test_utils::resource_helpers::{ - create_instance, create_project, create_vpc, populate_ip_pool, + create_default_ip_pool, create_instance, create_project, create_vpc, }; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::{params, views::VpcSubnet}; @@ -37,8 +37,8 @@ async fn test_delete_vpc_subnet_with_interfaces_fails( // Create a project that we'll use for testing. 
let project_name = "springfield-squidport"; let instance_name = "inst"; + create_default_ip_pool(client).await; let _ = create_project(&client, project_name).await; - populate_ip_pool(client, "default", None).await; let subnets_url = format!("/v1/vpc-subnets?project={}&vpc=default", project_name); diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 4e17b77713..961d3dc094 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -135,6 +135,7 @@ OPERATION ID METHOD URL PATH ip_pool_create POST /v1/system/ip-pools ip_pool_delete DELETE /v1/system/ip-pools/{pool} ip_pool_list GET /v1/system/ip-pools +ip_pool_make_default POST /v1/system/ip-pools/{pool}/make-default ip_pool_range_add POST /v1/system/ip-pools/{pool}/ranges/add ip_pool_range_list GET /v1/system/ip-pools/{pool}/ranges ip_pool_range_remove POST /v1/system/ip-pools/{pool}/ranges/remove diff --git a/openapi/nexus.json b/openapi/nexus.json index 71e9533483..15c1c9d63f 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -4685,6 +4685,54 @@ } } }, + "/v1/system/ip-pools/{pool}/make-default": { + "post": { + "tags": [ + "system/networking" + ], + "summary": "Make an IP pool the default for a silo", + "operationId": "ip_pool_make_default", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloSelector" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolSilo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/v1/system/ip-pools/{pool}/ranges": { "get": { "tags": [ @@ -4968,6 +5016,7 @@ { "in": "query", "name": "silo", + "description": "Name or ID of the silo", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -13217,6 +13266,22 @@ "role_name" ] }, + "SiloSelector": { + "type": "object", + "properties": { + "silo": { + "description": "Name or ID of the silo", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + } + }, + "required": [ + "silo" + ] + }, "Sled": { "description": "An operator's view of a Sled.", "type": "object", From 75c02905132a69b4ab23775edc3cf8d425289191 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 6 Dec 2023 16:24:03 -0600 Subject: [PATCH 40/67] clean up mess of ip pool test helpers. 
now pristine-ish --- nexus/src/app/sagas/disk_create.rs | 20 ++++---- nexus/src/app/sagas/disk_delete.rs | 13 +---- nexus/src/app/sagas/snapshot_create.rs | 30 ------------ nexus/test-utils/src/resource_helpers.rs | 46 ++++++++---------- nexus/tests/integration_tests/instances.rs | 55 +++++----------------- 5 files changed, 41 insertions(+), 123 deletions(-) diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index d8213eaea2..ed67c83d48 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -836,10 +836,8 @@ pub(crate) mod test { use diesel::{ ExpressionMethods, OptionalExtension, QueryDsl, SelectableHelper, }; - use dropshot::test_util::ClientTestContext; use nexus_db_queries::context::OpContext; use nexus_db_queries::{authn::saga::Serialized, db::datastore::DataStore}; - use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; @@ -855,12 +853,6 @@ pub(crate) mod test { const DISK_NAME: &str = "my-disk"; const PROJECT_NAME: &str = "springfield-squidport"; - async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None, None).await; - let project = create_project(client, PROJECT_NAME).await; - project.identity.id - } - pub fn new_disk_create_params() -> params::DiskCreate { params::DiskCreate { identity: IdentityMetadataCreateParams { @@ -898,7 +890,8 @@ pub(crate) mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx().nexus; - let project_id = create_org_and_project(&client).await; + let project_id = + create_project(&client, PROJECT_NAME).await.identity.id; // Build the saga DAG with the provided test parameters let opctx = test_opctx(cptestctx); @@ -1069,7 +1062,8 @@ pub(crate) mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx().nexus; - let project_id = create_org_and_project(&client).await; + let project_id = + create_project(&client, PROJECT_NAME).await.identity.id; let opctx = test_opctx(cptestctx); crate::app::sagas::test_helpers::action_failure_can_unwind::< @@ -1098,7 +1092,8 @@ pub(crate) mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx.nexus; - let project_id = create_org_and_project(&client).await; + let project_id = + create_project(&client, PROJECT_NAME).await.identity.id; let opctx = test_opctx(&cptestctx); crate::app::sagas::test_helpers::action_failure_can_unwind_idempotently::< @@ -1138,7 +1133,8 @@ pub(crate) mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx.nexus; - let project_id = create_org_and_project(&client).await; + let project_id = + create_project(&client, PROJECT_NAME).await.identity.id; // Build the saga DAG with the provided test parameters let opctx = test_opctx(&cptestctx); diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index f46847b808..e735c4790f 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -171,29 +171,20 @@ pub(crate) mod test { app::saga::create_saga_dag, app::sagas::disk_delete::Params, app::sagas::disk_delete::SagaDiskDelete, }; - use dropshot::test_util::ClientTestContext; use nexus_db_model::Disk; use nexus_db_queries::authn::saga::Serialized; use nexus_db_queries::context::OpContext; - use nexus_test_utils::resource_helpers::create_ip_pool; use 
nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use omicron_common::api::external::Name; - use uuid::Uuid; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; const PROJECT_NAME: &str = "springfield-squidport"; - async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None, None).await; - let project = create_project(client, PROJECT_NAME).await; - project.identity.id - } - pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), @@ -229,7 +220,7 @@ pub(crate) mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx.nexus; - let project_id = create_org_and_project(&client).await; + let project_id = create_project(client, PROJECT_NAME).await.identity.id; let disk = create_disk(&cptestctx).await; // Build the saga DAG with the provided test parameters @@ -255,7 +246,7 @@ pub(crate) mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx.nexus; - let project_id = create_org_and_project(&client).await; + let project_id = create_project(client, PROJECT_NAME).await.identity.id; let disk = create_disk(&cptestctx).await; // Build the saga DAG with the provided test parameters diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index c523b9b1b6..ed8c8ccebf 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -1554,7 +1554,6 @@ mod test { use crate::app::saga::create_saga_dag; use crate::app::sagas::test_helpers; - use crate::external_api::shared::IpRange; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::{ ExpressionMethods, OptionalExtension, QueryDsl, SelectableHelper, @@ -1568,7 +1567,6 @@ mod test { use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::delete_disk; use nexus_test_utils::resource_helpers::object_create; - use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params::InstanceDiskAttachment; @@ -1580,7 +1578,6 @@ mod test { use omicron_common::api::external::NameOrId; use sled_agent_client::types::CrucibleOpts; use sled_agent_client::TestInterfaces as SledAgentTestInterfaces; - use std::net::Ipv4Addr; use std::str::FromStr; #[test] @@ -2042,22 +2039,6 @@ mod test { // before the first attempt to run the saga recreates it. delete_disk(client, PROJECT_NAME, DISK_NAME).await; - // The no-pantry variant of the test needs to see the disk attached to - // an instance. Set up an IP pool so that instances can be created - // against it. 
- if !use_the_pantry { - populate_ip_pool( - &client, - "default", - IpRange::try_from(( - Ipv4Addr::new(10, 1, 0, 0), - Ipv4Addr::new(10, 1, 255, 255), - )) - .unwrap(), - ) - .await; - } - crate::app::sagas::test_helpers::action_failure_can_unwind::< SagaSnapshotCreate, _, @@ -2352,17 +2333,6 @@ mod test { assert!(output.is_err()); // Attach the disk to an instance, then rerun the saga - populate_ip_pool( - &client, - "default", - IpRange::try_from(( - Ipv4Addr::new(10, 1, 0, 0), - Ipv4Addr::new(10, 1, 255, 255), - )) - .unwrap(), - ) - .await; - let instance_state = setup_test_instance( cptestctx, client, diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 482ce9f8fb..b7bcbff536 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -105,15 +105,6 @@ pub async fn object_delete(client: &ClientTestContext, path: &str) { }); } -pub async fn populate_ip_pool( - client: &ClientTestContext, - pool_name: &str, - ip_range: IpRange, -) -> IpPoolRange { - let url = format!("/v1/system/ip-pools/{}/ranges/add", pool_name); - object_create(client, &url, &ip_range).await -} - /// Create an IP pool with a single range for testing. /// /// The IP range may be specified if it's important for testing the behavior @@ -123,9 +114,6 @@ pub async fn create_ip_pool( client: &ClientTestContext, pool_name: &str, ip_range: Option, - // TODO: could change this to is_default -- always associate with default - // silo, let caller decide whether it's the default - silo_link: Option, ) -> (IpPool, IpPoolRange) { let pool = object_create( client, @@ -139,14 +127,6 @@ pub async fn create_ip_pool( ) .await; - if let Some(silo_link) = silo_link { - let url = format!("/v1/system/ip-pools/{pool_name}/silos"); - object_create::( - client, &url, &silo_link, - ) - .await; - } - let ip_range = ip_range.unwrap_or_else(|| { use std::net::Ipv4Addr; IpRange::try_from(( @@ -155,18 +135,32 @@ pub async fn create_ip_pool( )) .unwrap() }); - let range = populate_ip_pool(client, pool_name, ip_range).await; + let url = format!("/v1/system/ip-pools/{}/ranges/add", pool_name); + let range = object_create(client, &url, &ip_range).await; (pool, range) } +pub async fn link_ip_pool( + client: &ClientTestContext, + pool_name: &str, + silo_id: &Uuid, + is_default: bool, +) { + let link = + params::IpPoolSiloLink { silo: NameOrId::Id(*silo_id), is_default }; + let url = format!("/v1/system/ip-pools/{pool_name}/silos"); + object_create::( + client, &url, &link, + ) + .await; +} + +/// What you want for any test that is not testing IP logic specifically pub async fn create_default_ip_pool( client: &ClientTestContext, ) -> views::IpPool { - let link = params::IpPoolSiloLink { - silo: NameOrId::Id(DEFAULT_SILO.id()), - is_default: true, - }; - let (pool, ..) = create_ip_pool(&client, "default", None, Some(link)).await; + let (pool, ..) 
= create_ip_pool(&client, "default", None).await; + link_ip_pool(&client, "default", &DEFAULT_SILO.id(), true).await; pool } diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index cb1250c198..2a3e0ebed4 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -23,9 +23,9 @@ use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_local_user; use nexus_test_utils::resource_helpers::create_silo; use nexus_test_utils::resource_helpers::grant_iam; +use nexus_test_utils::resource_helpers::link_ip_pool; use nexus_test_utils::resource_helpers::object_create; use nexus_test_utils::resource_helpers::objects_list_page_authz; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils::start_sled_agent; use nexus_types::external_api::shared::IpKind; @@ -3575,14 +3575,12 @@ async fn test_instance_ephemeral_ip_from_correct_pool( ); // make first pool the default for the priv user's silo - dbg!(DEFAULT_SILO.id()); - let silo = NameOrId::Id(DEFAULT_SILO.id()); - let link = params::IpPoolSiloLink { silo: silo.clone(), is_default: true }; - create_ip_pool(&client, "pool1", Some(range1), Some(link)).await; + create_ip_pool(&client, "pool1", Some(range1)).await; + link_ip_pool(&client, "pool1", &DEFAULT_SILO.id(), /*default*/ true).await; // second pool is associated with the silo but not default - let link = params::IpPoolSiloLink { silo: silo.clone(), is_default: false }; - create_ip_pool(&client, "pool2", Some(range2), Some(link)).await; + create_ip_pool(&client, "pool2", Some(range2)).await; + link_ip_pool(&client, "pool2", &DEFAULT_SILO.id(), /*default*/ false).await; // Create an instance with pool name blank, expect IP from default pool create_instance_with_pool(client, "pool1-inst", None).await; @@ -3605,7 +3603,7 @@ let _: views::IpPoolSilo = object_create( client, "/v1/system/ip-pools/pool2/make-default", - &params::SiloSelector { silo: silo.clone() }, + &params::SiloSelector { silo: NameOrId::Id(DEFAULT_SILO.id()) }, ) .await; @@ -3627,37 +3625,9 @@ async fn test_instance_ephemeral_ip_from_orphan_pool( let _ = create_project(&client, PROJECT_NAME).await; - // have to give default pool a range or snat IP allocation fails before it - // can get to failing on ephemeral IP allocation - let default_pool_range = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 1), - std::net::Ipv4Addr::new(10, 0, 0, 5), - ) - .unwrap(), - ); - // make first pool the default for the priv user's silo - let link = params::IpPoolSiloLink { - silo: NameOrId::Id(DEFAULT_SILO.id()), - is_default: true, - }; - create_ip_pool(&client, "default", Some(default_pool_range), Some(link)) - .await; - - // don't use create_ip_pool because it automatically associates with a pool - let pool_name = "orphan-pool"; - let _: views::IpPool = object_create( - client, - "/v1/system/ip-pools", - &params::IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: String::from(pool_name).parse().unwrap(), - description: String::from("an ip pool"), - }, - }, - ) - .await; + create_ip_pool(&client, "default", None).await; + link_ip_pool(&client, "default", &DEFAULT_SILO.id(), true).await; let orphan_pool_range = IpRange::V4( Ipv4Range::new( ) .unwrap(), ); - populate_ip_pool(client, pool_name, orphan_pool_range).await; + create_ip_pool(&client, "orphan-pool", Some(orphan_pool_range)).await; // this should 404 let instance_name = "orphan-pool-inst"; @@ -3794,11 +3764,8 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { .await; // can't use create_default_ip_pool because we need to link to the silo we just made - let link = params::IpPoolSiloLink { - silo: NameOrId::Id(silo.identity.id), - is_default: true, - }; - create_ip_pool(&client, "default", None, Some(link)).await; + create_ip_pool(&client, "default", None).await; + link_ip_pool(&client, "default", &silo.identity.id, true).await; // Create test projects NexusRequest::objects_post( From 7ff15b976ec299154cc42d9a0e2416d660f0e4fd Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 6 Dec 2023 23:28:44 -0600 Subject: [PATCH 41/67] update migrations for a world without fleet IP pools --- schema/crdb/19.0.0/up4.sql | 24 ++++++++++++++++++++---- schema/crdb/19.0.0/up5.sql | 16 ++++++++++------ schema/crdb/19.0.0/up6.sql | 9 +++++++++ 3 files changed, 39 insertions(+), 10 deletions(-) create mode 100644 schema/crdb/19.0.0/up6.sql diff --git a/schema/crdb/19.0.0/up4.sql b/schema/crdb/19.0.0/up4.sql index 52ce081453..a5ba739755 100644 --- a/schema/crdb/19.0.0/up4.sql +++ b/schema/crdb/19.0.0/up4.sql @@ -1,6 +1,22 @@ -- copy existing fleet associations into association table. treat all existing -- pools as fleet-associated because that is the current behavior -INSERT INTO ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) -SELECT id, 'fleet', '001de000-1334-4000-8000-000000000000', is_default -FROM ip_pool -WHERE time_deleted IS null; \ No newline at end of file +INSERT INTO omicron.public.ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) +SELECT + p.id AS ip_pool_id, + 'silo' AS resource_type, + s.id AS resource_id, + -- note: the is_default problem described here is solved by up5.sql, which runs after this: if pool P1 + -- is a fleet default and pool P2 is a silo default on silo S1, we cannot link + -- both to S1 with is_default = true. what we really want in that case is to link + -- it to S1 with is_default = false.
So first, here we copy the "original" + -- value of is_default, and then in up5 we flip is_default to false if there + -- is a conflicting default silo-linked pool + p.is_default +FROM ip_pool AS p +CROSS JOIN silo AS s +WHERE p.time_deleted IS null + AND p.silo_id IS null -- means it's a fleet pool + AND s.time_deleted IS null +-- make this idempotent +ON CONFLICT (ip_pool_id, resource_type, resource_id) +DO NOTHING; diff --git a/schema/crdb/19.0.0/up5.sql b/schema/crdb/19.0.0/up5.sql index 656b45027b..d5f86a8ca7 100644 --- a/schema/crdb/19.0.0/up5.sql +++ b/schema/crdb/19.0.0/up5.sql @@ -1,6 +1,10 @@ --- copy existing ip_pool-to-silo associations into association table -INSERT INTO ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) -SELECT id, 'silo', silo_id, is_default -FROM ip_pool -WHERE silo_id IS NOT null - AND time_deleted IS null; \ No newline at end of file +-- turn any former fleet defaults into non-defaults if there's going to be a +-- silo conflicting with it +UPDATE omicron.public.ip_pool_resource AS ipr +SET is_default = false +FROM omicron.public.ip_pool as ip +WHERE ipr.is_default = true + AND ip.is_default = true -- both being default is the conflict being resolved + AND ip.silo_id = ipr.resource_id + AND ip.id = ipr.ip_pool_id; + diff --git a/schema/crdb/19.0.0/up6.sql b/schema/crdb/19.0.0/up6.sql new file mode 100644 index 0000000000..570f3dba7b --- /dev/null +++ b/schema/crdb/19.0.0/up6.sql @@ -0,0 +1,9 @@ +-- copy existing ip_pool-to-silo associations into association table +INSERT INTO omicron.public.ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) +SELECT id, 'silo', silo_id, is_default +FROM ip_pool +WHERE silo_id IS NOT null + AND time_deleted IS null +-- make this idempotent +ON CONFLICT (ip_pool_id, resource_type, resource_id) +DO NOTHING; From 85922cd77f956352282f5863983ef833ee7cc91a Mon Sep 17 00:00:00 2001 From: David Crespo Date: Thu, 7 Dec 2023 16:12:58 -0600 Subject: [PATCH 42/67] ip_pool_make_default -> ip_pool_silo_update, much better tests --- nexus/db-queries/src/db/datastore/ip_pool.rs | 34 ++++- nexus/src/app/ip_pool.rs | 11 +- nexus/src/external_api/http_entrypoints.rs | 31 ++-- nexus/test-utils/src/resource_helpers.rs | 36 ++++- nexus/tests/integration_tests/ip_pools.rs | 141 ++++++++++++++----- nexus/tests/output/nexus_tags.txt | 4 +- nexus/types/src/external_api/params.rs | 11 ++ openapi/nexus.json | 134 +++++++++--------- 8 files changed, 266 insertions(+), 136 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 4696ed2070..950e06b4e6 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -405,8 +405,12 @@ impl DataStore { e, ErrorHandler::Conflict( ResourceType::IpPoolResource, - // TODO: make string more useful - &ip_pool_resource.ip_pool_id.to_string(), + &format!( + "ip_pool_id: {:?}, resource_id: {:?}, resource_type: {:?}", + ip_pool_resource.ip_pool_id, + ip_pool_resource.resource_id, + ip_pool_resource.resource_type, + ) ), ) }) @@ -414,11 +418,12 @@ impl DataStore { // TODO: make default should fail when the association doesn't exist. // should it also fail when it's already default? probably not? 
- pub async fn ip_pool_make_default( + pub async fn ip_pool_set_default( &self, opctx: &OpContext, authz_ip_pool: &authz::IpPool, authz_silo: &authz::Silo, + is_default: bool, ) -> UpdateResult { use db::schema::ip_pool_resource::dsl; @@ -430,6 +435,28 @@ let ip_pool_id = authz_ip_pool.id(); let silo_id = authz_silo.id(); + let conn = self.pool_connection_authorized(opctx).await?; + + // if we're making is_default false, we can just do that without + // checking any other stuff + if !is_default { + let updated_link = diesel::update(dsl::ip_pool_resource) + .filter(dsl::resource_id.eq(silo_id)) + .filter(dsl::ip_pool_id.eq(ip_pool_id)) + .filter(dsl::resource_type.eq(IpPoolResourceType::Silo)) + .set(dsl::is_default.eq(false)) + .returning(IpPoolResource::as_returning()) + .get_result_async(&*conn) + .await + .map_err(|e| { + Error::internal_error(&format!( + "Transaction error: {:?}", + e + )) + })?; + return Ok(updated_link); + } + // Errors returned from the below transactions. #[derive(Debug)] enum IpPoolResourceUpdateError { @@ -437,7 +464,6 @@ } type TxnError = TransactionError; - let conn = self.pool_connection_authorized(opctx).await?; conn.transaction_async(|conn| async move { // note this is matching the specified silo, but could be any pool let existing_default_for_silo = dsl::ip_pool_resource diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index e97fd2b9df..2a931d6786 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -135,18 +135,25 @@ impl super::Nexus { .await } - pub(crate) async fn ip_pool_make_default( + pub(crate) async fn ip_pool_silo_update( &self, opctx: &OpContext, pool_lookup: &lookup::IpPool<'_>, silo_lookup: &lookup::Silo<'_>, + update: &params::IpPoolSiloUpdate, ) -> CreateResult { let (.., authz_pool) = pool_lookup.lookup_for(authz::Action::Modify).await?; let (.., authz_silo) = silo_lookup.lookup_for(authz::Action::Read).await?; + self.db_datastore - .ip_pool_make_default(opctx, &authz_pool, &authz_silo) + .ip_pool_set_default( + opctx, + &authz_pool, + &authz_silo, + update.is_default, + ) .await } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index bf725e2c57..75115c2eaf 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -125,7 +125,7 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(ip_pool_silo_list)?; api.register(ip_pool_silo_link)?; api.register(ip_pool_silo_unlink)?; - api.register(ip_pool_make_default)?; + api.register(ip_pool_silo_update)?; api.register(ip_pool_view)?; api.register(ip_pool_delete)?; api.register(ip_pool_update)?; @@ -1394,23 +1394,20 @@ async fn ip_pool_silo_link( /// Remove an IP pool's association with a silo or project #[endpoint { method = DELETE, path = "/v1/system/ip-pools/{pool}/silos", + path = "/v1/system/ip-pools/{pool}/silos/{silo}", tags = ["system/networking"], }] async fn ip_pool_silo_unlink( rqctx: RequestContext>, - path_params: Path, - query_params: Query, + path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let nexus = &apictx.nexus; let path = path_params.into_inner(); - let query = query_params.into_inner(); - let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; - let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; nexus
.ip_pool_dissociate_resource(&opctx, &pool_lookup, &silo_lookup) .await?; @@ -1424,27 +1421,27 @@ async fn ip_pool_silo_unlink( /// Make an IP pool the default for a silo #[endpoint { - method = POST, - path = "/v1/system/ip-pools/{pool}/make-default", + method = PUT, + path = "/v1/system/ip-pools/{pool}/silos/{silo}", tags = ["system/networking"], }] -async fn ip_pool_make_default( +async fn ip_pool_silo_update( rqctx: RequestContext>, - path_params: Path, - silo_selector: TypedBody, -) -> Result, HttpError> { + path_params: Path, + update: TypedBody, +) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let nexus = &apictx.nexus; let path = path_params.into_inner(); - let silo_selector = silo_selector.into_inner(); + let update = update.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; - let silo_lookup = nexus.silo_lookup(&opctx, silo_selector.silo)?; + let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; let assoc = nexus - .ip_pool_make_default(&opctx, &pool_lookup, &silo_lookup) + .ip_pool_silo_update(&opctx, &pool_lookup, &silo_lookup, &update) .await?; - Ok(HttpResponseCreated(assoc.into())) + Ok(HttpResponseOk(assoc.into())) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index b7bcbff536..a004de1c04 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -70,13 +70,35 @@ where .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .unwrap_or_else(|_| { - panic!("failed to make \"create\" request to {path}") - }) + .map_err(|e| panic!("failed to make \"POST\" request to {path}: {e}")) + .unwrap() .parsed_body() .unwrap() } +/// Make a POST, assert status code, return error response body +pub async fn object_create_error( + client: &ClientTestContext, + path: &str, + input: &InputType, + status: StatusCode, +) -> HttpErrorResponseBody +where + InputType: serde::Serialize, +{ + NexusRequest::new( + RequestBuilder::new(client, Method::POST, path) + .body(Some(&input)) + .expect_status(Some(status)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body::() + .unwrap() +} + pub async fn object_put( client: &ClientTestContext, path: &str, @@ -90,7 +112,8 @@ where .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .unwrap_or_else(|_| panic!("failed to make \"PUT\" request to {path}")) + .map_err(|e| panic!("failed to make \"PUT\" request to {path}: {e}")) + .unwrap() .parsed_body() .unwrap() } @@ -100,9 +123,8 @@ pub async fn object_delete(client: &ClientTestContext, path: &str) { .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .unwrap_or_else(|_| { - panic!("failed to make \"delete\" request to {path}") - }); + .map_err(|e| panic!("failed to make \"DELETE\" request to {path}: {e}")) + .unwrap(); } /// Create an IP pool with a single range for testing. 
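With creation and linkage now split into separate helpers, tests spell out both steps explicitly. A minimal sketch of how the helpers added above compose (pool names are illustrative; `DEFAULT_SILO` is the fixed test silo these tests already use):

```rust
// Equivalent to create_default_ip_pool: create a pool with an arbitrary
// range, then link it to the default silo as that silo's default pool.
let (_pool, _range) = create_ip_pool(&client, "default", None).await;
link_ip_pool(&client, "default", &DEFAULT_SILO.id(), /* is_default */ true).await;

// A second pool linked as non-default is only drawn from when an
// instance create request names it explicitly.
let (_pool2, _range2) = create_ip_pool(&client, "pool2", None).await;
link_ip_pool(&client, "pool2", &DEFAULT_SILO.id(), /* is_default */ false).await;
```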
diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 6b6c573f93..c5564886e6 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -18,6 +18,9 @@ use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::object_create; +use nexus_test_utils::resource_helpers::object_create_error; +use nexus_test_utils::resource_helpers::object_delete; +use nexus_test_utils::resource_helpers::object_put; use nexus_test_utils::resource_helpers::objects_list_page_authz; use nexus_test_utils::resource_helpers::{ create_instance, create_instance_with, @@ -29,8 +32,8 @@ use nexus_types::external_api::params::InstanceDiskAttachment; use nexus_types::external_api::params::InstanceNetworkInterfaceAttachment; use nexus_types::external_api::params::IpPoolCreate; use nexus_types::external_api::params::IpPoolSiloLink; +use nexus_types::external_api::params::IpPoolSiloUpdate; use nexus_types::external_api::params::IpPoolUpdate; -use nexus_types::external_api::params::SiloSelector; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::Ipv4Range; use nexus_types::external_api::shared::Ipv6Range; @@ -345,7 +348,7 @@ async fn test_ip_pool_service_no_cud(cptestctx: &ControlPlaneTestContext) { } #[nexus_test] -async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { +async fn test_ip_pool_silo_link(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; let p0 = create_pool(client, "p0").await; @@ -362,21 +365,13 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { is_default: false, }; - let error = NexusRequest::new( - RequestBuilder::new( - client, - Method::POST, - "/v1/system/ip-pools/p0/silos", - ) - .body(Some(&params)) - .expect_status(Some(StatusCode::NOT_FOUND)), + let error = object_create_error( + client, + "/v1/system/ip-pools/p0/silos", + &params, + StatusCode::NOT_FOUND, ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body::() - .unwrap(); + .await; assert_eq!( error.message, @@ -405,45 +400,117 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { assert_eq!(assocs_p0.items.len(), 1); assert_eq!(assocs_p0.items[0], silo_link); - // TODO: dissociate silo - // TODO: confirm dissociation - - // associate same silo to other pool by ID - let params = params::IpPoolSiloLink { + // associate same silo to other pool by ID instead of name + let link_params = params::IpPoolSiloLink { silo: NameOrId::Id(silo_id), - is_default: false, + is_default: true, }; - let _: IpPoolSilo = - object_create(client, "/v1/system/ip-pools/p1/silos", &params).await; + let url = "/v1/system/ip-pools/p1/silos"; + let _: IpPoolSilo = object_create(client, &url, &link_params).await; - // association should look the same as the other one, except different pool ID let silos_p1 = silos_for_pool(client, "p1").await; assert_eq!(silos_p1.items.len(), 1); assert_eq!( silos_p1.items[0], - IpPoolSilo { ip_pool_id: p1.identity.id, ..silo_link } + IpPoolSilo { ip_pool_id: p1.identity.id, is_default: true, silo_id } ); - // make p0's pool default and show that it changes - let params = SiloSelector { silo: silo.clone() }; + // creating a third pool and trying to link it as default: true should fail + create_pool(client, "p2").await; + let url = "/v1/system/ip-pools/p2/silos"; + let error = object_create_error( + client, + &url, + &link_params, + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!(error.error_code, Some("ObjectAlreadyExists".to_string())); + + // unlink silo (doesn't matter that it's a default) + let url = format!("/v1/system/ip-pools/p1/silos/{}", cptestctx.silo_name); + object_delete(client, &url).await; + + let silos_p1 = silos_for_pool(client, "p1").await; + assert_eq!(silos_p1.items.len(), 0); +} + +#[nexus_test] +async fn test_ip_pool_update_default(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + create_pool(client, "p0").await; + create_pool(client, "p1").await; + + // there should be no linked silos + let silos_p0 = silos_for_pool(client, "p0").await; + assert_eq!(silos_p0.items.len(), 0); + + let silos_p1 = silos_for_pool(client, "p1").await; + assert_eq!(silos_p1.items.len(), 0); + + // associate both pools with the test silo + let silo = NameOrId::Name(cptestctx.silo_name.clone()); + let params = + params::IpPoolSiloLink { silo: silo.clone(), is_default: false }; let _: IpPoolSilo = - object_create(client, "/v1/system/ip-pools/p0/make-default", &params) - .await; + object_create(client, "/v1/system/ip-pools/p0/silos", &params).await; // making the same one default again is not an error let _: IpPoolSilo = - object_create(client, "/v1/system/ip-pools/p0/make-default", &params) - .await; + object_create(client, "/v1/system/ip-pools/p1/silos", &params).await; + // now both are linked to the silo, neither is marked default + let silos_p0 = silos_for_pool(client, "p0").await; + assert_eq!(silos_p0.items.len(), 1); + assert_eq!(silos_p0.items[0].is_default, false); + + let silos_p1 = silos_for_pool(client, "p1").await; + assert_eq!(silos_p1.items.len(), 1); + assert_eq!(silos_p1.items[0].is_default, false); + + // make p0 default + let params = IpPoolSiloUpdate { is_default: true }; + let p0_silo_url = + format!("/v1/system/ip-pools/p0/silos/{}", cptestctx.silo_name); + let _: IpPoolSilo = object_put(client, &p0_silo_url, &params).await; + + // making the same one default again is not an error + let _: IpPoolSilo = object_put(client, &p0_silo_url, &params).await; + + // now p0 is default let silos_p0 = silos_for_pool(client, "p0").await; assert_eq!(silos_p0.items.len(), 1); assert_eq!(silos_p0.items[0].is_default, true); - // TODO: unset default + // p1 still not default + let silos_p1 = silos_for_pool(client, "p1").await; + assert_eq!(silos_p1.items.len(), 1); + assert_eq!(silos_p1.items[0].is_default, false); + + // making p1 the default pool for the silo unsets it on p0 - // TODO: associating a resource that is already associated should be a noop - // and return a success message + // set p1 default + let params = IpPoolSiloUpdate { is_default: true }; + let p1_silo_url = - // TODO: trying to set a second default for a resource should fail + format!("/v1/system/ip-pools/p1/silos/{}", cptestctx.silo_name); + let _: IpPoolSilo = object_put(client, &p1_silo_url, &params).await; + + // p1 is now default + let silos_p1 = silos_for_pool(client, "p1").await; + assert_eq!(silos_p1.items.len(), 1); + assert_eq!(silos_p1.items[0].is_default, true); + + // p0 is no longer default + let silos_p0 = silos_for_pool(client, "p0").await; + assert_eq!(silos_p0.items.len(), 1); + assert_eq!(silos_p0.items[0].is_default, false); + + // we can also unset default + let params = IpPoolSiloUpdate { is_default: false }; + let _: IpPoolSilo = object_put(client, &p1_silo_url, &params).await; + + let silos_p1 = silos_for_pool(client, "p1").await; +
assert_eq!(silos_p1.items.len(), 1); + assert_eq!(silos_p1.items[0].is_default, false); } // IP pool list fetch logic includes a join to ip_pool_resource, which is diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 961d3dc094..86d6a7182a 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -135,7 +135,6 @@ OPERATION ID METHOD URL PATH ip_pool_create POST /v1/system/ip-pools ip_pool_delete DELETE /v1/system/ip-pools/{pool} ip_pool_list GET /v1/system/ip-pools -ip_pool_make_default POST /v1/system/ip-pools/{pool}/make-default ip_pool_range_add POST /v1/system/ip-pools/{pool}/ranges/add ip_pool_range_list GET /v1/system/ip-pools/{pool}/ranges ip_pool_range_remove POST /v1/system/ip-pools/{pool}/ranges/remove @@ -145,7 +144,8 @@ ip_pool_service_range_remove POST /v1/system/ip-pools-service/ra ip_pool_service_view GET /v1/system/ip-pools-service ip_pool_silo_link POST /v1/system/ip-pools/{pool}/silos ip_pool_silo_list GET /v1/system/ip-pools/{pool}/silos -ip_pool_silo_unlink DELETE /v1/system/ip-pools/{pool}/silos +ip_pool_silo_unlink DELETE /v1/system/ip-pools/{pool}/silos/{silo} +ip_pool_silo_update PUT /v1/system/ip-pools/{pool}/silos/{silo} ip_pool_update PUT /v1/system/ip-pools/{pool} ip_pool_view GET /v1/system/ip-pools/{pool} networking_address_lot_block_list GET /v1/system/networking/address-lot/{address_lot}/blocks diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index ebd8fe86b1..0f424554f2 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -757,12 +757,23 @@ pub struct IpPoolUpdate { pub identity: IdentityMetadataUpdateParams, } +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct IpPoolSiloPath { + pub pool: NameOrId, + pub silo: NameOrId, +} + #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct IpPoolSiloLink { pub silo: NameOrId, pub is_default: bool, } +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct IpPoolSiloUpdate { + pub is_default: bool, +} + // INSTANCES /// Describes an attachment of an `InstanceNetworkInterface` to an `Instance`, diff --git a/openapi/nexus.json b/openapi/nexus.json index 15c1c9d63f..0f78475ca3 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -4685,54 +4685,6 @@ } } }, - "/v1/system/ip-pools/{pool}/make-default": { - "post": { - "tags": [ - "system/networking" - ], - "summary": "Make an IP pool the default for a silo", - "operationId": "ip_pool_make_default", - "parameters": [ - { - "in": "path", - "name": "pool", - "description": "Name or ID of the IP pool", - "required": true, - "schema": { - "$ref": "#/components/schemas/NameOrId" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SiloSelector" - } - } - }, - "required": true - }, - "responses": { - "201": { - "description": "successful creation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/IpPoolSilo" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, "/v1/system/ip-pools/{pool}/ranges": { "get": { "tags": [ @@ -4996,6 +4948,61 @@ "$ref": "#/components/responses/Error" } } + } + }, + "/v1/system/ip-pools/{pool}/silos/{silo}": { + "put": { + "tags": [ + "system/networking" + ], + "summary": "Make an IP pool the default for a silo", + "operationId": "ip_pool_silo_update", 
+ "parameters": [ + { + "in": "path", + "name": "pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolSiloUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolSilo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } }, "delete": { "tags": [ @@ -5007,16 +5014,14 @@ { "in": "path", "name": "pool", - "description": "Name or ID of the IP pool", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" } }, { - "in": "query", + "in": "path", "name": "silo", - "description": "Name or ID of the silo", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -12085,6 +12090,17 @@ "items" ] }, + "IpPoolSiloUpdate": { + "type": "object", + "properties": { + "is_default": { + "type": "boolean" + } + }, + "required": [ + "is_default" + ] + }, "IpPoolUpdate": { "description": "Parameters for updating an IP Pool", "type": "object", @@ -13266,22 +13282,6 @@ "role_name" ] }, - "SiloSelector": { - "type": "object", - "properties": { - "silo": { - "description": "Name or ID of the silo", - "allOf": [ - { - "$ref": "#/components/schemas/NameOrId" - } - ] - } - }, - "required": [ - "silo" - ] - }, "Sled": { "description": "An operator's view of a Sled.", "type": "object", From 7194ee2de934af060b9f99be37214a70b5ebe705 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Thu, 7 Dec 2023 16:58:32 -0600 Subject: [PATCH 43/67] unauthorized tests pass. it's a miracle --- nexus/tests/integration_tests/endpoints.rs | 27 +++++++++++++++---- nexus/tests/integration_tests/unauthorized.rs | 23 ++++++++++------ 2 files changed, 37 insertions(+), 13 deletions(-) diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index ff40d56019..186eca41d1 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -522,12 +522,19 @@ lazy_static! { description: Some(String::from("a new IP pool")), }, }; - pub static ref DEMO_IP_POOL_ASSOC_URL: String = format!("{}/silos", *DEMO_IP_POOL_URL); - pub static ref DEMO_IP_POOL_ASSOC_BODY: params::IpPoolSiloLink = + pub static ref DEMO_IP_POOL_SILOS_URL: String = format!("{}/silos", *DEMO_IP_POOL_URL); + pub static ref DEMO_IP_POOL_SILOS_BODY: params::IpPoolSiloLink = params::IpPoolSiloLink { silo: NameOrId::Id(DEFAULT_SILO.identity().id), + is_default: true, // necessary for demo instance create to go through + }; + + pub static ref DEMO_IP_POOL_SILO_URL: String = format!("{}/silos/{}", *DEMO_IP_POOL_URL, *DEMO_SILO_NAME); + pub static ref DEMO_IP_POOL_SILO_UPDATE_BODY: params::IpPoolSiloUpdate = + params::IpPoolSiloUpdate { is_default: false, }; + pub static ref DEMO_IP_POOL_RANGE: IpRange = IpRange::V4(Ipv4Range::new( std::net::Ipv4Addr::new(10, 0, 0, 0), std::net::Ipv4Addr::new(10, 0, 0, 255), @@ -828,13 +835,23 @@ lazy_static! 
{ ], }, - // IP pool resource association endpoint + // IP pool silos endpoint VerifyEndpoint { - url: &DEMO_IP_POOL_ASSOC_URL, + url: &DEMO_IP_POOL_SILOS_URL, visibility: Visibility::Protected, unprivileged_access: UnprivilegedAccess::None, allowed_methods: vec![ - AllowedMethod::Post(serde_json::to_value(&*DEMO_IP_POOL_ASSOC_BODY).unwrap()) + AllowedMethod::Get, + AllowedMethod::Post(serde_json::to_value(&*DEMO_IP_POOL_SILOS_BODY).unwrap()), + ], + }, + VerifyEndpoint { + url: &DEMO_IP_POOL_SILO_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Delete, + AllowedMethod::Put(serde_json::to_value(&*DEMO_IP_POOL_SILO_UPDATE_BODY).unwrap()), ], }, diff --git a/nexus/tests/integration_tests/unauthorized.rs b/nexus/tests/integration_tests/unauthorized.rs index 9936af20bf..8f658b6a58 100644 --- a/nexus/tests/integration_tests/unauthorized.rs +++ b/nexus/tests/integration_tests/unauthorized.rs @@ -69,9 +69,8 @@ async fn test_unauthorized(cptestctx: &ControlPlaneTestContext) { .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .unwrap_or_else(|_| { - panic!("Failed to GET from URL: {url}") - }), + .map_err(|e| panic!("Failed to GET from URL: {url}, {e}")) + .unwrap(), id_routes, ), SetupReq::Post { url, body, id_routes } => ( @@ -80,7 +79,8 @@ async fn test_unauthorized(cptestctx: &ControlPlaneTestContext) { .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .unwrap_or_else(|_| panic!("Failed to POST to URL: {url}")), + .map_err(|e| panic!("Failed to POST to URL: {url}, {e}")) + .unwrap(), id_routes, ), }; @@ -202,10 +202,11 @@ lazy_static! { &*DEMO_SILO_USER_ID_SET_PASSWORD_URL, ], }, - // Get the default IP pool - SetupReq::Get { - url: &DEMO_IP_POOL_URL, - id_routes: vec![], + // Create the default IP pool + SetupReq::Post { + url: &DEMO_IP_POOLS_URL, + body: serde_json::to_value(&*DEMO_IP_POOL_CREATE).unwrap(), + id_routes: vec!["/v1/ip-pools/{id}"], }, // Create an IP pool range SetupReq::Post { @@ -213,6 +214,12 @@ lazy_static! { body: serde_json::to_value(&*DEMO_IP_POOL_RANGE).unwrap(), id_routes: vec![], }, + // Link default pool to default silo + SetupReq::Post { + url: &DEMO_IP_POOL_SILOS_URL, + body: serde_json::to_value(&*DEMO_IP_POOL_SILOS_BODY).unwrap(), + id_routes: vec![], + }, // Create a Project in the Organization SetupReq::Post { url: "/v1/projects", From f9ed857926f737899f63326fd4299bb68c4fa8cb Mon Sep 17 00:00:00 2001 From: David Crespo Date: Thu, 7 Dec 2023 21:38:08 -0600 Subject: [PATCH 44/67] I think... 
all the tests pass --- .../db-queries/src/db/queries/external_ip.rs | 29 ++++++++++--------- nexus/test-utils/src/resource_helpers.rs | 9 ++---- nexus/tests/integration_tests/instances.rs | 7 +++-- 3 files changed, 23 insertions(+), 22 deletions(-) diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index baf27a8b1b..2ec9e27daf 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -865,7 +865,12 @@ mod tests { } /// Create pool, associate with current silo - async fn create_ip_pool(&self, name: &str, range: IpRange) { + async fn create_ip_pool( + &self, + name: &str, + range: IpRange, + is_default: bool, + ) { let pool = IpPool::new(&IdentityMetadataCreateParams { name: String::from(name).parse().unwrap(), description: format!("ip pool {}", name), @@ -881,7 +886,7 @@ mod tests { resource_id: silo_id, resource_type: IpPoolResourceType::Silo, ip_pool_id: pool.id(), - is_default: false, + is_default, }; self.db_datastore .ip_pool_associate_resource(&self.opctx, association) @@ -949,7 +954,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 1), )) .unwrap(); - context.create_ip_pool("default", range).await; + context.create_ip_pool("default", range, true).await; for first_port in (0..super::MAX_PORT).step_by(NUM_SOURCE_NAT_PORTS.into()) { @@ -1003,7 +1008,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 1), )) .unwrap(); - context.create_ip_pool("default", range).await; + context.create_ip_pool("default", range, true).await; // Allocate an Ephemeral IP, which should take the entire port range of // the only address in the pool. @@ -1084,7 +1089,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.create_ip_pool("default", range).await; + context.create_ip_pool("default", range, true).await; // TODO-completeness: Implementing Iterator for IpRange would be nice. let addresses = [ @@ -1185,7 +1190,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.create_ip_pool("default", range).await; + context.create_ip_pool("default", range, true).await; let instance_id = Uuid::new_v4(); let id = Uuid::new_v4(); @@ -1644,7 +1649,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.create_ip_pool("default", range).await; + context.create_ip_pool("default", range, true).await; // Create one SNAT IP address. let instance_id = Uuid::new_v4(); @@ -1706,15 +1711,13 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.create_ip_pool("default", first_range).await; + context.create_ip_pool("default", first_range, true).await; let second_range = IpRange::try_from(( Ipv4Addr::new(10, 0, 0, 4), Ipv4Addr::new(10, 0, 0, 6), )) .unwrap(); - // TODO: failing because I changed create_ip_pool to make it - // default for the silo, and there is already a default - context.create_ip_pool("p1", second_range).await; + context.create_ip_pool("p1", second_range, false).await; // Allocating an address on an instance in the second pool should be // respected, even though there are IPs available in the first. 
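A note on what that last assertion relies on: when the caller names a pool explicitly, allocation draws candidate addresses only from that pool's ranges, so it cannot fall back to the default pool even if the default still has free addresses. Conceptually the range filter looks like the following sketch (the real allocation is a much larger Diesel-built CTE in nexus/db-queries/src/db/queries/external_ip.rs; $requested_pool_id is a placeholder):

    -- sketch: candidate ranges come only from the requested pool
    SELECT first_address, last_address
      FROM omicron.public.ip_pool_range
     WHERE ip_pool_id = $requested_pool_id
       AND time_deleted IS NULL;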
@@ -1752,12 +1755,12 @@ mod tests { Ipv4Addr::new(10, 0, 0, 3), )) .unwrap(); - context.create_ip_pool("default", first_range).await; + context.create_ip_pool("default", first_range, true).await; let first_address = Ipv4Addr::new(10, 0, 0, 4); let last_address = Ipv4Addr::new(10, 0, 0, 6); let second_range = IpRange::try_from((first_address, last_address)).unwrap(); - context.create_ip_pool("p1", second_range).await; + context.create_ip_pool("p1", second_range, false).await; // Allocate all available addresses in the second pool. let instance_id = Uuid::new_v4(); diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index a004de1c04..8b7518792b 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -70,8 +70,7 @@ where .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .map_err(|e| panic!("failed to make \"POST\" request to {path}: {e}")) - .unwrap() + .expect(&format!("failed to make \"POST\" request to {path}")) .parsed_body() .unwrap() } @@ -112,8 +111,7 @@ where .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .map_err(|e| panic!("failed to make \"PUT\" request to {path}: {e}")) - .unwrap() + .expect(&format!("failed to make \"PUT\" request to {path}")) .parsed_body() .unwrap() } @@ -123,8 +121,7 @@ pub async fn object_delete(client: &ClientTestContext, path: &str) { .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .map_err(|e| panic!("failed to make \"DELETE\" request to {path}: {e}")) - .unwrap(); + .expect(&format!("failed to make \"DELETE\" request to {path}")); } /// Create an IP pool with a single range for testing. diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 2a3e0ebed4..8992de8062 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -25,6 +25,7 @@ use nexus_test_utils::resource_helpers::create_silo; use nexus_test_utils::resource_helpers::grant_iam; use nexus_test_utils::resource_helpers::link_ip_pool; use nexus_test_utils::resource_helpers::object_create; +use nexus_test_utils::resource_helpers::object_put; use nexus_test_utils::resource_helpers::objects_list_page_authz; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils::start_sled_agent; @@ -3600,10 +3601,10 @@ async fn test_instance_ephemeral_ip_from_correct_pool( ); // make pool2 default and create instance with default pool. 
check that it now comes from pool2
-    let _: views::IpPoolSilo = object_create(
+    let _: views::IpPoolSilo = object_put(
         client,
-        "/v1/system/ip-pools/pool2/make-default",
-        &params::SiloSelector { silo: NameOrId::Id(DEFAULT_SILO.id()) },
+        &format!("/v1/system/ip-pools/pool2/silos/{}", DEFAULT_SILO.id()),
+        &params::IpPoolSiloUpdate { is_default: true },
     )
     .await;

From d1e9b0adb445d7ff648e8d7080f3f2e45b2678dc Mon Sep 17 00:00:00 2001
From: David Crespo
Date: Fri, 8 Dec 2023 12:53:33 -0600
Subject: [PATCH 45/67] test pool-silo unlink when there are/aren't IPs
 outstanding

---
 nexus/db-queries/src/db/datastore/ip_pool.rs | 13 ++--
 nexus/test-utils/src/resource_helpers.rs | 29 ++++++++-
 nexus/tests/integration_tests/instances.rs | 68 ++++++++++++++++++++
 3 files changed, 102 insertions(+), 8 deletions(-)

diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs
index 950e06b4e6..582feab962 100644
--- a/nexus/db-queries/src/db/datastore/ip_pool.rs
+++ b/nexus/db-queries/src/db/datastore/ip_pool.rs
@@ -537,16 +537,17 @@ impl DataStore {
 instance::table
 .on(external_ip::parent_id.eq(instance::id.nullable())),
 )
+ .inner_join(project::table.on(instance::project_id.eq(project::id)))
 .filter(external_ip::is_service.eq(false))
 .filter(external_ip::parent_id.is_not_null())
 .filter(external_ip::time_deleted.is_null())
 .filter(external_ip::ip_pool_id.eq(association.ip_pool_id))
- .filter(instance::time_deleted.is_not_null())
- .select(ExternalIp::as_select())
- .limit(1)
+ // TODO: filter by type? i.e., ephemeral and snat?
+ .filter(instance::time_deleted.is_null())
 // we have to join through IPs to instances to projects to get the silo ID
- .inner_join(project::table.on(instance::project_id.eq(project::id)))
 .filter(project::silo_id.eq(association.resource_id))
+ .select(ExternalIp::as_select())
+ .limit(1)
 .load_async::<ExternalIp>(
 &*self.pool_connection_authorized(opctx).await?,
 )
@@ -560,7 +561,9 @@
 if !existing_ips.is_empty() {
 return Err(Error::InvalidRequest {
- message: "IP addresses from this pool are in use in the associated silo".to_string()
+ message:
+ "IP addresses from this pool are in use in the linked silo"
+ .to_string(),
 });
 }

diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs
index 8b7518792b..789f457730 100644
--- a/nexus/test-utils/src/resource_helpers.rs
+++ b/nexus/test-utils/src/resource_helpers.rs
@@ -70,7 +70,9 @@ where
 .authn_as(AuthnMode::PrivilegedUser)
 .execute()
 .await
- .expect(&format!("failed to make \"POST\" request to {path}"))
+ .unwrap_or_else(|e| {
+ panic!("failed to make \"POST\" request to {path}: {e}")
+ })
 .parsed_body()
 .unwrap()
 }
@@ -98,6 +100,23 @@ where
 .unwrap()
 }

+pub async fn object_delete_error(
+ client: &ClientTestContext,
+ path: &str,
+ status: StatusCode,
+) -> HttpErrorResponseBody {
+ NexusRequest::new(
+ RequestBuilder::new(client, Method::DELETE, path)
+ .expect_status(Some(status)),
+ )
+ .authn_as(AuthnMode::PrivilegedUser)
+ .execute()
+ .await
+ .unwrap()
+ .parsed_body::<HttpErrorResponseBody>()
+ .unwrap()
+}
+
 pub async fn object_put(
 client: &ClientTestContext,
 path: &str,
@@ -111,7 +130,9 @@ where
 .authn_as(AuthnMode::PrivilegedUser)
 .execute()
 .await
- .expect(&format!("failed to make \"PUT\" request to {path}"))
+ .unwrap_or_else(|e| {
+ panic!("failed to make \"PUT\" request to {path}: {e}")
+ })
 .parsed_body()
 .unwrap()
 }
@@ -121,7 +142,9 @@ pub async fn object_delete(client: &ClientTestContext, path: &str) {
 .authn_as(AuthnMode::PrivilegedUser)
 .execute()
 .await
- .expect(&format!("failed to make \"DELETE\" request to {path}"));
+ .unwrap_or_else(|e| {
+ panic!("failed to make \"DELETE\" request to {path}: {e}")
+ });
 }

 /// Create an IP pool with a single range for testing.
diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs
index 8992de8062..c85e51c957 100644
--- a/nexus/tests/integration_tests/instances.rs
+++ b/nexus/tests/integration_tests/instances.rs
@@ -25,6 +25,9 @@ use nexus_test_utils::resource_helpers::create_silo;
 use nexus_test_utils::resource_helpers::grant_iam;
 use nexus_test_utils::resource_helpers::link_ip_pool;
 use nexus_test_utils::resource_helpers::object_create;
+use nexus_test_utils::resource_helpers::object_create_error;
+use nexus_test_utils::resource_helpers::object_delete;
+use nexus_test_utils::resource_helpers::object_delete_error;
 use nexus_test_utils::resource_helpers::object_put;
 use nexus_test_utils::resource_helpers::objects_list_page_authz;
 use nexus_test_utils::resource_helpers::DiskTest;
@@ -3614,6 +3617,71 @@ async fn test_instance_ephemeral_ip_from_correct_pool(
 ip.ip >= range2.first_address() && ip.ip <= range2.last_address(),
 "Expected ephemeral IP to come from pool2"
 );
+
+ // try to delete association with pool1, but it fails because there is an
+ // instance with an IP from the pool in this silo
+ let pool1_silo_url =
+ format!("/v1/system/ip-pools/pool1/silos/{}", DEFAULT_SILO.id());
+ let error =
+ object_delete_error(client, &pool1_silo_url, StatusCode::BAD_REQUEST)
+ .await;
+ assert_eq!(
+ error.message,
+ "IP addresses from this pool are in use in the linked silo"
+ );
+
+ // stop and delete instances with IPs from pool1. perhaps surprisingly, that
+ // includes pool2-inst also because the SNAT IP comes from the default pool
+ // even when a different pool is specified for the ephemeral IP
+ stop_instance(&cptestctx, "pool1-inst").await;
+ stop_instance(&cptestctx, "pool2-inst").await;
+
+ // TODO: this still doesn't work because the SNAT IP is always created
+ // with the default pool, so even when pool2-inst was created by specifying
+ // pool2, the SNAT IP came from pool1
+ object_delete(client, &pool1_silo_url).await;
+
+ // create instance with pool1, expecting allocation to fail
+ let instance_name = "pool1-inst-fail";
+ let url = format!("/v1/instances?project={}", PROJECT_NAME);
+ let instance_params = params::InstanceCreate {
+ identity: IdentityMetadataCreateParams {
+ name: instance_name.parse().unwrap(),
+ description: format!("instance {:?}", instance_name),
+ },
+ ncpus: InstanceCpuCount(4),
+ memory: ByteCount::from_gibibytes_u32(1),
+ hostname: String::from("the_host"),
+ user_data: vec![],
+ network_interfaces: params::InstanceNetworkInterfaceAttachment::Default,
+ external_ips: vec![params::ExternalIpCreate::Ephemeral {
+ pool_name: Some("pool1".parse().unwrap()),
+ }],
+ disks: vec![],
+ start: true,
+ };
+ let error = object_create_error(
+ client,
+ &url,
+ &instance_params,
+ StatusCode::NOT_FOUND,
+ )
+ .await;
+ assert_eq!(error.message, "not found: ip-pool with name \"pool1\"");
+}
+
+async fn stop_instance(
+ cptestctx: &ControlPlaneTestContext,
+ instance_name: &str,
+) {
+ let client = &cptestctx.external_client;
+ let instance =
+ instance_post(&client, instance_name, InstanceOp::Stop).await;
+ let nexus = &cptestctx.server.apictx().nexus;
+ instance_simulate(nexus, &instance.identity.id).await;
+ let url =
+ format!("/v1/instances/{}?project={}", instance_name, PROJECT_NAME);
+ object_delete(client, &url).await;
 }
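For readers tracking the datastore side of this test: the unlink guard added in this commit reduces to an existence probe across the IP-to-instance-to-project join. Roughly, as a hand-written sketch rather than the literal SQL Diesel generates ($pool_id and $silo_id stand in for the association being deleted):

    SELECT external_ip.id
      FROM external_ip
      JOIN instance ON instance.id = external_ip.parent_id
      JOIN project  ON project.id = instance.project_id
     WHERE external_ip.ip_pool_id = $pool_id
       AND external_ip.time_deleted IS NULL
       AND instance.time_deleted IS NULL
       AND project.silo_id = $silo_id
     LIMIT 1;
    -- any row found here causes the unlink request to be rejected with a 400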
// IP pool that exists but is not associated with any silo (or with a silo other From 39a4a48bf16effbc5843831fb9e34fe14919c496 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Mon, 11 Dec 2023 15:30:02 -0600 Subject: [PATCH 46/67] move schema version to v20 --- nexus/db-model/src/schema.rs | 14 +++++++------- schema/crdb/{19.0.0 => 20.0.0}/up1.sql | 0 schema/crdb/{19.0.0 => 20.0.0}/up2.sql | 0 schema/crdb/{19.0.0 => 20.0.0}/up3.sql | 0 schema/crdb/{19.0.0 => 20.0.0}/up4.sql | 0 schema/crdb/{19.0.0 => 20.0.0}/up5.sql | 0 schema/crdb/{19.0.0 => 20.0.0}/up6.sql | 0 schema/crdb/{19.0.1 => 20.0.1}/README.md | 0 schema/crdb/{19.0.1 => 20.0.1}/up1.sql | 0 schema/crdb/{19.0.1 => 20.0.1}/up2.sql | 0 schema/crdb/dbinit.sql | 2 +- 11 files changed, 8 insertions(+), 8 deletions(-) rename schema/crdb/{19.0.0 => 20.0.0}/up1.sql (100%) rename schema/crdb/{19.0.0 => 20.0.0}/up2.sql (100%) rename schema/crdb/{19.0.0 => 20.0.0}/up3.sql (100%) rename schema/crdb/{19.0.0 => 20.0.0}/up4.sql (100%) rename schema/crdb/{19.0.0 => 20.0.0}/up5.sql (100%) rename schema/crdb/{19.0.0 => 20.0.0}/up6.sql (100%) rename schema/crdb/{19.0.1 => 20.0.1}/README.md (100%) rename schema/crdb/{19.0.1 => 20.0.1}/up1.sql (100%) rename schema/crdb/{19.0.1 => 20.0.1}/up2.sql (100%) diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 088e58e5d5..4b6138142d 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -8,6 +8,13 @@ use omicron_common::api::external::SemverVersion; +/// The version of the database schema this particular version of Nexus was +/// built against. +/// +/// This should be updated whenever the schema is changed. For more details, +/// refer to: schema/crdb/README.adoc +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(20, 0, 1); + table! { disk (id) { id -> Uuid, @@ -1303,13 +1310,6 @@ table! { } } -/// The version of the database schema this particular version of Nexus was -/// built against. -/// -/// This should be updated whenever the schema is changed. 
For more details, -/// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(19, 0, 1); - allow_tables_to_appear_in_same_query!( system_update, component_update, diff --git a/schema/crdb/19.0.0/up1.sql b/schema/crdb/20.0.0/up1.sql similarity index 100% rename from schema/crdb/19.0.0/up1.sql rename to schema/crdb/20.0.0/up1.sql diff --git a/schema/crdb/19.0.0/up2.sql b/schema/crdb/20.0.0/up2.sql similarity index 100% rename from schema/crdb/19.0.0/up2.sql rename to schema/crdb/20.0.0/up2.sql diff --git a/schema/crdb/19.0.0/up3.sql b/schema/crdb/20.0.0/up3.sql similarity index 100% rename from schema/crdb/19.0.0/up3.sql rename to schema/crdb/20.0.0/up3.sql diff --git a/schema/crdb/19.0.0/up4.sql b/schema/crdb/20.0.0/up4.sql similarity index 100% rename from schema/crdb/19.0.0/up4.sql rename to schema/crdb/20.0.0/up4.sql diff --git a/schema/crdb/19.0.0/up5.sql b/schema/crdb/20.0.0/up5.sql similarity index 100% rename from schema/crdb/19.0.0/up5.sql rename to schema/crdb/20.0.0/up5.sql diff --git a/schema/crdb/19.0.0/up6.sql b/schema/crdb/20.0.0/up6.sql similarity index 100% rename from schema/crdb/19.0.0/up6.sql rename to schema/crdb/20.0.0/up6.sql diff --git a/schema/crdb/19.0.1/README.md b/schema/crdb/20.0.1/README.md similarity index 100% rename from schema/crdb/19.0.1/README.md rename to schema/crdb/20.0.1/README.md diff --git a/schema/crdb/19.0.1/up1.sql b/schema/crdb/20.0.1/up1.sql similarity index 100% rename from schema/crdb/19.0.1/up1.sql rename to schema/crdb/20.0.1/up1.sql diff --git a/schema/crdb/19.0.1/up2.sql b/schema/crdb/20.0.1/up2.sql similarity index 100% rename from schema/crdb/19.0.1/up2.sql rename to schema/crdb/20.0.1/up2.sql diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 45abda3665..c5f76318b8 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3016,7 +3016,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '19.0.1', NULL) + ( TRUE, NOW(), NOW(), '20.0.1', NULL) ON CONFLICT DO NOTHING; COMMIT; From d74ccd4c420ffd204e9d87dbd05252f53b006fe9 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Mon, 11 Dec 2023 18:02:24 -0600 Subject: [PATCH 47/67] fix issues introduced by main merge (still some tweaks required) --- .../src/db/datastore/external_ip.rs | 2 +- nexus/db-queries/src/db/datastore/ip_pool.rs | 10 ++- nexus/tests/integration_tests/disks.rs | 3 +- nexus/tests/integration_tests/external_ips.rs | 17 +++-- nexus/tests/integration_tests/instances.rs | 67 +++++++++---------- 5 files changed, 46 insertions(+), 53 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 00012bc73c..3c20002d40 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -76,7 +76,7 @@ impl DataStore { .fetch_for(authz::Action::CreateChild) .await?; - // Is this pool associated with either the fleet or the silo? 
otherwise, 404 + // Is this pool is not associated with the current silo, 404 if self .ip_pool_fetch_association(opctx, &authz_pool) .await diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index b6c85cc791..4fe3d505ec 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -83,7 +83,7 @@ impl DataStore { /// Look up whether the given pool is available to users in the given silo, /// i.e., whether there is an entry in the association table associating the - /// pool with either that silo or the fleet + /// pool with that silo pub async fn ip_pool_fetch_association( &self, opctx: &OpContext, @@ -557,11 +557,9 @@ impl DataStore { })?; if !existing_ips.is_empty() { - return Err(Error::InvalidRequest { - message: - "IP addresses from this pool are in use in the linked silo" - .to_string(), - }); + return Err(Error::invalid_request( + "IP addresses from this pool are in use in the linked silo", + )); } Ok(()) diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index e2ae3b0b80..8a6807747d 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -1252,8 +1252,7 @@ async fn test_disk_virtual_provisioning_collection_failed_delete( let disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; - let project_id1 = create_project(client, PROJECT_NAME).await.identity.id; + let project_id1 = create_project_and_pool(client).await; let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs index f3161dea72..fc5dbb499a 100644 --- a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -15,11 +15,11 @@ use http::StatusCode; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_floating_ip; use nexus_test_utils::resource_helpers::create_instance_with; use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_project; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use nexus_types::external_api::views::FloatingIp; @@ -55,7 +55,7 @@ pub fn get_floating_ip_by_id_url(fip_id: &Uuid) -> String { async fn test_floating_ip_access(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; // Create a floating IP from the default pool. 
@@ -102,12 +102,15 @@ async fn test_floating_ip_access(cptestctx: &ControlPlaneTestContext) { async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let other_pool_range = IpRange::V4( Ipv4Range::new(Ipv4Addr::new(10, 1, 0, 1), Ipv4Addr::new(10, 1, 0, 5)) .unwrap(), ); create_ip_pool(&client, "other-pool", Some(other_pool_range)).await; + // link_ip_pool(&client, "default", &DEFAULT_SILO.id(), false).await; + // TODO: add check for silo link on floating IP allocation so that this + // link line being commented out makes the test fail let project = create_project(client, PROJECT_NAME).await; @@ -180,7 +183,7 @@ async fn test_floating_ip_create_ip_in_use( ) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; let contested_ip = "10.0.0.0".parse().unwrap(); @@ -228,7 +231,7 @@ async fn test_floating_ip_create_name_in_use( ) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; let contested_name = FIP_NAMES[0]; @@ -277,7 +280,7 @@ async fn test_floating_ip_create_name_in_use( async fn test_floating_ip_delete(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; let fip = create_floating_ip( @@ -306,7 +309,7 @@ async fn test_floating_ip_attachment(cptestctx: &ControlPlaneTestContext) { let apictx = &cptestctx.server.apictx(); let nexus = &apictx.nexus; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; let fip = create_floating_ip( diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index f97bdae7ad..0ab773dab8 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -3772,7 +3772,12 @@ async fn test_instance_attach_several_external_ips( ) .unwrap(), ); - populate_ip_pool(&client, "default", Some(default_pool_range)).await; + create_ip_pool(&client, "default", Some(default_pool_range)).await; + link_ip_pool(&client, "default", &DEFAULT_SILO.id(), true).await; + + // this doesn't work as a replacement for the above. figure out why and + // probably delete it + // create_default_ip_pool(&client).await; // Create several floating IPs for the instance, totalling 8 IPs. let mut external_ip_create = @@ -3842,47 +3847,35 @@ async fn test_instance_allow_only_one_ephemeral_ip( let _ = create_project(&client, PROJECT_NAME).await; - // Create one IP pool with space for two ephemerals. 
- let default_pool_range = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 1), - std::net::Ipv4Addr::new(10, 0, 0, 2), - ) - .unwrap(), - ); - populate_ip_pool(&client, "default", Some(default_pool_range)).await; + // don't need any IP pools because request fails at parse time let ephemeral_create = params::ExternalIpCreate::Ephemeral { pool_name: Some("default".parse().unwrap()), }; - let error: HttpErrorResponseBody = NexusRequest::new( - RequestBuilder::new(client, Method::POST, &get_instances_url()) - .body(Some(¶ms::InstanceCreate { - identity: IdentityMetadataCreateParams { - name: "default-pool-inst".parse().unwrap(), - description: "instance default-pool-inst".into(), - }, - ncpus: InstanceCpuCount(4), - memory: ByteCount::from_gibibytes_u32(1), - hostname: String::from("the_host"), - user_data: - b"#cloud-config\nsystem_info:\n default_user:\n name: oxide" - .to_vec(), - network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, - external_ips: vec![ - ephemeral_create.clone(), ephemeral_create - ], - disks: vec![], - start: true, - })) - .expect_status(Some(StatusCode::BAD_REQUEST)), + let create_params = params::InstanceCreate { + identity: IdentityMetadataCreateParams { + name: "default-pool-inst".parse().unwrap(), + description: "instance default-pool-inst".into(), + }, + ncpus: InstanceCpuCount(4), + memory: ByteCount::from_gibibytes_u32(1), + hostname: String::from("the_host"), + user_data: + b"#cloud-config\nsystem_info:\n default_user:\n name: oxide" + .to_vec(), + network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, + external_ips: vec![ephemeral_create.clone(), ephemeral_create], + disks: vec![], + start: true, + }; + let error = object_create_error( + client, + &get_instances_url(), + &create_params, + StatusCode::BAD_REQUEST, ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + .await; + assert_eq!( error.message, "An instance may not have more than 1 ephemeral IP address" From 70724cb41ad0603d5394452894298634ce0af568 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Tue, 12 Dec 2023 15:23:16 -0600 Subject: [PATCH 48/67] ensure silo is linked to IP pool before allowing floating IP allocation --- .../src/db/datastore/external_ip.rs | 26 +++++++----- nexus/db-queries/src/db/datastore/ip_pool.rs | 28 +++++++++---- .../db-queries/src/db/queries/external_ip.rs | 2 +- nexus/src/app/sagas/instance_create.rs | 2 +- nexus/tests/integration_tests/external_ips.rs | 40 +++++++++++++------ 5 files changed, 65 insertions(+), 33 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 3c20002d40..02ce950118 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -76,19 +76,18 @@ impl DataStore { .fetch_for(authz::Action::CreateChild) .await?; - // Is this pool is not associated with the current silo, 404 - if self - .ip_pool_fetch_association(opctx, &authz_pool) - .await - .is_err() - { + // If this pool is not linked to the current silo, 404 + if self.ip_pool_fetch_link(opctx, pool.id()).await.is_err() { return Err(authz_pool.not_found()); } pool } // If no name given, use the default logic - None => self.ip_pools_fetch_default(&opctx).await?, + None => { + let (.., pool) = self.ip_pools_fetch_default(&opctx).await?; + pool + } }; let pool_id = pool.identity.id; @@ -144,24 +143,29 @@ impl DataStore { ) -> CreateResult { let ip_id = 
Uuid::new_v4(); - let pool_id = match params.pool { + // TODO: NameOrId resolution should happen a level higher, in the nexus function + let (.., authz_pool, pool) = match params.pool { Some(NameOrId::Name(name)) => { LookupPath::new(opctx, self) .ip_pool_name(&Name(name)) .fetch_for(authz::Action::Read) .await? - .1 } Some(NameOrId::Id(id)) => { LookupPath::new(opctx, self) .ip_pool_id(id) .fetch_for(authz::Action::Read) .await? - .1 } None => self.ip_pools_fetch_default(opctx).await?, + }; + + let pool_id = pool.id(); + + // If this pool is not linked to the current silo, 404 + if self.ip_pool_fetch_link(opctx, pool_id).await.is_err() { + return Err(authz_pool.not_found()); } - .id(); let data = if let Some(ip) = params.address { IncompleteExternalIp::for_floating_explicit( diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 4fe3d505ec..52931a70f9 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -81,13 +81,13 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } - /// Look up whether the given pool is available to users in the given silo, - /// i.e., whether there is an entry in the association table associating the - /// pool with that silo - pub async fn ip_pool_fetch_association( + /// Look up whether the given pool is available to users in the current + /// silo, i.e., whether there is an entry in the association table linking + /// the pool with that silo + pub async fn ip_pool_fetch_link( &self, opctx: &OpContext, - authz_pool: &authz::IpPool, + ip_pool_id: Uuid, ) -> LookupResult { use db::schema::ip_pool; use db::schema::ip_pool_resource; @@ -101,7 +101,7 @@ impl DataStore { .eq(IpPoolResourceType::Silo) .and(ip_pool_resource::resource_id.eq(authz_silo.id())), ) - .filter(ip_pool::id.eq(authz_pool.id())) + .filter(ip_pool::id.eq(ip_pool_id)) .filter(ip_pool::time_deleted.is_null()) .select(IpPoolResource::as_select()) .first_async::( @@ -119,7 +119,7 @@ impl DataStore { pub async fn ip_pools_fetch_default( &self, opctx: &OpContext, - ) -> LookupResult { + ) -> LookupResult<(authz::IpPool, IpPool)> { use db::schema::ip_pool; use db::schema::ip_pool_resource; @@ -156,6 +156,18 @@ impl DataStore { ) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + .map(|ip_pool| { + ( + authz::IpPool::new( + authz::FLEET, + ip_pool.id(), + LookupType::ByCompositeId( + "Default IP Pool".to_string(), + ), + ), + ip_pool, + ) + }) } /// Looks up an IP pool intended for internal services. 
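To summarize the lookup that ip_pools_fetch_default now performs before returning the authz/model pair: the current silo's default pool is resolved through the link table, along the lines of this conceptual sketch (not the generated SQL; $current_silo_id stands in for the silo taken from the auth context):

    SELECT ip_pool.*
      FROM omicron.public.ip_pool
      JOIN omicron.public.ip_pool_resource
        ON ip_pool_resource.ip_pool_id = ip_pool.id
     WHERE ip_pool_resource.resource_type = 'silo'
       AND ip_pool_resource.resource_id = $current_silo_id
       AND ip_pool_resource.is_default = true
       AND ip_pool.time_deleted IS NULL;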
@@ -827,7 +839,7 @@ mod test { .expect("Failed to make IP pool default for silo"); // now when we ask for the default pool again, we get that one - let ip_pool = datastore + let (.., ip_pool) = datastore .ip_pools_fetch_default(&opctx) .await .expect("Failed to get silo's default IP pool"); diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index 01e9aab53f..1ab878dc1c 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -938,7 +938,7 @@ mod tests { } async fn default_pool_id(&self) -> Uuid { - let pool = self + let (.., pool) = self .db_datastore .ip_pools_fetch_default(&self.opctx) .await diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index f99097f2fb..5685efc420 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -564,7 +564,7 @@ async fn sic_allocate_instance_snat_ip( let instance_id = sagactx.lookup::("instance_id")?; let ip_id = sagactx.lookup::("snat_ip_id")?; - let pool = datastore + let (.., pool) = datastore .ip_pools_fetch_default(&opctx) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs index fc5dbb499a..8d16bce197 100644 --- a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -12,6 +12,7 @@ use dropshot::test_util::ClientTestContext; use dropshot::HttpErrorResponseBody; use http::Method; use http::StatusCode; +use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; @@ -20,13 +21,18 @@ use nexus_test_utils::resource_helpers::create_floating_ip; use nexus_test_utils::resource_helpers::create_instance_with; use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_project; +use nexus_test_utils::resource_helpers::link_ip_pool; +use nexus_test_utils::resource_helpers::object_create; +use nexus_test_utils::resource_helpers::object_create_error; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use nexus_types::external_api::views::FloatingIp; +use nexus_types::identity::Resource; use omicron_common::address::IpRange; use omicron_common::address::Ipv4Range; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Instance; +use omicron_common::api::external::NameOrId; use uuid::Uuid; type ControlPlaneTestContext = @@ -102,15 +108,15 @@ async fn test_floating_ip_access(cptestctx: &ControlPlaneTestContext) { async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; + // automatically linked to current silo create_default_ip_pool(&client).await; + let other_pool_range = IpRange::V4( Ipv4Range::new(Ipv4Addr::new(10, 1, 0, 1), Ipv4Addr::new(10, 1, 0, 5)) .unwrap(), ); + // not automatically linked to currently silo. 
see below create_ip_pool(&client, "other-pool", Some(other_pool_range)).await; - // link_ip_pool(&client, "default", &DEFAULT_SILO.id(), false).await; - // TODO: add check for silo link on floating IP allocation so that this - // link line being commented out makes the test fail let project = create_project(client, PROJECT_NAME).await; @@ -145,16 +151,26 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(fip.instance_id, None); assert_eq!(fip.ip, ip_addr); - // Create with no chosen IP from named pool. + // Creating with other-pool fails with 404 until it is linked to the current silo let fip_name = FIP_NAMES[2]; - let fip = create_floating_ip( - client, - fip_name, - project.identity.name.as_str(), - None, - Some("other-pool"), - ) - .await; + let params = params::FloatingIpCreate { + identity: IdentityMetadataCreateParams { + name: fip_name.parse().unwrap(), + description: String::from("a floating ip"), + }, + address: None, + pool: Some(NameOrId::Name("other-pool".parse().unwrap())), + }; + let url = format!("/v1/floating-ips?project={}", project.identity.name); + let error = + object_create_error(client, &url, ¶ms, StatusCode::NOT_FOUND).await; + assert_eq!(error.message, "not found: ip-pool with name \"other-pool\""); + + // now link the pool and everything should work with the exact same params + link_ip_pool(&client, "other-pool", &DEFAULT_SILO.id(), false).await; + + // Create with no chosen IP from named pool. + let fip: FloatingIp = object_create(client, &url, ¶ms).await; assert_eq!(fip.identity.name.as_str(), fip_name); assert_eq!(fip.project_id, project.identity.id); assert_eq!(fip.instance_id, None); From 69a482a665354d9845dcb215471b793f8dd789bb Mon Sep 17 00:00:00 2001 From: David Crespo Date: Tue, 12 Dec 2023 16:41:01 -0600 Subject: [PATCH 49/67] ensure no outstanding floating IPs before unlinking silo --- nexus/db-model/src/ip_pool.rs | 1 + nexus/db-queries/src/db/datastore/ip_pool.rs | 62 ++++++++++++++++--- nexus/tests/integration_tests/external_ips.rs | 30 ++++++--- 3 files changed, 77 insertions(+), 16 deletions(-) diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 8211287248..fc29455e1b 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -99,6 +99,7 @@ pub struct IpPoolResource { /// Information required to delete an IP pool association. Comes from request /// params -- silo is a NameOrId and must be resolved to ID. 
+#[derive(Clone, Debug)] pub struct IpPoolResourceDelete { pub ip_pool_id: Uuid, pub resource_type: IpPoolResourceType, diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 52931a70f9..a6c1f39e0e 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -28,6 +28,7 @@ use diesel::prelude::*; use diesel::result::Error as DieselError; use ipnetwork::IpNetwork; use nexus_db_model::ExternalIp; +use nexus_db_model::IpKind; use nexus_db_model::IpPoolResourceType; use nexus_types::external_api::shared::IpRange; use omicron_common::api::external::http_pagination::PaginatedBy; @@ -525,8 +526,10 @@ impl DataStore { }) } - // TODO: write a test for this - async fn ensure_no_ips_outstanding( + /// Ephemeral and snat IPs are associated with a silo through an instance, + /// so in order to see if there are any such IPs outstanding in the given + /// silo, we have to join IP -> Instance -> Project -> Silo + async fn ensure_no_instance_ips_outstanding( &self, opctx: &OpContext, association: &IpPoolResourceDelete, @@ -538,9 +541,6 @@ impl DataStore { .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) .await?; - // We can only delete the association if there are no IPs allocated - // from this pool in the associated resource. - let existing_ips = external_ip::table .inner_join( instance::table @@ -551,7 +551,8 @@ impl DataStore { .filter(external_ip::parent_id.is_not_null()) .filter(external_ip::time_deleted.is_null()) .filter(external_ip::ip_pool_id.eq(association.ip_pool_id)) - // TODO: filter by type? i.e., ephemeral and snat? + // important, floating IPs are handled separately + .filter(external_ip::kind.eq(IpKind::Ephemeral).or(external_ip::kind.eq(IpKind::SNat))) .filter(instance::time_deleted.is_null()) // we have to join through IPs to instances to projects to get the silo ID .filter(project::silo_id.eq(association.resource_id)) @@ -577,6 +578,52 @@ impl DataStore { Ok(()) } + /// Floating IPs are associated with a silo through a project, so this one + /// is a little simpler than ephemeral. We join IP -> Project -> Silo. 
+ async fn ensure_no_floating_ips_outstanding( + &self, + opctx: &OpContext, + association: &IpPoolResourceDelete, + ) -> Result<(), Error> { + use db::schema::external_ip; + use db::schema::project; + opctx + .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) + .await?; + + let existing_ips = external_ip::table + .inner_join(project::table.on(external_ip::project_id.eq(project::id.nullable()))) + .filter(external_ip::is_service.eq(false)) + .filter(external_ip::time_deleted.is_null()) + // all floating IPs have a project + .filter(external_ip::project_id.is_not_null()) + .filter(external_ip::ip_pool_id.eq(association.ip_pool_id)) + .filter(external_ip::kind.eq(IpKind::Floating)) + // we have to join through IPs to projects to get the silo ID + .filter(project::silo_id.eq(association.resource_id)) + .filter(project::time_deleted.is_null()) + .select(ExternalIp::as_select()) + .limit(1) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| { + Error::internal_error(&format!( + "error checking for outstanding IPs before deleting IP pool association to resource: {:?}", + e + )) + })?; + + if !existing_ips.is_empty() { + return Err(Error::invalid_request( + "IP addresses from this pool are in use in the linked silo", + )); + } + + Ok(()) + } + /// Delete IP pool assocation with resource unless there are outstanding /// IPs allocated from the pool in the associated silo pub async fn ip_pool_dissociate_resource( @@ -591,7 +638,8 @@ impl DataStore { // We can only delete the association if there are no IPs allocated // from this pool in the associated resource. - self.ensure_no_ips_outstanding(opctx, association).await?; + self.ensure_no_instance_ips_outstanding(opctx, association).await?; + self.ensure_no_floating_ips_outstanding(opctx, association).await?; diesel::delete(ip_pool_resource::table) .filter(ip_pool_resource::ip_pool_id.eq(association.ip_pool_id)) diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs index 8d16bce197..53b0f7a70d 100644 --- a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -24,6 +24,8 @@ use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::link_ip_pool; use nexus_test_utils::resource_helpers::object_create; use nexus_test_utils::resource_helpers::object_create_error; +use nexus_test_utils::resource_helpers::object_delete; +use nexus_test_utils::resource_helpers::object_delete_error; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use nexus_types::external_api::views::FloatingIp; @@ -167,7 +169,8 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(error.message, "not found: ip-pool with name \"other-pool\""); // now link the pool and everything should work with the exact same params - link_ip_pool(&client, "other-pool", &DEFAULT_SILO.id(), false).await; + let silo_id = DEFAULT_SILO.id(); + link_ip_pool(&client, "other-pool", &silo_id, false).await; // Create with no chosen IP from named pool. 
let fip: FloatingIp = object_create(client, &url, ¶ms).await; @@ -308,15 +311,24 @@ async fn test_floating_ip_delete(cptestctx: &ControlPlaneTestContext) { ) .await; + // unlink fails because there are outstanding IPs + let silo_id = DEFAULT_SILO.id(); + let silo_link_url = + format!("/v1/system/ip-pools/default/silos/{}", silo_id); + let error = + object_delete_error(client, &silo_link_url, StatusCode::BAD_REQUEST) + .await; + assert_eq!( + error.message, + "IP addresses from this pool are in use in the linked silo" + ); + // Delete the floating IP. - NexusRequest::object_delete( - client, - &get_floating_ip_by_id_url(&fip.identity.id), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap(); + let floating_ip_url = get_floating_ip_by_id_url(&fip.identity.id); + object_delete(client, &floating_ip_url).await; + + // now unlink works + object_delete(client, &silo_link_url).await; } #[nexus_test] From 7c0ba9d6aba55619d248419d5ec6ecb2dba9e789 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Tue, 12 Dec 2023 16:45:33 -0600 Subject: [PATCH 50/67] verify there are no linked silos before deleting pool --- nexus/db-queries/src/db/datastore/ip_pool.rs | 18 +++ nexus/test-utils/src/resource_helpers.rs | 69 ++++++--- nexus/tests/integration_tests/ip_pools.rs | 139 ++++++------------- 3 files changed, 112 insertions(+), 114 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index a6c1f39e0e..7abbbbd53b 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -248,6 +248,7 @@ impl DataStore { ) -> DeleteResult { use db::schema::ip_pool::dsl; use db::schema::ip_pool_range; + use db::schema::ip_pool_resource; opctx.authorize(authz::Action::Delete, authz_pool).await?; // Verify there are no IP ranges still in this pool @@ -268,6 +269,23 @@ impl DataStore { )); } + // Verify there are no linked silos + let silo_link = ip_pool_resource::table + .filter(ip_pool_resource::ip_pool_id.eq(authz_pool.id())) + .select(ip_pool_resource::resource_id) + .limit(1) + .first_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .optional() + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + if silo_link.is_some() { + return Err(Error::invalid_request( + "IP Pool cannot be deleted while it is linked to a silo", + )); + } + // Delete the pool, conditional on the rcgen not having changed. This // protects the delete from occuring if clients created a new IP range // in between the above check for children and this query. 
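The new silo-link check above mirrors the existing range check: both are cheap existence probes run before the conditional DELETE of the pool row. In SQL terms the probe is roughly the following sketch ($pool_id is a placeholder for the pool being deleted):

    SELECT resource_id
      FROM omicron.public.ip_pool_resource
     WHERE ip_pool_id = $pool_id
     LIMIT 1;
    -- one surviving link is enough to reject the pool deletion with a 400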
diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index b7c599cc1e..528a009cc7 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -57,39 +57,31 @@ where .unwrap() } -pub async fn object_create( +pub async fn object_get( client: &ClientTestContext, path: &str, - input: &InputType, ) -> OutputType where - InputType: serde::Serialize, OutputType: serde::de::DeserializeOwned, { - NexusRequest::objects_post(client, path, input) + NexusRequest::object_get(client, path) .authn_as(AuthnMode::PrivilegedUser) .execute() .await .unwrap_or_else(|e| { - panic!("failed to make \"POST\" request to {path}: {e}") + panic!("failed to make \"GET\" request to {path}: {e}") }) .parsed_body() .unwrap() } -/// Make a POST, assert status code, return error response body -pub async fn object_create_error( +pub async fn object_get_error( client: &ClientTestContext, path: &str, - input: &InputType, status: StatusCode, -) -> HttpErrorResponseBody -where - InputType: serde::Serialize, -{ +) -> HttpErrorResponseBody { NexusRequest::new( - RequestBuilder::new(client, Method::POST, path) - .body(Some(&input)) + RequestBuilder::new(client, Method::DELETE, path) .expect_status(Some(status)), ) .authn_as(AuthnMode::PrivilegedUser) @@ -100,13 +92,39 @@ where .unwrap() } -pub async fn object_delete_error( +pub async fn object_create( + client: &ClientTestContext, + path: &str, + input: &InputType, +) -> OutputType +where + InputType: serde::Serialize, + OutputType: serde::de::DeserializeOwned, +{ + NexusRequest::objects_post(client, path, input) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap_or_else(|e| { + panic!("failed to make \"POST\" request to {path}: {e}") + }) + .parsed_body() + .unwrap() +} + +/// Make a POST, assert status code, return error response body +pub async fn object_create_error( client: &ClientTestContext, path: &str, + input: &InputType, status: StatusCode, -) -> HttpErrorResponseBody { +) -> HttpErrorResponseBody +where + InputType: serde::Serialize, +{ NexusRequest::new( - RequestBuilder::new(client, Method::DELETE, path) + RequestBuilder::new(client, Method::POST, path) + .body(Some(&input)) .expect_status(Some(status)), ) .authn_as(AuthnMode::PrivilegedUser) @@ -147,6 +165,23 @@ pub async fn object_delete(client: &ClientTestContext, path: &str) { }); } +pub async fn object_delete_error( + client: &ClientTestContext, + path: &str, + status: StatusCode, +) -> HttpErrorResponseBody { + NexusRequest::new( + RequestBuilder::new(client, Method::DELETE, path) + .expect_status(Some(status)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body::() + .unwrap() +} + /// Create an IP pool with a single range for testing. 
/// /// The IP range may be specified if it's important for testing the behavior diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index c5564886e6..559328138c 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -20,6 +20,9 @@ use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::object_create; use nexus_test_utils::resource_helpers::object_create_error; use nexus_test_utils::resource_helpers::object_delete; +use nexus_test_utils::resource_helpers::object_delete_error; +use nexus_test_utils::resource_helpers::object_get; +use nexus_test_utils::resource_helpers::object_get_error; use nexus_test_utils::resource_helpers::object_put; use nexus_test_utils::resource_helpers::objects_list_page_authz; use nexus_test_utils::resource_helpers::{ @@ -76,34 +79,15 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { assert_eq!(ip_pools.len(), 0, "Expected empty list of IP pools"); // Verify 404 if the pool doesn't exist yet, both for creating or deleting - let error: HttpErrorResponseBody = NexusRequest::expect_failure( - client, - StatusCode::NOT_FOUND, - Method::GET, - &ip_pool_url, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + let error = + object_get_error(client, &ip_pool_url, StatusCode::NOT_FOUND).await; assert_eq!( error.message, format!("not found: ip-pool with name \"{}\"", pool_name), ); - let error: HttpErrorResponseBody = NexusRequest::expect_failure( - client, - StatusCode::NOT_FOUND, - Method::DELETE, - &ip_pool_url, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + + let error = + object_delete_error(client, &ip_pool_url, StatusCode::NOT_FOUND).await; assert_eq!( error.message, format!("not found: ip-pool with name \"{}\"", pool_name), @@ -116,20 +100,11 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, - // silo: None, - // is_default: false, }; let created_pool: IpPool = - NexusRequest::objects_post(client, ip_pools_url, ¶ms) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + object_create(client, ip_pools_url, ¶ms).await; assert_eq!(created_pool.identity.name, pool_name); assert_eq!(created_pool.identity.description, description); - // assert_eq!(created_pool.silo_id, None); let list = NexusRequest::iter_collection_authn::( client, @@ -143,27 +118,18 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { assert_eq!(list.len(), 1, "Expected exactly 1 IP pool"); assert_pools_eq(&created_pool, &list[0]); - let fetched_pool: IpPool = NexusRequest::object_get(client, &ip_pool_url) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + let fetched_pool: IpPool = object_get(client, &ip_pool_url).await; assert_pools_eq(&created_pool, &fetched_pool); // Verify we get a conflict error if we insert it again - let error: HttpErrorResponseBody = NexusRequest::new( - RequestBuilder::new(client, Method::POST, ip_pools_url) - .body(Some(¶ms)) - .expect_status(Some(StatusCode::BAD_REQUEST)), + let error = object_create_error( + client, + ip_pools_url, + ¶ms, + StatusCode::BAD_REQUEST, ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - 
.unwrap(); + .await; + assert_eq!( error.message, format!("already exists: ip-pool \"{}\"", pool_name) @@ -178,27 +144,13 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { .unwrap(), ); let created_range: IpPoolRange = - NexusRequest::objects_post(client, &ip_pool_add_range_url, &range) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + object_create(client, &ip_pool_add_range_url, &range).await; assert_eq!(range.first_address(), created_range.range.first_address()); assert_eq!(range.last_address(), created_range.range.last_address()); - let error: HttpErrorResponseBody = NexusRequest::expect_failure( - client, - StatusCode::BAD_REQUEST, - Method::DELETE, - &ip_pool_url, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + + let error: HttpErrorResponseBody = + object_delete_error(client, &ip_pool_url, StatusCode::BAD_REQUEST) + .await; assert_eq!( error.message, "IP Pool cannot be deleted while it contains IP ranges", @@ -219,13 +171,7 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { }, }; let modified_pool: IpPool = - NexusRequest::object_put(client, &ip_pool_url, Some(&updates)) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + object_put(client, &ip_pool_url, &updates).await; assert_eq!(modified_pool.identity.name, new_pool_name); assert_eq!(modified_pool.identity.id, created_pool.identity.id); assert_eq!( @@ -242,27 +188,11 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { ); let fetched_modified_pool: IpPool = - NexusRequest::object_get(client, &new_ip_pool_url) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + object_get(client, &new_ip_pool_url).await; assert_pools_eq(&modified_pool, &fetched_modified_pool); - let error: HttpErrorResponseBody = NexusRequest::expect_failure( - client, - StatusCode::NOT_FOUND, - Method::GET, - &ip_pool_url, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + let error: HttpErrorResponseBody = + object_get_error(client, &ip_pool_url, StatusCode::NOT_FOUND).await; assert_eq!( error.message, format!("not found: ip-pool with name \"{}\"", pool_name), @@ -425,7 +355,19 @@ async fn test_ip_pool_silo_link(cptestctx: &ControlPlaneTestContext) { StatusCode::BAD_REQUEST, ) .await; - assert_eq!(error.error_code, Some("ObjectAlreadyExists".to_string())); + assert_eq!(error.error_code.unwrap(), "ObjectAlreadyExists"); + + // pool delete fails because it is linked to a silo + let error = object_delete_error( + client, + "/v1/system/ip-pools/p1", + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!( + error.message, + "IP Pool cannot be deleted while it is linked to a silo", + ); // unlink silo (doesn't matter that it's a default) let url = format!("/v1/system/ip-pools/p1/silos/{}", cptestctx.silo_name); @@ -433,6 +375,9 @@ async fn test_ip_pool_silo_link(cptestctx: &ControlPlaneTestContext) { let silos_p1 = silos_for_pool(client, "p1").await; assert_eq!(silos_p1.items.len(), 0); + + // now we can delete the pool too + object_delete(client, "/v1/system/ip-pools/p1").await; } #[nexus_test] From 5db1f09e3b7209f51a4b0f8e88aa0b464912c7a2 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Tue, 12 Dec 2023 21:53:50 -0600 Subject: [PATCH 51/67] always use fully qualified name for SQL enum --- nexus/db-model/src/schema.rs | 14 
+++++++------- schema/crdb/20.0.0/up2.sql | 2 +- schema/crdb/dbinit.sql | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 07679819c2..99d7cfd9da 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -8,13 +8,6 @@ use omicron_common::api::external::SemverVersion; -/// The version of the database schema this particular version of Nexus was -/// built against. -/// -/// This should be updated whenever the schema is changed. For more details, -/// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(20, 0, 1); - table! { disk (id) { id -> Uuid, @@ -1331,6 +1324,13 @@ table! { } } +/// The version of the database schema this particular version of Nexus was +/// built against. +/// +/// This should be updated whenever the schema is changed. For more details, +/// refer to: schema/crdb/README.adoc +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(20, 0, 1); + allow_tables_to_appear_in_same_query!( system_update, component_update, diff --git a/schema/crdb/20.0.0/up2.sql b/schema/crdb/20.0.0/up2.sql index c2be7785ce..cf4668c325 100644 --- a/schema/crdb/20.0.0/up2.sql +++ b/schema/crdb/20.0.0/up2.sql @@ -1,6 +1,6 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool_resource ( ip_pool_id UUID NOT NULL, - resource_type ip_pool_resource_type NOT NULL, + resource_type omicron.public.ip_pool_resource_type NOT NULL, resource_id UUID NOT NULL, is_default BOOL NOT NULL, diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index e0a622f288..8daf4ac7c1 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1554,7 +1554,7 @@ CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_resource_type AS ENUM ( -- join table associating IP pools with resources like fleet or silo CREATE TABLE IF NOT EXISTS omicron.public.ip_pool_resource ( ip_pool_id UUID NOT NULL, - resource_type ip_pool_resource_type NOT NULL, + resource_type omicron.public.ip_pool_resource_type NOT NULL, resource_id UUID NOT NULL, is_default BOOL NOT NULL, -- TODO: timestamps for soft deletes? From bbcd7316b3ed7bd728611194a1b5b0630ec5315a Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 13 Dec 2023 09:56:52 -0600 Subject: [PATCH 52/67] set up IP pool properly for e2e test --- end-to-end-tests/src/bin/bootstrap.rs | 23 +++++++++++++++++++++-- end-to-end-tests/src/helpers/ctx.rs | 4 ++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/end-to-end-tests/src/bin/bootstrap.rs b/end-to-end-tests/src/bin/bootstrap.rs index 83a37b8c21..0c3cb8f150 100644 --- a/end-to-end-tests/src/bin/bootstrap.rs +++ b/end-to-end-tests/src/bin/bootstrap.rs @@ -4,7 +4,8 @@ use end_to_end_tests::helpers::{generate_name, get_system_ip_pool}; use omicron_test_utils::dev::poll::{wait_for_condition, CondCheckError}; use oxide_client::types::{ ByteCount, DeviceAccessTokenRequest, DeviceAuthRequest, DeviceAuthVerify, - DiskCreate, DiskSource, IpRange, Ipv4Range, + DiskCreate, DiskSource, IpPoolCreate, IpPoolSiloLink, IpRange, Ipv4Range, + NameOrId, }; use oxide_client::{ ClientDisksExt, ClientHiddenExt, ClientProjectsExt, @@ -38,9 +39,27 @@ async fn main() -> Result<()> { // ===== CREATE IP POOL ===== // eprintln!("creating IP pool... 
{:?} - {:?}", first, last); + let pool_name = "default"; + client + .ip_pool_create() + .body(IpPoolCreate { + name: pool_name.parse().unwrap(), + description: "Default IP pool".to_string(), + }) + .send() + .await?; + client + .ip_pool_silo_link() + .pool(pool_name) + .body(IpPoolSiloLink { + silo: NameOrId::Name(params.silo_name().parse().unwrap()), + is_default: true, + }) + .send() + .await?; client .ip_pool_range_add() - .pool("default") + .pool(pool_name) .body(IpRange::V4(Ipv4Range { first, last })) .send() .await?; diff --git a/end-to-end-tests/src/helpers/ctx.rs b/end-to-end-tests/src/helpers/ctx.rs index 2c66bd4724..0b062915d3 100644 --- a/end-to-end-tests/src/helpers/ctx.rs +++ b/end-to-end-tests/src/helpers/ctx.rs @@ -287,6 +287,10 @@ impl ClientParams { .build()?; Ok(Client::new_with_client(&base_url, reqwest_client)) } + + pub fn silo_name(&self) -> String { + self.rss_config.recovery_silo.silo_name.to_string() + } } async fn wait_for_records( From c7615597fff78721585bf1e595322d7ff03e0d13 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 13 Dec 2023 10:09:05 -0600 Subject: [PATCH 53/67] move migration one more number up to 21 --- nexus/db-model/src/schema.rs | 2 +- schema/crdb/{20.0.0 => 21.0.0}/up1.sql | 0 schema/crdb/{20.0.0 => 21.0.0}/up2.sql | 0 schema/crdb/{20.0.0 => 21.0.0}/up3.sql | 0 schema/crdb/{20.0.0 => 21.0.0}/up4.sql | 0 schema/crdb/{20.0.0 => 21.0.0}/up5.sql | 0 schema/crdb/{20.0.0 => 21.0.0}/up6.sql | 0 schema/crdb/{20.0.1 => 21.0.1}/README.md | 0 schema/crdb/{20.0.1 => 21.0.1}/up1.sql | 0 schema/crdb/{20.0.1 => 21.0.1}/up2.sql | 0 schema/crdb/dbinit.sql | 2 +- 11 files changed, 2 insertions(+), 2 deletions(-) rename schema/crdb/{20.0.0 => 21.0.0}/up1.sql (100%) rename schema/crdb/{20.0.0 => 21.0.0}/up2.sql (100%) rename schema/crdb/{20.0.0 => 21.0.0}/up3.sql (100%) rename schema/crdb/{20.0.0 => 21.0.0}/up4.sql (100%) rename schema/crdb/{20.0.0 => 21.0.0}/up5.sql (100%) rename schema/crdb/{20.0.0 => 21.0.0}/up6.sql (100%) rename schema/crdb/{20.0.1 => 21.0.1}/README.md (100%) rename schema/crdb/{20.0.1 => 21.0.1}/up1.sql (100%) rename schema/crdb/{20.0.1 => 21.0.1}/up2.sql (100%) diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 99d7cfd9da..d1a6b0a07b 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1329,7 +1329,7 @@ table! { /// /// This should be updated whenever the schema is changed. 
For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(20, 0, 1); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(21, 0, 1); allow_tables_to_appear_in_same_query!( system_update, diff --git a/schema/crdb/20.0.0/up1.sql b/schema/crdb/21.0.0/up1.sql similarity index 100% rename from schema/crdb/20.0.0/up1.sql rename to schema/crdb/21.0.0/up1.sql diff --git a/schema/crdb/20.0.0/up2.sql b/schema/crdb/21.0.0/up2.sql similarity index 100% rename from schema/crdb/20.0.0/up2.sql rename to schema/crdb/21.0.0/up2.sql diff --git a/schema/crdb/20.0.0/up3.sql b/schema/crdb/21.0.0/up3.sql similarity index 100% rename from schema/crdb/20.0.0/up3.sql rename to schema/crdb/21.0.0/up3.sql diff --git a/schema/crdb/20.0.0/up4.sql b/schema/crdb/21.0.0/up4.sql similarity index 100% rename from schema/crdb/20.0.0/up4.sql rename to schema/crdb/21.0.0/up4.sql diff --git a/schema/crdb/20.0.0/up5.sql b/schema/crdb/21.0.0/up5.sql similarity index 100% rename from schema/crdb/20.0.0/up5.sql rename to schema/crdb/21.0.0/up5.sql diff --git a/schema/crdb/20.0.0/up6.sql b/schema/crdb/21.0.0/up6.sql similarity index 100% rename from schema/crdb/20.0.0/up6.sql rename to schema/crdb/21.0.0/up6.sql diff --git a/schema/crdb/20.0.1/README.md b/schema/crdb/21.0.1/README.md similarity index 100% rename from schema/crdb/20.0.1/README.md rename to schema/crdb/21.0.1/README.md diff --git a/schema/crdb/20.0.1/up1.sql b/schema/crdb/21.0.1/up1.sql similarity index 100% rename from schema/crdb/20.0.1/up1.sql rename to schema/crdb/21.0.1/up1.sql diff --git a/schema/crdb/20.0.1/up2.sql b/schema/crdb/21.0.1/up2.sql similarity index 100% rename from schema/crdb/20.0.1/up2.sql rename to schema/crdb/21.0.1/up2.sql diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 8daf4ac7c1..e9ab60ee0c 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3069,7 +3069,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '20.0.1', NULL) + ( TRUE, NOW(), NOW(), '21.0.1', NULL) ON CONFLICT DO NOTHING; COMMIT; From 6b2ffbcffde36bf592646b5b2febbd083c1f4c59 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 13 Dec 2023 10:38:56 -0600 Subject: [PATCH 54/67] update quotas test for new ip pools helpers --- nexus/tests/integration_tests/quotas.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/nexus/tests/integration_tests/quotas.rs b/nexus/tests/integration_tests/quotas.rs index 2fddf4e05c..2dcca9c4bf 100644 --- a/nexus/tests/integration_tests/quotas.rs +++ b/nexus/tests/integration_tests/quotas.rs @@ -6,16 +6,17 @@ use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::http_testing::TestResponse; +use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_local_user; use nexus_test_utils::resource_helpers::grant_iam; +use nexus_test_utils::resource_helpers::link_ip_pool; use nexus_test_utils::resource_helpers::object_create; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use nexus_types::external_api::shared; use nexus_types::external_api::shared::SiloRole; -use nexus_types::external_api::views::SiloQuotas; +use nexus_types::external_api::views::{Silo, SiloQuotas}; use 
omicron_common::api::external::ByteCount; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::InstanceCpuCount; @@ -168,7 +169,7 @@ async fn setup_silo_with_quota( silo_name: &str, quotas: params::SiloQuotasCreate, ) -> ResourceAllocator { - let silo = object_create( + let silo: Silo = object_create( client, "/v1/system/silos", ¶ms::SiloCreate { @@ -186,7 +187,10 @@ async fn setup_silo_with_quota( ) .await; - populate_ip_pool(&client, "default", None).await; + // create default pool and link to this silo. can't use + // create_default_ip_pool because that links to the default silo + create_ip_pool(&client, "default", None).await; + link_ip_pool(&client, "default", &silo.identity.id, true).await; // Create a silo user let user = create_local_user( From e4e969b79164e39fca5b6d84ff26b096f76d295e Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 13 Dec 2023 12:04:51 -0600 Subject: [PATCH 55/67] don't 500 when there is no default IP pool! be cool instead --- nexus/db-queries/src/db/datastore/ip_pool.rs | 36 ++++++---- nexus/tests/integration_tests/instances.rs | 74 ++++++++++++++------ 2 files changed, 76 insertions(+), 34 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 7abbbbd53b..4b0b579ace 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -11,6 +11,7 @@ use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel; +use crate::db::error::public_error_from_diesel_lookup; use crate::db::error::ErrorHandler; use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::identity::Resource; @@ -137,6 +138,11 @@ impl DataStore { // join ip_pool to ip_pool_resource and filter + // used in both success and error outcomes + let lookup_type = LookupType::ByCompositeId( + "Default pool for current silo".to_string(), + ); + ip_pool::table .inner_join(ip_pool_resource::table) .filter( @@ -156,19 +162,21 @@ impl DataStore { &*self.pool_connection_authorized(opctx).await?, ) .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) - .map(|ip_pool| { - ( - authz::IpPool::new( - authz::FLEET, - ip_pool.id(), - LookupType::ByCompositeId( - "Default IP Pool".to_string(), - ), - ), - ip_pool, + .map_err(|e| { + // janky to do this manually, but this is an unusual kind of + // lookup in that it is by (silo_id, is_default=true), which is + // arguably a composite ID. + public_error_from_diesel_lookup( + e, + ResourceType::IpPool, + &lookup_type, ) }) + .map(|ip_pool| { + let authz_pool = + authz::IpPool::new(authz::FLEET, ip_pool.id(), lookup_type); + (authz_pool, ip_pool) + }) } /// Looks up an IP pool intended for internal services. @@ -858,7 +866,7 @@ mod test { // we start out with no default pool, so we expect not found let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); - assert_matches!(error, Error::InternalError { .. }); + assert_matches!(error, Error::ObjectNotFound { .. }); let silo_id = opctx.authn.silo_required().unwrap().id(); @@ -887,7 +895,7 @@ mod test { // because that one was not a default, when we ask for the silo default // pool, we still get nothing let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); - assert_matches!(error, Error::InternalError { .. }); + assert_matches!(error, Error::ObjectNotFound { .. 
}); // now we can change that association to is_default=true and // it should update rather than erroring out @@ -948,7 +956,7 @@ mod test { .expect("Failed to dissociate IP pool from silo"); let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); - assert_matches!(error, Error::InternalError { .. }); + assert_matches!(error, Error::ObjectNotFound { .. }); db.cleanup().await.unwrap(); logctx.cleanup_successful(); diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 0ab773dab8..61b5ca2035 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -3693,7 +3693,6 @@ async fn test_instance_ephemeral_ip_from_orphan_pool( ); create_ip_pool(&client, "orphan-pool", Some(orphan_pool_range)).await; - // this should 404 let instance_name = "orphan-pool-inst"; let body = params::InstanceCreate { identity: IdentityMetadataCreateParams { @@ -3703,9 +3702,7 @@ async fn test_instance_ephemeral_ip_from_orphan_pool( ncpus: InstanceCpuCount(4), memory: ByteCount::from_gibibytes_u32(1), hostname: String::from("the_host"), - user_data: - b"#cloud-config\nsystem_info:\n default_user:\n name: oxide" - .to_vec(), + user_data: vec![], network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![params::ExternalIpCreate::Ephemeral { pool_name: Some("orphan-pool".parse().unwrap()), @@ -3714,15 +3711,10 @@ async fn test_instance_ephemeral_ip_from_orphan_pool( start: true, }; + // instance create 404s let url = format!("/v1/instances?project={}", PROJECT_NAME); - let error = NexusRequest::new( - RequestBuilder::new(&client, http::Method::POST, &url) - .expect_status(Some(StatusCode::NOT_FOUND)) - .body(Some(&body)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::() - .await; + let error = + object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await; assert_eq!(error.error_code.unwrap(), "ObjectNotFound".to_string()); assert_eq!( @@ -3740,14 +3732,8 @@ async fn test_instance_ephemeral_ip_from_orphan_pool( object_create(client, "/v1/system/ip-pools/orphan-pool/silos", ¶ms) .await; - let error = NexusRequest::new( - RequestBuilder::new(&client, http::Method::POST, &url) - .expect_status(Some(StatusCode::NOT_FOUND)) - .body(Some(&body)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::() - .await; + let error = + object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await; assert_eq!(error.error_code.unwrap(), "ObjectNotFound".to_string()); assert_eq!( @@ -3756,6 +3742,54 @@ async fn test_instance_ephemeral_ip_from_orphan_pool( ); } +// Test the error when creating an instance with an IP from the default pool, +// but there is no default pool +#[nexus_test] +async fn test_instance_ephemeral_ip_no_default_pool_error( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + let _ = create_project(&client, PROJECT_NAME).await; + + // important: no pool create, so there is no pool + + let body = params::InstanceCreate { + identity: IdentityMetadataCreateParams { + name: "no-default-pool".parse().unwrap(), + description: "".to_string(), + }, + ncpus: InstanceCpuCount(4), + memory: ByteCount::from_gibibytes_u32(1), + hostname: String::from("the_host"), + user_data: vec![], + network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, + external_ips: vec![params::ExternalIpCreate::Ephemeral { + pool_name: None, // <--- the only important thing here + }], + disks: 
vec![], + start: true, + }; + + let url = format!("/v1/instances?project={}", PROJECT_NAME); + let error = + object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await; + let msg = "not found: ip-pool with id \"Default pool for current silo\"" + .to_string(); + assert_eq!(error.message, msg); + + // same deal if you specify a pool that doesn't exist + let body = params::InstanceCreate { + external_ips: vec![params::ExternalIpCreate::Ephemeral { + pool_name: Some("nonexistent-pool".parse().unwrap()), + }], + ..body + }; + let error = + object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await; + assert_eq!(error.message, msg); +} + #[nexus_test] async fn test_instance_attach_several_external_ips( cptestctx: &ControlPlaneTestContext, From 80cb5d0614bc8f7fd1848322c05e832043c146b7 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 13 Dec 2023 13:50:46 -0600 Subject: [PATCH 56/67] self-review cleanup --- nexus/db-queries/src/db/datastore/ip_pool.rs | 102 +++++++++++++++---- nexus/src/external_api/http_entrypoints.rs | 18 ++-- nexus/tests/integration_tests/instances.rs | 3 - nexus/tests/integration_tests/ip_pools.rs | 14 +-- nexus/types/src/external_api/params.rs | 8 ++ nexus/types/src/external_api/views.rs | 7 ++ openapi/nexus.json | 12 ++- 7 files changed, 115 insertions(+), 49 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 4b0b579ace..393fbb08a5 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -15,22 +15,25 @@ use crate::db::error::public_error_from_diesel_lookup; use crate::db::error::ErrorHandler; use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::identity::Resource; -use crate::db::model::{ - IpPool, IpPoolRange, IpPoolResource, IpPoolResourceDelete, IpPoolUpdate, - Name, -}; +use crate::db::model::ExternalIp; +use crate::db::model::IpKind; +use crate::db::model::IpPool; +use crate::db::model::IpPoolRange; +use crate::db::model::IpPoolResource; +use crate::db::model::IpPoolResourceDelete; +use crate::db::model::IpPoolResourceType; +use crate::db::model::IpPoolUpdate; +use crate::db::model::Name; use crate::db::pagination::paginated; use crate::db::pool::DbConnection; use crate::db::queries::ip_pool::FilterOverlappingIpRanges; use crate::db::TransactionError; -use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; +use async_bb8_diesel::AsyncConnection; +use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use diesel::result::Error as DieselError; use ipnetwork::IpNetwork; -use nexus_db_model::ExternalIp; -use nexus_db_model::IpKind; -use nexus_db_model::IpPoolResourceType; use nexus_types::external_api::shared::IpRange; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; @@ -330,25 +333,24 @@ impl DataStore { use db::schema::ip_pool; use db::schema::ip_pool_resource; - let result = ip_pool::table + ip_pool::table .inner_join(ip_pool_resource::table) .filter(ip_pool::id.eq(authz_pool.id())) .filter( - ip_pool_resource::resource_type - .eq(IpPoolResourceType::Silo) - .and(ip_pool_resource::resource_id.eq(*INTERNAL_SILO_ID)), + ip_pool_resource::resource_type.eq(IpPoolResourceType::Silo), ) + .filter(ip_pool_resource::resource_id.eq(*INTERNAL_SILO_ID)) .filter(ip_pool::time_deleted.is_null()) - .select(IpPool::as_select()) - .load_async::( + .select(ip_pool::id) + .first_async::( 
&*self.pool_connection_authorized(opctx).await?, ) .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; - - // if there is a result, the pool is associated with the internal silo, - // which makes it the internal pool - Ok(result.len() > 0) + .optional() + // if there is a result, the pool is associated with the internal silo, + // which makes it the internal pool + .map(|result| Ok(result.is_some())) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? } pub async fn ip_pool_update( @@ -400,8 +402,7 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } - // TODO: separate this operation from update so that we can have /link 409 - // or whatever when the association already exists? + // TODO: should this error on conflict instead of updating? pub async fn ip_pool_associate_resource( &self, opctx: &OpContext, @@ -847,13 +848,16 @@ impl DataStore { #[cfg(test)] mod test { + use crate::authz; use crate::db::datastore::datastore_test; use crate::db::model::{IpPool, IpPoolResource, IpPoolResourceType}; use assert_matches::assert_matches; use nexus_db_model::IpPoolResourceDelete; use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Resource; - use omicron_common::api::external::{Error, IdentityMetadataCreateParams}; + use omicron_common::api::external::{ + Error, IdentityMetadataCreateParams, LookupType, + }; use omicron_test_utils::dev; // TODO: add calls to the list endpoint throughout all this @@ -961,4 +965,58 @@ mod test { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + #[tokio::test] + async fn test_internal_ip_pool() { + let logctx = dev::test_setup_log("test_internal_ip_pool"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // confirm internal pool appears as internal + let (authz_pool, _pool) = + datastore.ip_pools_service_lookup(&opctx).await.unwrap(); + + let is_internal = + datastore.ip_pool_is_internal(&opctx, &authz_pool).await; + assert_eq!(is_internal, Ok(true)); + + // another random pool should not be considered internal + let identity = IdentityMetadataCreateParams { + name: "other-pool".parse().unwrap(), + description: "".to_string(), + }; + let other_pool = datastore + .ip_pool_create(&opctx, IpPool::new(&identity)) + .await + .expect("Failed to create IP pool"); + + let authz_other_pool = authz::IpPool::new( + authz::FLEET, + other_pool.id(), + LookupType::ById(other_pool.id()), + ); + let is_internal = + datastore.ip_pool_is_internal(&opctx, &authz_other_pool).await; + assert_eq!(is_internal, Ok(false)); + + // now link it to the current silo, and it is still not internal + let silo_id = opctx.authn.silo_required().unwrap().id(); + let link = IpPoolResource { + ip_pool_id: other_pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: silo_id, + is_default: true, + }; + datastore + .ip_pool_associate_resource(&opctx, link) + .await + .expect("Failed to make IP pool default for silo"); + + let is_internal = + datastore.ip_pool_is_internal(&opctx, &authz_other_pool).await; + assert_eq!(is_internal, Ok(false)); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index ebc771ae17..0d29e1c22c 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -1278,12 +1278,6 @@ async fn project_ip_pool_view( 
.project_ip_pool_lookup(&opctx, &pool_selector, &project_lookup)? .fetch() .await?; - // TODO(2148): once we've actualy implemented filtering to pools belonging to - // the specified project, we can remove this internal check. - // TODO: do this? forget about it? - // if pool.silo_id == Some(*INTERNAL_SILO_ID) { - // return Err(authz_pool.not_found().into()); - // } Ok(HttpResponseOk(IpPool::from(pool))) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await @@ -1485,7 +1479,9 @@ async fn ip_pool_silo_link( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/// Remove an IP pool's association with a silo or project +/// Unlink an IP pool from a silo +/// +/// Will fail if there are any outstanding IPs allocated in the silo. #[endpoint { method = DELETE, path = "/v1/system/ip-pools/{pool}/silos/{silo}", @@ -1510,10 +1506,10 @@ async fn ip_pool_silo_unlink( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -// TODO: change this to PUT /ip-pools/{pool}/silos/{silo} so -// it can be used for both set default true and false - -/// Make an IP pool the default for a silo +/// Make an IP pool default or not-default for a silo +/// +/// When a pool is made default for a silo, any existing default will remain +/// linked to the silo, but will no longer be the default. #[endpoint { method = PUT, path = "/v1/system/ip-pools/{pool}/silos/{silo}", diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 61b5ca2035..b70ee707d5 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -3622,9 +3622,6 @@ async fn test_instance_ephemeral_ip_from_correct_pool( stop_instance(&cptestctx, "pool1-inst").await; stop_instance(&cptestctx, "pool2-inst").await; - // TODO: this still doesn't working because the SNAT IP is always created - // with the default pool, so even when pool2-inst was created by specifying - // pool2, the SNAT IP came from pool1 object_delete(client, &pool1_silo_url).await; // create instance with pool1, expecting allocation to fail diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 559328138c..99bda9a291 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -246,28 +246,22 @@ async fn test_ip_pool_service_no_cud(cptestctx: &ControlPlaneTestContext) { // deletes fail - let error = NexusRequest::expect_failure( + let error = object_delete_error( client, - StatusCode::NOT_FOUND, - Method::DELETE, &internal_pool_name_url, + StatusCode::NOT_FOUND, ) - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::() .await; assert_eq!( error.message, "not found: ip-pool with name \"oxide-service-pool\"" ); - let error = NexusRequest::expect_failure( + let error = object_delete_error( client, - StatusCode::NOT_FOUND, - Method::DELETE, &internal_pool_id_url, + StatusCode::NOT_FOUND, ) - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::() .await; assert_eq!( error.message, diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 9f4731708c..8e9e2a2905 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -829,11 +829,19 @@ pub struct IpPoolSiloPath { #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct IpPoolSiloLink { pub silo: NameOrId, + /// When a pool is the default for a silo, floating 
IPs and instance + /// ephemeral IPs will come from that pool when no other pool is specified. + /// There can be at most one default for a given silo. pub is_default: bool, } #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct IpPoolSiloUpdate { + /// When a pool is the default for a silo, floating IPs and instance + /// ephemeral IPs will come from that pool when no other pool is specified. + /// There can be at most one default for a given silo, so when a pool is + /// made default, an existing default will remain linked but will no longer + /// be the default. pub is_default: bool, } diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index fd0b86b75a..9ef25f7630 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -245,16 +245,23 @@ pub struct VpcRouter { // IP POOLS +/// A collection of IP ranges. If a pool is linked to a silo, IP addresses from +/// the pool can be allocated within that silo #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct IpPool { #[serde(flatten)] pub identity: IdentityMetadata, } +/// A link between an IP pool and a silo that allows one to allocate IPs from +/// the pool within the silo #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] pub struct IpPoolSilo { pub ip_pool_id: Uuid, pub silo_id: Uuid, + /// When a pool is the default for a silo, floating IPs and instance + /// ephemeral IPs will come from that pool when no other pool is specified. + /// There can be at most one default for a given silo. pub is_default: bool, } diff --git a/openapi/nexus.json b/openapi/nexus.json index db72d39914..e36d0232ef 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -5104,7 +5104,8 @@ "tags": [ "system/networking" ], - "summary": "Make an IP pool the default for a silo", + "summary": "Make an IP pool default or not-default for a silo", + "description": "When a pool is made default for a silo, any existing default will remain linked to the silo, but will no longer be the default.", "operationId": "ip_pool_silo_update", "parameters": [ { @@ -5157,7 +5158,8 @@ "tags": [ "system/networking" ], - "summary": "Remove an IP pool's association with a silo or project", + "summary": "Unlink an IP pool from a silo", + "description": "Will fail if there are any outstanding IPs allocated in the silo.", "operationId": "ip_pool_silo_unlink", "parameters": [ { @@ -12313,7 +12315,7 @@ ] }, "IpPool": { - "description": "Identity-related metadata that's included in nearly all public API objects", + "description": "A collection of IP ranges. If a pool is linked to a silo, IP addresses from the pool can be allocated within that silo", "type": "object", "properties": { "description": { @@ -12437,6 +12439,7 @@ ] }, "IpPoolSilo": { + "description": "A link between an IP pool and a silo that allows one to allocate IPs from the pool within the silo", "type": "object", "properties": { "ip_pool_id": { @@ -12444,6 +12447,7 @@ "format": "uuid" }, "is_default": { + "description": "When a pool is the default for a silo, floating IPs and instance ephemeral IPs will come from that pool when no other pool is specified. There can be at most one default for a given silo.", "type": "boolean" }, "silo_id": { @@ -12461,6 +12465,7 @@ "type": "object", "properties": { "is_default": { + "description": "When a pool is the default for a silo, floating IPs and instance ephemeral IPs will come from that pool when no other pool is specified. 
There can be at most one default for a given silo.", "type": "boolean" }, "silo": { @@ -12497,6 +12502,7 @@ "type": "object", "properties": { "is_default": { + "description": "When a pool is the default for a silo, floating IPs and instance ephemeral IPs will come from that pool when no other pool is specified. There can be at most one default for a given silo, so when a pool is made default, an existing default will remain linked but will no longer be the default.", "type": "boolean" } }, From bf2905294649a991acb42f974288c6cac996e2c1 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 13 Dec 2023 21:49:54 -0600 Subject: [PATCH 57/67] couldn't help it, had to fix the silo-scoped pools endpoints --- nexus/db-queries/src/db/datastore/ip_pool.rs | 42 ++++++ nexus/db-queries/src/db/datastore/project.rs | 38 ----- nexus/src/app/ip_pool.rs | 33 +++++ nexus/src/app/project.rs | 38 ----- nexus/src/external_api/http_entrypoints.rs | 28 +--- nexus/test-utils/src/resource_helpers.rs | 2 +- nexus/tests/integration_tests/endpoints.rs | 6 +- nexus/tests/integration_tests/ip_pools.rs | 148 +++---------------- openapi/nexus.json | 22 +-- 9 files changed, 109 insertions(+), 248 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 393fbb08a5..bf17b83149 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -40,6 +40,7 @@ use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; +use omicron_common::api::external::InternalContext; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; @@ -86,6 +87,47 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + /// List IP pools linked to the current silo + pub async fn silo_ip_pools_list( + &self, + opctx: &OpContext, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + use db::schema::ip_pool; + use db::schema::ip_pool_resource; + + // From the developer user's point of view, we treat IP pools linked to + // their silo as silo resources, so they can list them if they can list + // silo children + let authz_silo = + opctx.authn.silo_required().internal_context("listing IP pools")?; + opctx.authorize(authz::Action::ListChildren, &authz_silo).await?; + + let silo_id = authz_silo.id(); + + match pagparams { + PaginatedBy::Id(pagparams) => { + paginated(ip_pool::table, ip_pool::id, pagparams) + } + PaginatedBy::Name(pagparams) => paginated( + ip_pool::table, + ip_pool::name, + &pagparams.map_name(|n| Name::ref_cast(n)), + ), + } + .inner_join(ip_pool_resource::table) + .filter( + ip_pool_resource::resource_type + .eq(IpPoolResourceType::Silo) + .and(ip_pool_resource::resource_id.eq(silo_id)), + ) + .filter(ip_pool::time_deleted.is_null()) + .select(db::model::IpPool::as_select()) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
+ .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + /// Look up whether the given pool is available to users in the current /// silo, i.e., whether there is an entry in the association table linking /// the pool with that silo diff --git a/nexus/db-queries/src/db/datastore/project.rs b/nexus/db-queries/src/db/datastore/project.rs index abf699f5ed..e3927fdfc1 100644 --- a/nexus/db-queries/src/db/datastore/project.rs +++ b/nexus/db-queries/src/db/datastore/project.rs @@ -27,7 +27,6 @@ use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; -use nexus_db_model::IpPoolResourceType; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DeleteResult; @@ -348,41 +347,4 @@ impl DataStore { ) }) } - - /// List IP Pools accessible to a project - pub async fn project_ip_pools_list( - &self, - opctx: &OpContext, - authz_project: &authz::Project, - pagparams: &PaginatedBy<'_>, - ) -> ListResultVec { - use db::schema::ip_pool; - use db::schema::ip_pool_resource; - - opctx.authorize(authz::Action::ListChildren, authz_project).await?; - - let silo_id = opctx.authn.silo_required().unwrap().id(); - - match pagparams { - PaginatedBy::Id(pagparams) => { - paginated(ip_pool::table, ip_pool::id, pagparams) - } - PaginatedBy::Name(pagparams) => paginated( - ip_pool::table, - ip_pool::name, - &pagparams.map_name(|n| Name::ref_cast(n)), - ), - } - .inner_join(ip_pool_resource::table) - .filter( - ip_pool_resource::resource_type - .eq(IpPoolResourceType::Silo) - .and(ip_pool_resource::resource_id.eq(silo_id)), - ) - .filter(ip_pool::time_deleted.is_null()) - .select(db::model::IpPool::as_select()) - .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
- .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) - } } diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 2a931d6786..1435ebfa5d 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -10,11 +10,13 @@ use ipnetwork::IpNetwork; use nexus_db_model::IpPoolResourceDelete; use nexus_db_model::IpPoolResourceType; use nexus_db_queries::authz; +use nexus_db_queries::authz::ApiResource; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; use nexus_db_queries::db::lookup; use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::model::Name; +use nexus_types::identity::Resource; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; @@ -73,6 +75,37 @@ impl super::Nexus { self.db_datastore.ip_pool_create(opctx, pool).await } + // TODO: this is used by a developer user to see what IP pools they can use + // in their silo, so it would be nice to say which one is the default + + /// List IP pools in current silo + pub(crate) async fn silo_ip_pools_list( + &self, + opctx: &OpContext, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + self.db_datastore.silo_ip_pools_list(opctx, pagparams).await + } + + // Look up pool by name or ID, but only return it if it's linked to the + // current silo + pub async fn silo_ip_pool_fetch<'a>( + &'a self, + opctx: &'a OpContext, + pool: &'a NameOrId, + ) -> LookupResult { + let (authz_pool, pool) = + self.ip_pool_lookup(opctx, pool)?.fetch().await?; + + // 404 if no link is found in the current silo + let link = self.db_datastore.ip_pool_fetch_link(opctx, pool.id()).await; + if link.is_err() { + return Err(authz_pool.not_found()); + } + + Ok(pool) + } + pub(crate) async fn ip_pool_association_list( &self, opctx: &OpContext, diff --git a/nexus/src/app/project.rs b/nexus/src/app/project.rs index 6e8727a889..2e852ba2d3 100644 --- a/nexus/src/app/project.rs +++ b/nexus/src/app/project.rs @@ -8,7 +8,6 @@ use crate::app::sagas; use crate::external_api::params; use crate::external_api::shared; use anyhow::Context; -use nexus_db_model::Name; use nexus_db_queries::authn; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; @@ -24,7 +23,6 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::NameOrId; use omicron_common::api::external::UpdateResult; -use ref_cast::RefCast; use std::sync::Arc; impl super::Nexus { @@ -149,40 +147,4 @@ impl super::Nexus { .collect::, _>>()?; Ok(shared::Policy { role_assignments }) } - - pub(crate) async fn project_ip_pools_list( - &self, - opctx: &OpContext, - project_lookup: &lookup::Project<'_>, - pagparams: &PaginatedBy<'_>, - ) -> ListResultVec { - let (.., authz_project) = - project_lookup.lookup_for(authz::Action::ListChildren).await?; - - self.db_datastore - .project_ip_pools_list(opctx, &authz_project, pagparams) - .await - } - - pub fn project_ip_pool_lookup<'a>( - &'a self, - opctx: &'a OpContext, - pool: &'a NameOrId, - _project_lookup: &Option>, - ) -> LookupResult> { - // TODO(2148, 2056): check that the given project has access (if one - // is provided to the call) once that relation is implemented - match pool { - NameOrId::Name(name) => { - let pool = LookupPath::new(opctx, &self.db_datastore) - .ip_pool_name(Name::ref_cast(name)); - Ok(pool) - } - NameOrId::Id(id) => { - let pool = - LookupPath::new(opctx, 
&self.db_datastore).ip_pool_id(*id); - Ok(pool) - } - } - } } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 0d29e1c22c..ea226d76ab 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -42,7 +42,7 @@ use nexus_db_queries::db::identity::Resource; use nexus_db_queries::db::lookup::ImageLookup; use nexus_db_queries::db::lookup::ImageParentLookup; use nexus_db_queries::db::model::Name; -use nexus_types::external_api::{params::ProjectSelector, views::SiloQuotas}; +use nexus_types::external_api::views::SiloQuotas; use nexus_types::{ external_api::views::{SledInstance, Switch}, identity::AssetIdentityMetadata, @@ -1211,7 +1211,7 @@ async fn project_policy_update( // IP Pools -/// List all IP Pools that can be used by a given project. +/// List all IP pools #[endpoint { method = GET, path = "/v1/ip-pools", @@ -1219,14 +1219,8 @@ async fn project_policy_update( }] async fn project_ip_pool_list( rqctx: RequestContext>, - query_params: Query>, + query_params: Query, ) -> Result>, HttpError> { - // Per https://github.com/oxidecomputer/omicron/issues/2148 - // This is currently the same list as /v1/system/ip-pools, that is to say, - // IP pools that are *available to* a given project, those being ones that - // are not the internal pools for Oxide service usage. This may change - // in the future as the scoping of pools is further developed, but for now, - // this is literally a near-duplicate of `ip_pool_list`: let apictx = rqctx.context(); let handler = async { let nexus = &apictx.nexus; @@ -1235,10 +1229,8 @@ async fn project_ip_pool_list( let scan_params = ScanByNameOrId::from_query(&query)?; let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let project_lookup = - nexus.project_lookup(&opctx, scan_params.selector.clone())?; let pools = nexus - .project_ip_pools_list(&opctx, &project_lookup, &paginated_by) + .silo_ip_pools_list(&opctx, &paginated_by) .await? .into_iter() .map(IpPool::from) @@ -1261,23 +1253,13 @@ async fn project_ip_pool_list( async fn project_ip_pool_view( rqctx: RequestContext>, path_params: Path, - project: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let nexus = &apictx.nexus; let pool_selector = path_params.into_inner().pool; - let project_lookup = if let Some(project) = project.into_inner().project - { - Some(nexus.project_lookup(&opctx, ProjectSelector { project })?) - } else { - None - }; - let (_authz_pool, pool) = nexus - .project_ip_pool_lookup(&opctx, &pool_selector, &project_lookup)? 
- .fetch() - .await?; + let pool = nexus.silo_ip_pool_fetch(&opctx, &pool_selector).await?; Ok(HttpResponseOk(IpPool::from(pool))) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index bff398a64b..278a02a336 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -81,7 +81,7 @@ pub async fn object_get_error( status: StatusCode, ) -> HttpErrorResponseBody { NexusRequest::new( - RequestBuilder::new(client, Method::DELETE, path) + RequestBuilder::new(client, Method::GET, path) .expect_status(Some(status)), ) .authn_as(AuthnMode::PrivilegedUser) diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 1211cebaf4..e72c351fcf 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -500,8 +500,7 @@ lazy_static! { }; // IP Pools - pub static ref DEMO_IP_POOLS_PROJ_URL: String = - format!("/v1/ip-pools?project={}", *DEMO_PROJECT_NAME); + pub static ref DEMO_IP_POOLS_PROJ_URL: String = "/v1/ip-pools".to_string(); pub static ref DEMO_IP_POOLS_URL: &'static str = "/v1/system/ip-pools"; pub static ref DEMO_IP_POOL_NAME: Name = "default".parse().unwrap(); pub static ref DEMO_IP_POOL_CREATE: params::IpPoolCreate = @@ -511,8 +510,7 @@ lazy_static! { description: String::from("an IP pool"), }, }; - pub static ref DEMO_IP_POOL_PROJ_URL: String = - format!("/v1/ip-pools/{}?project={}", *DEMO_IP_POOL_NAME, *DEMO_PROJECT_NAME); + pub static ref DEMO_IP_POOL_PROJ_URL: String = format!("/v1/ip-pools/{}", *DEMO_IP_POOL_NAME); pub static ref DEMO_IP_POOL_URL: String = format!("/v1/system/ip-pools/{}", *DEMO_IP_POOL_NAME); pub static ref DEMO_IP_POOL_UPDATE: params::IpPoolUpdate = params::IpPoolUpdate { diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 99bda9a291..1313ea37b9 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -4,8 +4,6 @@ //! 
Integration tests for operating on IP Pools -use std::collections::HashSet; - use dropshot::test_util::ClientTestContext; use dropshot::HttpErrorResponseBody; use dropshot::ResultsPage; @@ -16,7 +14,10 @@ use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_instance; +use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_project; +use nexus_test_utils::resource_helpers::link_ip_pool; use nexus_test_utils::resource_helpers::object_create; use nexus_test_utils::resource_helpers::object_create_error; use nexus_test_utils::resource_helpers::object_delete; @@ -25,14 +26,8 @@ use nexus_test_utils::resource_helpers::object_get; use nexus_test_utils::resource_helpers::object_get_error; use nexus_test_utils::resource_helpers::object_put; use nexus_test_utils::resource_helpers::objects_list_page_authz; -use nexus_test_utils::resource_helpers::{ - create_instance, create_instance_with, -}; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; -use nexus_types::external_api::params::ExternalIpCreate; -use nexus_types::external_api::params::InstanceDiskAttachment; -use nexus_types::external_api::params::InstanceNetworkInterfaceAttachment; use nexus_types::external_api::params::IpPoolCreate; use nexus_types::external_api::params::IpPoolSiloLink; use nexus_types::external_api::params::IpPoolSiloUpdate; @@ -810,48 +805,15 @@ async fn test_ip_pool_range_pagination(cptestctx: &ControlPlaneTestContext) { } #[nexus_test] -async fn test_ip_pool_list_usable_by_project( - cptestctx: &ControlPlaneTestContext, -) { +async fn test_ip_pool_list_in_silo(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let scoped_ip_pools_url = "/v1/ip-pools"; - let ip_pools_url = "/v1/system/ip-pools"; let mypool_name = "mypool"; - let mypool_silos_url = format!("{}/{}/silos", ip_pools_url, mypool_name); - let mypool_ip_pool_add_range_url = - format!("{}/{}/ranges/add", ip_pools_url, mypool_name); - let service_ip_pool_add_range_url = - "/v1/system/ip-pools-service/ranges/add".to_string(); - - // Create an org and project, and then try to make an instance with an IP from - // each range to which the project is expected have access. 
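Before the new test body below, a quick sketch of the contract it exercises. This is a hypothetical standalone test, not part of the patch; helper names and signatures are taken from the call sites in this file (create_ip_pool, link_ip_pool, object_get, object_get_error), and the pool name is made up:

    use dropshot::test_util::ClientTestContext;
    use http::StatusCode;
    use nexus_test_utils::resource_helpers::{
        create_ip_pool, link_ip_pool, object_get, object_get_error,
    };
    use nexus_types::external_api::views::IpPool;
    use uuid::Uuid;

    // A pool only shows up under /v1/ip-pools once it is linked to the
    // caller's silo; before that, a direct fetch 404s.
    async fn check_pool_visibility(client: &ClientTestContext, silo_id: Uuid) {
        create_ip_pool(client, "sketch-pool", None).await;

        // not linked yet: the silo-scoped fetch 404s
        let url = "/v1/ip-pools/sketch-pool";
        object_get_error(client, url, StatusCode::NOT_FOUND).await;

        // linked, even as non-default: the same fetch succeeds
        link_ip_pool(client, "sketch-pool", &silo_id, false).await;
        let pool: IpPool = object_get(client, url).await;
        assert_eq!(pool.identity.name.as_str(), "sketch-pool");
    }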
const PROJECT_NAME: &str = "myproj"; - const INSTANCE_NAME: &str = "myinst"; create_project(client, PROJECT_NAME).await; - let params = IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: String::from(mypool_name).parse().unwrap(), - description: String::from("right on cue"), - }, - }; - NexusRequest::objects_post(client, ip_pools_url, ¶ms) - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::() - .await; - - // associate pool with default silo, which is the privileged user's silo - let params = IpPoolSiloLink { - silo: NameOrId::Id(DEFAULT_SILO.id()), - is_default: true, - }; - NexusRequest::objects_post(client, &mypool_silos_url, ¶ms) - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::() - .await; - - // Add an IP range to mypool + // create pool with range and link (as default pool) to default silo, which + // is the privileged user's silo let mypool_range = IpRange::V4( Ipv4Range::new( std::net::Ipv4Addr::new(10, 0, 0, 51), @@ -859,97 +821,35 @@ async fn test_ip_pool_list_usable_by_project( ) .unwrap(), ); - let created_range: IpPoolRange = NexusRequest::objects_post( - client, - &mypool_ip_pool_add_range_url, - &mypool_range, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - assert_eq!( - mypool_range.first_address(), - created_range.range.first_address() - ); - assert_eq!(mypool_range.last_address(), created_range.range.last_address()); + create_ip_pool(client, mypool_name, Some(mypool_range)).await; + link_ip_pool(client, mypool_name, &DEFAULT_SILO.id(), true).await; - // add a service range we *don't* expect to see in the results - let service_range = IpRange::V4( + // create another pool and don't link it to anything + let otherpool_name = "other-pool"; + let otherpool_range = IpRange::V4( Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 101), - std::net::Ipv4Addr::new(10, 0, 0, 102), + std::net::Ipv4Addr::new(10, 0, 0, 53), + std::net::Ipv4Addr::new(10, 0, 0, 54), ) .unwrap(), ); + create_ip_pool(client, otherpool_name, Some(otherpool_range)).await; - let created_range: IpPoolRange = NexusRequest::objects_post( - client, - &service_ip_pool_add_range_url, - &service_range, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - assert_eq!( - service_range.first_address(), - created_range.range.first_address() - ); - assert_eq!( - service_range.last_address(), - created_range.range.last_address() - ); - - // TODO: add non-service ip pools that the project *can't* use - - let list_url = format!("{}?project={}", scoped_ip_pools_url, PROJECT_NAME); - let list = NexusRequest::iter_collection_authn::( - client, &list_url, "", None, - ) - .await - .expect("Failed to list IP Pools") - .all_items; + let list = + objects_list_page_authz::(client, "/v1/ip-pools").await.items; + // only mypool shows up because it's linked to my silo assert_eq!(list.len(), 1); assert_eq!(list[0].identity.name.to_string(), mypool_name); - // currently there's only one in the list, so this is overkill. 
But there might be more - let pool_names: HashSet = - list.iter().map(|pool| pool.identity.name.to_string()).collect(); + // fetch the pool directly too + let url = format!("/v1/ip-pools/{}", mypool_name); + let pool: IpPool = object_get(client, &url).await; + assert_eq!(pool.identity.name.as_str(), mypool_name); - // ensure we can view each pool returned - for pool_name in &pool_names { - let view_pool_url = format!( - "{}/{}?project={}", - scoped_ip_pools_url, pool_name, PROJECT_NAME - ); - let pool = NexusRequest::object_get(client, &view_pool_url) - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::() - .await; - assert_eq!(pool.identity.name.as_str(), pool_name.as_str()); - } - - // ensure we can successfully create an instance with each of the pools we - // should be able to access - for pool_name in pool_names { - let instance_name = format!("{}-{}", INSTANCE_NAME, pool_name); - let pool_name = Some(Name::try_from(pool_name.to_string()).unwrap()); - create_instance_with( - client, - PROJECT_NAME, - &instance_name, - &InstanceNetworkInterfaceAttachment::Default, - Vec::::new(), - vec![ExternalIpCreate::Ephemeral { pool_name }], - ) - .await; - } + // fetching the other pool directly 404s + let url = format!("/v1/ip-pools/{}", otherpool_name); + object_get_error(client, &url, StatusCode::NOT_FOUND).await; } #[nexus_test] diff --git a/openapi/nexus.json b/openapi/nexus.json index e36d0232ef..ea3e8af8bc 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -2154,7 +2154,7 @@ "tags": [ "projects" ], - "summary": "List all IP Pools that can be used by a given project.", + "summary": "List all IP pools", "operationId": "project_ip_pool_list", "parameters": [ { @@ -2177,14 +2177,6 @@ "type": "string" } }, - { - "in": "query", - "name": "project", - "description": "Name or ID of the project", - "schema": { - "$ref": "#/components/schemas/NameOrId" - } - }, { "in": "query", "name": "sort_by", @@ -2212,9 +2204,7 @@ } }, "x-dropshot-pagination": { - "required": [ - "project" - ] + "required": [] } } }, @@ -2234,14 +2224,6 @@ "schema": { "$ref": "#/components/schemas/NameOrId" } - }, - { - "in": "query", - "name": "project", - "description": "Name or ID of the project", - "schema": { - "$ref": "#/components/schemas/NameOrId" - } } ], "responses": { From 32fcbc4c2731728a7254fdbdc5834c6884a07cb1 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 13 Dec 2023 22:57:21 -0600 Subject: [PATCH 58/67] make authz checks slightly more consistent, get rid of some assoc/dissoc language --- nexus/db-queries/src/db/datastore/ip_pool.rs | 22 ++++++++-------- nexus/db-queries/src/db/datastore/rack.rs | 2 +- .../db-queries/src/db/queries/external_ip.rs | 2 +- nexus/src/app/ip_pool.rs | 25 ++++++++----------- nexus/src/external_api/http_entrypoints.rs | 8 +++--- 5 files changed, 28 insertions(+), 31 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index bf17b83149..d766fa75d6 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -139,7 +139,9 @@ impl DataStore { use db::schema::ip_pool; use db::schema::ip_pool_resource; - let authz_silo = opctx.authn.silo_required()?; + let authz_silo = opctx.authn.silo_required().internal_context( + "fetching link from an IP pool to current silo", + )?; ip_pool::table .inner_join(ip_pool_resource::table) @@ -419,7 +421,7 @@ impl DataStore { }) } - pub async fn ip_pool_association_list( + pub async fn ip_pool_silo_list( &self, 
opctx: &OpContext, authz_pool: &authz::IpPool, @@ -445,7 +447,7 @@ impl DataStore { } // TODO: should this error on conflict instead of updating? - pub async fn ip_pool_associate_resource( + pub async fn ip_pool_link_silo( &self, opctx: &OpContext, ip_pool_resource: IpPoolResource, @@ -695,7 +697,7 @@ impl DataStore { /// Delete IP pool association with resource unless there are outstanding /// IPs allocated from the pool in the associated silo - pub async fn ip_pool_dissociate_resource( + pub async fn ip_pool_unlink_silo( &self, opctx: &OpContext, association: &IpPoolResourceDelete, @@ -926,7 +928,7 @@ mod test { .await .expect("Failed to create IP pool"); datastore - .ip_pool_associate_resource( + .ip_pool_link_silo( &opctx, IpPoolResource { ip_pool_id: pool1_for_silo.id(), @@ -946,7 +948,7 @@ mod test { // now we can change that association to is_default=true and // it should update rather than erroring out datastore - .ip_pool_associate_resource( + .ip_pool_link_silo( &opctx, IpPoolResource { ip_pool_id: pool1_for_silo.id(), @@ -975,7 +977,7 @@ mod test { .await .expect("Failed to create pool"); let err = datastore - .ip_pool_associate_resource( + .ip_pool_link_silo( &opctx, IpPoolResource { ip_pool_id: second_silo_default.id(), @@ -990,7 +992,7 @@ mod test { // now remove the association and we should get nothing again datastore - .ip_pool_dissociate_resource( + .ip_pool_unlink_silo( &opctx, &IpPoolResourceDelete { ip_pool_id: pool1_for_silo.id(), @@ -999,7 +1001,7 @@ mod test { }, ) .await - .expect("Failed to dissociate IP pool from silo"); + .expect("Failed to unlink IP pool from silo"); let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); assert_matches!(error, Error::ObjectNotFound { .. }); @@ -1050,7 +1052,7 @@ mod test { is_default: true, }; datastore - .ip_pool_associate_resource(&opctx, link) + .ip_pool_link_silo(&opctx, link) .await .expect("Failed to make IP pool default for silo"); diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 32329ba23c..ca50607b29 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -773,7 +773,7 @@ impl DataStore { // make default for the internal silo. only need to do this if // the create went through, i.e., if it wasn't already there if internal_created { - self.ip_pool_associate_resource( + self.ip_pool_link_silo( opctx, db::model::IpPoolResource { ip_pool_id: internal_pool_id, diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index 1ab878dc1c..49403aac61 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -897,7 +897,7 @@ mod tests { is_default, }; self.db_datastore - .ip_pool_associate_resource(&self.opctx, association) + .ip_pool_link_silo(&self.opctx, association) .await .expect("Failed to associate IP pool with silo"); diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 1435ebfa5d..6dd0d2988a 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -106,46 +106,43 @@ impl super::Nexus { pub(crate) async fn ip_pool_silo_list( &self, opctx: &OpContext, pool_lookup: &lookup::IpPool<'_>, pagparams: &DataPageParams<'_, Uuid>, ) -> ListResultVec { - // TODO: is this the right action to check?
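Taken together, the renames above reduce the datastore surface for silo links to a link/unlink pair. A condensed sketch of how a caller drives it, with field sets assumed from the call sites in this diff (the wrapper function itself is hypothetical, not part of the patch):

    use nexus_db_model::{
        IpPoolResource, IpPoolResourceDelete, IpPoolResourceType,
    };
    use nexus_db_queries::context::OpContext;
    use nexus_db_queries::db::DataStore;
    use omicron_common::api::external::Error;
    use uuid::Uuid;

    // Link a pool to a silo as a non-default, then unlink it again.
    async fn link_then_unlink(
        datastore: &DataStore,
        opctx: &OpContext,
        pool_id: Uuid,
        silo_id: Uuid,
    ) -> Result<(), Error> {
        datastore
            .ip_pool_link_silo(
                opctx,
                IpPoolResource {
                    ip_pool_id: pool_id,
                    resource_type: IpPoolResourceType::Silo,
                    resource_id: silo_id,
                    is_default: false,
                },
            )
            .await?;
        datastore
            .ip_pool_unlink_silo(
                opctx,
                &IpPoolResourceDelete {
                    ip_pool_id: pool_id,
                    resource_type: IpPoolResourceType::Silo,
                    resource_id: silo_id,
                },
            )
            .await?;
        Ok(())
    }

Note that per the doc comment above, the unlink path still refuses to remove a link while the silo has outstanding IPs allocated from the pool.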
let (.., authz_pool) = pool_lookup.lookup_for(authz::Action::ListChildren).await?; - self.db_datastore - .ip_pool_association_list(opctx, &authz_pool, pagparams) - .await + self.db_datastore.ip_pool_silo_list(opctx, &authz_pool, pagparams).await } - pub(crate) async fn ip_pool_associate_resource( + pub(crate) async fn ip_pool_link_silo( &self, opctx: &OpContext, pool_lookup: &lookup::IpPool<'_>, silo_link: ¶ms::IpPoolSiloLink, ) -> CreateResult { - let (.., authz_pool) = + let (authz_pool,) = pool_lookup.lookup_for(authz::Action::Modify).await?; - let (silo,) = self + let (authz_silo,) = self .silo_lookup(&opctx, silo_link.silo.clone())? - .lookup_for(authz::Action::Read) + .lookup_for(authz::Action::Modify) .await?; self.db_datastore - .ip_pool_associate_resource( + .ip_pool_link_silo( opctx, db::model::IpPoolResource { ip_pool_id: authz_pool.id(), resource_type: db::model::IpPoolResourceType::Silo, - resource_id: silo.id(), + resource_id: authz_silo.id(), is_default: silo_link.is_default, }, ) .await } - pub(crate) async fn ip_pool_dissociate_resource( + pub(crate) async fn ip_pool_unlink_silo( &self, opctx: &OpContext, pool_lookup: &lookup::IpPool<'_>, @@ -157,7 +154,7 @@ impl super::Nexus { silo_lookup.lookup_for(authz::Action::Modify).await?; self.db_datastore - .ip_pool_dissociate_resource( + .ip_pool_unlink_silo( opctx, &IpPoolResourceDelete { ip_pool_id: authz_pool.id(), @@ -178,7 +175,7 @@ impl super::Nexus { let (.., authz_pool) = pool_lookup.lookup_for(authz::Action::Modify).await?; let (.., authz_silo) = - silo_lookup.lookup_for(authz::Action::Read).await?; + silo_lookup.lookup_for(authz::Action::Modify).await?; self.db_datastore .ip_pool_set_default( diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index ea226d76ab..b45e0f0de4 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -1420,7 +1420,7 @@ async fn ip_pool_silo_list( let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; let assocs = nexus - .ip_pool_association_list(&opctx, &pool_lookup, &pag_params) + .ip_pool_silo_list(&opctx, &pool_lookup, &pag_params) .await? 
.into_iter() .map(|assoc| assoc.into()) @@ -1454,7 +1454,7 @@ async fn ip_pool_silo_link( let resource_assoc = resource_assoc.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; let assoc = nexus - .ip_pool_associate_resource(&opctx, &pool_lookup, &resource_assoc) + .ip_pool_link_silo(&opctx, &pool_lookup, &resource_assoc) .await?; Ok(HttpResponseCreated(assoc.into())) }; @@ -1480,9 +1480,7 @@ async fn ip_pool_silo_unlink( let path = path_params.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; - nexus - .ip_pool_dissociate_resource(&opctx, &pool_lookup, &silo_lookup) - .await?; + nexus.ip_pool_unlink_silo(&opctx, &pool_lookup, &silo_lookup).await?; Ok(HttpResponseUpdatedNoContent()) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await From dc9de629229a391740fb9f4f012e198f84f180f8 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Thu, 14 Dec 2023 10:49:15 -0600 Subject: [PATCH 59/67] PUT link 404s instead of 500 when the link doesn't exist --- nexus/db-queries/src/db/datastore/ip_pool.rs | 23 +++++++++++++------ nexus/test-utils/src/resource_helpers.rs | 22 ++++++++++++++++++ nexus/tests/integration_tests/ip_pools.rs | 24 ++++++++++++-------- 3 files changed, 53 insertions(+), 16 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index d766fa75d6..b98e171989 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -508,10 +508,8 @@ impl DataStore { ) -> UpdateResult { use db::schema::ip_pool_resource::dsl; - // TODO: correct auth check - opctx - .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) - .await?; + opctx.authorize(authz::Action::Modify, authz_ip_pool).await?; + opctx.authorize(authz::Action::Modify, authz_silo).await?; let ip_pool_id = authz_ip_pool.id(); let silo_id = authz_silo.id(); @@ -580,7 +578,6 @@ impl DataStore { } } - // TODO: test that this errors if the link doesn't exist already let updated_link = diesel::update(dsl::ip_pool_resource) .filter(dsl::resource_id.eq(silo_id)) .filter(dsl::ip_pool_id.eq(ip_pool_id)) @@ -592,8 +589,20 @@ impl DataStore { Ok(updated_link) }) .await - .map_err(|e| { - Error::internal_error(&format!("Transaction error: {:?}", e)) + .map_err(|e| match e { + TransactionError::CustomError( + IpPoolResourceUpdateError::FailedToUnsetDefault(e), + ) => public_error_from_diesel(e, ErrorHandler::Server), + TransactionError::Database(e) => public_error_from_diesel( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::IpPoolResource, + // TODO: would be nice to put the actual names and/or ids in + // here but LookupType on each of the two silos doesn't have + // a nice to_string yet or a way of composing them + LookupType::ByCompositeId(format!("(pool, silo)")), + ), + ), }) } diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 278a02a336..c2516a1509 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -155,6 +155,28 @@ where .unwrap() } +pub async fn object_put_error( + client: &ClientTestContext, + path: &str, + input: &InputType, + status: StatusCode, +) -> HttpErrorResponseBody +where + InputType: serde::Serialize, +{ + NexusRequest::new( + RequestBuilder::new(client, Method::PUT, path) + .body(Some(&input)) + .expect_status(Some(status)), + ) + .authn_as(AuthnMode::PrivilegedUser) + 
.execute()
+ .await
+ .unwrap()
+ .parsed_body::<HttpErrorResponseBody>()
+ .unwrap()
+}
+
 pub async fn object_delete(client: &ClientTestContext, path: &str) {
 NexusRequest::object_delete(client, path)
 .authn_as(AuthnMode::PrivilegedUser)
diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs
index 1313ea37b9..770796affc 100644
--- a/nexus/tests/integration_tests/ip_pools.rs
+++ b/nexus/tests/integration_tests/ip_pools.rs
@@ -25,6 +25,7 @@ use nexus_test_utils::resource_helpers::object_delete_error;
 use nexus_test_utils::resource_helpers::object_get;
 use nexus_test_utils::resource_helpers::object_get_error;
 use nexus_test_utils::resource_helpers::object_put;
+use nexus_test_utils::resource_helpers::object_put_error;
 use nexus_test_utils::resource_helpers::objects_list_page_authz;
 use nexus_test_utils_macros::nexus_test;
 use nexus_types::external_api::params;
@@ -306,12 +307,7 @@ async fn test_ip_pool_silo_link(cptestctx: &ControlPlaneTestContext) {
 // get silo ID so we can test association by ID as well
 let silo_url = format!("/v1/system/silos/{}", cptestctx.silo_name);
- let silo_id = NexusRequest::object_get(client, &silo_url)
- .authn_as(AuthnMode::PrivilegedUser)
- .execute_and_parse_unwrap::<Silo>()
- .await
- .identity
- .id;
+ let silo_id = object_get::<Silo>(client, &silo_url).await.identity.id;

 let assocs_p0 = silos_for_pool(client, "p0").await;
 let silo_link =
@@ -383,7 +379,19 @@ async fn test_ip_pool_update_default(cptestctx: &ControlPlaneTestContext) {
 let silos_p1 = silos_for_pool(client, "p1").await;
 assert_eq!(silos_p1.items.len(), 0);

- // associated both pools with the test silo
+ // put 404s if link doesn't exist yet
+ let params = IpPoolSiloUpdate { is_default: true };
+ let p0_silo_url =
+ format!("/v1/system/ip-pools/p0/silos/{}", cptestctx.silo_name);
+ let error =
+ object_put_error(client, &p0_silo_url, &params, StatusCode::NOT_FOUND)
+ .await;
+ assert_eq!(
+ error.message,
+ "not found: ip-pool-resource with id \"(pool, silo)\""
+ );
+
+ // associate both pools with the test silo
 let silo = NameOrId::Name(cptestctx.silo_name.clone());
 let params =
 params::IpPoolSiloLink { silo: silo.clone(), is_default: false };
@@ -403,8 +411,6 @@ async fn test_ip_pool_update_default(cptestctx: &ControlPlaneTestContext) {
 // make p0 default
 let params = IpPoolSiloUpdate { is_default: true };
- let p0_silo_url =
- format!("/v1/system/ip-pools/p0/silos/{}", cptestctx.silo_name);
 let _: IpPoolSilo = object_put(client, &p0_silo_url, &params).await;

 // making the same one default again is not an error

From ee0f363378a039a3d5e179ed5a22b83c9dcae0fb Mon Sep 17 00:00:00 2001
From: David Crespo
Date: Thu, 14 Dec 2023 11:07:03 -0600
Subject: [PATCH 60/67] clarify and clean up migrations, remove a couple of
 TODOs

---
 nexus/db-queries/src/db/datastore/ip_pool.rs | 2 +-
 nexus/db-queries/src/db/datastore/rack.rs | 8 ++----
 schema/crdb/21.0.0/up4.sql | 26 +++++++++++---------
 schema/crdb/21.0.0/up5.sql | 8 +++---
 schema/crdb/21.0.0/up6.sql | 10 +++++---
 5 files changed, 30 insertions(+), 24 deletions(-)

diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs
index b98e171989..fc47db9f09 100644
--- a/nexus/db-queries/src/db/datastore/ip_pool.rs
+++ b/nexus/db-queries/src/db/datastore/ip_pool.rs
@@ -600,7 +600,7 @@ impl DataStore {
 // TODO: would be nice to put the actual names and/or ids in
 // here but LookupType on each of the two silos doesn't have
 // a nice to_string yet or a way of composing them
- LookupType::ByCompositeId(format!("(pool,
silo)")), + LookupType::ByCompositeId("(pool, silo)".to_string()), ), ), }) diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index ca50607b29..50bae03c2d 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -1330,9 +1330,7 @@ mod test { // been allocated as a part of the service IP pool. let (.., svc_pool) = datastore.ip_pools_service_lookup(&opctx).await.unwrap(); - // TODO: do we care? we should just check that the name or ID of the - // pool itself matches the known name or ID of the service pool - // assert_eq!(svc_pool.silo_id, Some(*INTERNAL_SILO_ID)); + assert_eq!(svc_pool.name().as_str(), "oxide-service-pool"); let observed_ip_pool_ranges = get_all_ip_pool_ranges(&datastore).await; assert_eq!(observed_ip_pool_ranges.len(), 1); @@ -1534,9 +1532,7 @@ mod test { // allocated as a part of the service IP pool. let (.., svc_pool) = datastore.ip_pools_service_lookup(&opctx).await.unwrap(); - // TODO: do we care? we should just check that the name or ID of the - // pool itself matches the known name or ID of the service pool - // assert_eq!(svc_pool.silo_id, Some(*INTERNAL_SILO_ID)); + assert_eq!(svc_pool.name().as_str(), "oxide-service-pool"); let observed_ip_pool_ranges = get_all_ip_pool_ranges(&datastore).await; assert_eq!(observed_ip_pool_ranges.len(), 1); diff --git a/schema/crdb/21.0.0/up4.sql b/schema/crdb/21.0.0/up4.sql index a5ba739755..51cdaaa4f3 100644 --- a/schema/crdb/21.0.0/up4.sql +++ b/schema/crdb/21.0.0/up4.sql @@ -1,22 +1,26 @@ --- copy existing fleet associations into association table. treat all existing --- pools as fleet-associated because that is the current behavior +-- Fleet-scoped pools are going away, but we recreate the equivalent of a fleet +-- link for existing fleet-scoped pools by associating them with every existing +-- silo, i.e., inserting a row into the association table for each (pool, silo) +-- pair. +-- +-- Note special handling is required for conflicts between a fleet default and +-- a silo default. If pool P1 is a fleet default and pool P2 is a silo default +-- on silo S1, we cannot link both to S1 with is_default = true. What we really +-- want in that case is link it to S1 with is_default = false. So first, here we +-- copy the "original" value of is_default to the link between P1 and S1. Then, +-- in up5, we flip is_default to false on P1 when we see that P2 wants to be the +-- default for S1. INSERT INTO omicron.public.ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) SELECT p.id AS ip_pool_id, 'silo' AS resource_type, s.id AS resource_id, - -- note problem solved by up5.sql after this regarding is_default: if pool P1 - -- is a fleet default and pool P2 is a silo default on silo S1, we cannot link - -- both to S1 with is_default = true. what we really want in that case is link - -- it to S1 with is_default = false. 
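To make the default-conflict handling concrete, here is a hedged illustration; the pool and silo identifiers are placeholders, not values the migration uses:

```sql
-- Suppose P1 was the fleet-wide default and P2 is silo S1's own default.
-- Once both copy steps have run, the join table should hold exactly:
--
--   ip_pool_id | resource_type | resource_id | is_default
--   -----------+---------------+-------------+-----------
--   P1         | silo          | S1          | false   (fleet default demoted)
--   P2         | silo          | S1          | true    (silo default wins)
--
-- i.e., at most one default row per silo, which a query like this can
-- verify after the upgrade:
SELECT ip_pool_id, resource_type, resource_id, is_default
FROM omicron.public.ip_pool_resource
WHERE resource_id = 'a0000000-0000-4000-8000-000000000001'; -- placeholder S1
```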
So first, here we copy the "original" - -- value of is_default, and then in up5 we flip is_default to false if there - -- is a conflicting default silo-linked pool p.is_default -FROM ip_pool AS p -CROSS JOIN silo AS s +FROM omicron.public.ip_pool AS p +CROSS JOIN omicron.public.silo AS s WHERE p.time_deleted IS null AND p.silo_id IS null -- means it's a fleet pool AND s.time_deleted IS null --- make this idempotent +-- this makes it idempotent ON CONFLICT (ip_pool_id, resource_type, resource_id) DO NOTHING; diff --git a/schema/crdb/21.0.0/up5.sql b/schema/crdb/21.0.0/up5.sql index d5f86a8ca7..037274b015 100644 --- a/schema/crdb/21.0.0/up5.sql +++ b/schema/crdb/21.0.0/up5.sql @@ -1,10 +1,12 @@ --- turn any former fleet defaults into non-defaults if there's going to be a --- silo conflicting with it +-- Preemptively turn any former fleet defaults into non-defaults if there's +-- going to be a silo conflicting with it after up6 UPDATE omicron.public.ip_pool_resource AS ipr SET is_default = false FROM omicron.public.ip_pool as ip -WHERE ipr.is_default = true +WHERE ipr.resource_type = 'silo' -- technically unnecessary because there is only silo + AND ipr.is_default = true AND ip.is_default = true -- both being default is the conflict being resolved + AND ip.silo_id IS NOT NULL AND ip.silo_id = ipr.resource_id AND ip.id = ipr.ip_pool_id; diff --git a/schema/crdb/21.0.0/up6.sql b/schema/crdb/21.0.0/up6.sql index 570f3dba7b..f8b2e3ddf9 100644 --- a/schema/crdb/21.0.0/up6.sql +++ b/schema/crdb/21.0.0/up6.sql @@ -1,9 +1,13 @@ -- copy existing ip_pool-to-silo associations into association table INSERT INTO omicron.public.ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) -SELECT id, 'silo', silo_id, is_default -FROM ip_pool +SELECT + id as ip_pool_id, + 'silo' as resource_type, + silo_id as resource_id, + is_default +FROM omicron.public.ip_pool AS ip WHERE silo_id IS NOT null AND time_deleted IS null --- make this idempotent +-- this makes it idempotent ON CONFLICT (ip_pool_id, resource_type, resource_id) DO NOTHING; From 3830c2a59c205b1e3c8398edcee24eb5ef2d21c3 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Thu, 14 Dec 2023 11:57:54 -0600 Subject: [PATCH 61/67] unlink takes authz_silo and authz_pool directly and does better auth check --- nexus/db-model/src/ip_pool.rs | 9 ---- nexus/db-queries/src/db/datastore/ip_pool.rs | 54 +++++++++----------- nexus/src/app/ip_pool.rs | 11 +--- 3 files changed, 24 insertions(+), 50 deletions(-) diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index fc29455e1b..bec1113151 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -97,15 +97,6 @@ pub struct IpPoolResource { pub is_default: bool, } -/// Information required to delete an IP pool association. Comes from request -/// params -- silo is a NameOrId and must be resolved to ID. 
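The helper struct removed just below carried raw IDs up from the request layer; after this patch the datastore takes resolved authz handles instead, so it can check permissions on both sides of the link. A minimal sketch of the new calling convention, mirroring the app-layer change later in this patch:

```rust
// Resolve both lookups to authz objects first, then hand those to the
// datastore, which authorizes Modify on the pool and the silo alike.
let (.., authz_pool) =
    pool_lookup.lookup_for(authz::Action::Modify).await?;
let (.., authz_silo) =
    silo_lookup.lookup_for(authz::Action::Modify).await?;
self.db_datastore
    .ip_pool_unlink_silo(opctx, &authz_pool, &authz_silo)
    .await
```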
-#[derive(Clone, Debug)]
-pub struct IpPoolResourceDelete {
- pub ip_pool_id: Uuid,
- pub resource_type: IpPoolResourceType,
- pub resource_id: Uuid,
-}
-
 impl From<IpPoolResource> for views::IpPoolSilo {
 fn from(assoc: IpPoolResource) -> Self {
 Self {
diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs
index fc47db9f09..c7c1064a27 100644
--- a/nexus/db-queries/src/db/datastore/ip_pool.rs
+++ b/nexus/db-queries/src/db/datastore/ip_pool.rs
@@ -20,7 +20,6 @@ use crate::db::model::IpKind;
 use crate::db::model::IpPool;
 use crate::db::model::IpPoolRange;
 use crate::db::model::IpPoolResource;
-use crate::db::model::IpPoolResourceDelete;
 use crate::db::model::IpPoolResourceType;
 use crate::db::model::IpPoolUpdate;
 use crate::db::model::Name;
@@ -612,14 +611,12 @@ impl DataStore {
 async fn ensure_no_instance_ips_outstanding(
 &self,
 opctx: &OpContext,
- association: &IpPoolResourceDelete,
+ authz_pool: &authz::IpPool,
+ authz_silo: &authz::Silo,
 ) -> Result<(), Error> {
 use db::schema::external_ip;
 use db::schema::instance;
 use db::schema::project;
- opctx
- .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST)
- .await?;

 let existing_ips = external_ip::table
 .inner_join(
@@ -630,12 +627,12 @@
 .filter(external_ip::is_service.eq(false))
 .filter(external_ip::parent_id.is_not_null())
 .filter(external_ip::time_deleted.is_null())
- .filter(external_ip::ip_pool_id.eq(association.ip_pool_id))
+ .filter(external_ip::ip_pool_id.eq(authz_pool.id()))
 // important, floating IPs are handled separately
 .filter(external_ip::kind.eq(IpKind::Ephemeral).or(external_ip::kind.eq(IpKind::SNat)))
 .filter(instance::time_deleted.is_null())
 // we have to join through IPs to instances to projects to get the silo ID
- .filter(project::silo_id.eq(association.resource_id))
+ .filter(project::silo_id.eq(authz_silo.id()))
 .select(ExternalIp::as_select())
 .limit(1)
 .load_async::<ExternalIp>(
@@ -663,13 +660,11 @@
 async fn ensure_no_floating_ips_outstanding(
 &self,
 opctx: &OpContext,
- association: &IpPoolResourceDelete,
+ authz_pool: &authz::IpPool,
+ authz_silo: &authz::Silo,
 ) -> Result<(), Error> {
 use db::schema::external_ip;
 use db::schema::project;
- opctx
- .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST)
- .await?;

 let existing_ips = external_ip::table
 .inner_join(project::table.on(external_ip::project_id.eq(project::id.nullable())))
 .filter(external_ip::is_service.eq(false))
 .filter(external_ip::time_deleted.is_null())
 // all floating IPs have a project
 .filter(external_ip::project_id.is_not_null())
- .filter(external_ip::ip_pool_id.eq(association.ip_pool_id))
+ .filter(external_ip::ip_pool_id.eq(authz_pool.id()))
 .filter(external_ip::kind.eq(IpKind::Floating))
 // we have to join through IPs to projects to get the silo ID
- .filter(project::silo_id.eq(association.resource_id))
+ .filter(project::silo_id.eq(authz_silo.id()))
 .filter(project::time_deleted.is_null())
 .select(ExternalIp::as_select())
 .limit(1)
@@ -709,21 +704,24 @@
 pub async fn ip_pool_unlink_silo(
 &self,
 opctx: &OpContext,
- association: &IpPoolResourceDelete,
+ authz_pool: &authz::IpPool,
+ authz_silo: &authz::Silo,
 ) -> DeleteResult {
 use db::schema::ip_pool_resource;
- opctx
- .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST)
- .await?;
+
+ opctx.authorize(authz::Action::Modify, authz_pool).await?;
+ opctx.authorize(authz::Action::Modify, authz_silo).await?;

 // We can only delete the association if there are no IPs allocated
 // from this pool in the
associated resource. - self.ensure_no_instance_ips_outstanding(opctx, association).await?; - self.ensure_no_floating_ips_outstanding(opctx, association).await?; + self.ensure_no_instance_ips_outstanding(opctx, authz_pool, authz_silo) + .await?; + self.ensure_no_floating_ips_outstanding(opctx, authz_pool, authz_silo) + .await?; diesel::delete(ip_pool_resource::table) - .filter(ip_pool_resource::ip_pool_id.eq(association.ip_pool_id)) - .filter(ip_pool_resource::resource_id.eq(association.resource_id)) + .filter(ip_pool_resource::ip_pool_id.eq(authz_pool.id())) + .filter(ip_pool_resource::resource_id.eq(authz_silo.id())) .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map(|_rows_deleted| ()) @@ -905,7 +903,6 @@ mod test { use crate::db::datastore::datastore_test; use crate::db::model::{IpPool, IpPoolResource, IpPoolResourceType}; use assert_matches::assert_matches; - use nexus_db_model::IpPoolResourceDelete; use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Resource; use omicron_common::api::external::{ @@ -970,7 +967,7 @@ mod test { .expect("Failed to make IP pool default for silo"); // now when we ask for the default pool again, we get that one - let (.., ip_pool) = datastore + let (authz_pool1_for_silo, ip_pool) = datastore .ip_pools_fetch_default(&opctx) .await .expect("Failed to get silo's default IP pool"); @@ -1000,15 +997,10 @@ mod test { assert_matches!(err, Error::ObjectAlreadyExists { .. }); // now remove the association and we should get nothing again + let authz_silo = + authz::Silo::new(authz::Fleet, silo_id, LookupType::ById(silo_id)); datastore - .ip_pool_unlink_silo( - &opctx, - &IpPoolResourceDelete { - ip_pool_id: pool1_for_silo.id(), - resource_id: silo_id, - resource_type: IpPoolResourceType::Silo, - }, - ) + .ip_pool_unlink_silo(&opctx, &authz_pool1_for_silo, &authz_silo) .await .expect("Failed to unlink IP pool from silo"); diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 6dd0d2988a..830876591e 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -7,8 +7,6 @@ use crate::external_api::params; use crate::external_api::shared::IpRange; use ipnetwork::IpNetwork; -use nexus_db_model::IpPoolResourceDelete; -use nexus_db_model::IpPoolResourceType; use nexus_db_queries::authz; use nexus_db_queries::authz::ApiResource; use nexus_db_queries::context::OpContext; @@ -154,14 +152,7 @@ impl super::Nexus { silo_lookup.lookup_for(authz::Action::Modify).await?; self.db_datastore - .ip_pool_unlink_silo( - opctx, - &IpPoolResourceDelete { - ip_pool_id: authz_pool.id(), - resource_id: authz_silo.id(), - resource_type: IpPoolResourceType::Silo, - }, - ) + .ip_pool_unlink_silo(opctx, &authz_pool, &authz_silo) .await } From 06fcc99e279e3ce9ad31bc88cf8e9594d424c37f Mon Sep 17 00:00:00 2001 From: David Crespo Date: Thu, 14 Dec 2023 13:31:22 -0600 Subject: [PATCH 62/67] simplify and clarify migrations by using a subquery --- schema/crdb/21.0.0/up4.sql | 30 +++++++++++++++++++++--------- schema/crdb/21.0.0/up5.sql | 25 +++++++++++++------------ schema/crdb/21.0.0/up6.sql | 13 ------------- 3 files changed, 34 insertions(+), 34 deletions(-) delete mode 100644 schema/crdb/21.0.0/up6.sql diff --git a/schema/crdb/21.0.0/up4.sql b/schema/crdb/21.0.0/up4.sql index 51cdaaa4f3..8fb43f9cf1 100644 --- a/schema/crdb/21.0.0/up4.sql +++ b/schema/crdb/21.0.0/up4.sql @@ -1,22 +1,34 @@ +-- Copy existing fleet-scoped pools over to the pool-silo join table +-- -- Fleet-scoped pools are going away, but we recreate the 
equivalent of a fleet -- link for existing fleet-scoped pools by associating them with every existing -- silo, i.e., inserting a row into the association table for each (pool, silo) -- pair. --- --- Note special handling is required for conflicts between a fleet default and --- a silo default. If pool P1 is a fleet default and pool P2 is a silo default --- on silo S1, we cannot link both to S1 with is_default = true. What we really --- want in that case is link it to S1 with is_default = false. So first, here we --- copy the "original" value of is_default to the link between P1 and S1. Then, --- in up5, we flip is_default to false on P1 when we see that P2 wants to be the --- default for S1. +set local disallow_full_table_scans = off; + INSERT INTO omicron.public.ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) SELECT p.id AS ip_pool_id, 'silo' AS resource_type, s.id AS resource_id, - p.is_default + -- Special handling is required for conflicts between a fleet default and a + -- silo default. If pool P1 is a fleet default and pool P2 is a silo default + -- on silo S1, we cannot link both to S1 with is_default = true. What we + -- really want in that case is: + -- + -- row 1: (P1, S1, is_default=false) + -- row 2: (P2, S1, is_default=true) + -- + -- i.e., we want to link both, but have the silo default take precedence. The + -- AND NOT EXISTS here causes is_default to be false in row 1 if there is a + -- conflicting silo default pool. row 2 is inserted in up5. + p.is_default AND NOT EXISTS ( + SELECT 1 FROM omicron.public.ip_pool + WHERE silo_id = s.id AND is_default + ) FROM omicron.public.ip_pool AS p +-- cross join means we are looking at the cartesian product of all fleet-scoped +-- IP pools and all silos CROSS JOIN omicron.public.silo AS s WHERE p.time_deleted IS null AND p.silo_id IS null -- means it's a fleet pool diff --git a/schema/crdb/21.0.0/up5.sql b/schema/crdb/21.0.0/up5.sql index 037274b015..3c1b100c9b 100644 --- a/schema/crdb/21.0.0/up5.sql +++ b/schema/crdb/21.0.0/up5.sql @@ -1,12 +1,13 @@ --- Preemptively turn any former fleet defaults into non-defaults if there's --- going to be a silo conflicting with it after up6 -UPDATE omicron.public.ip_pool_resource AS ipr -SET is_default = false -FROM omicron.public.ip_pool as ip -WHERE ipr.resource_type = 'silo' -- technically unnecessary because there is only silo - AND ipr.is_default = true - AND ip.is_default = true -- both being default is the conflict being resolved - AND ip.silo_id IS NOT NULL - AND ip.silo_id = ipr.resource_id - AND ip.id = ipr.ip_pool_id; - +-- Copy existing silo-scoped pools over to the pool-silo join table +INSERT INTO omicron.public.ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) +SELECT + id as ip_pool_id, + 'silo' as resource_type, + silo_id as resource_id, + is_default +FROM omicron.public.ip_pool AS ip +WHERE silo_id IS NOT null + AND time_deleted IS null +-- this makes it idempotent +ON CONFLICT (ip_pool_id, resource_type, resource_id) +DO NOTHING; diff --git a/schema/crdb/21.0.0/up6.sql b/schema/crdb/21.0.0/up6.sql deleted file mode 100644 index f8b2e3ddf9..0000000000 --- a/schema/crdb/21.0.0/up6.sql +++ /dev/null @@ -1,13 +0,0 @@ --- copy existing ip_pool-to-silo associations into association table -INSERT INTO omicron.public.ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) -SELECT - id as ip_pool_id, - 'silo' as resource_type, - silo_id as resource_id, - is_default -FROM omicron.public.ip_pool AS ip -WHERE silo_id IS NOT null - AND 
time_deleted IS null --- this makes it idempotent -ON CONFLICT (ip_pool_id, resource_type, resource_id) -DO NOTHING; From e02a0e620541f2e9bb1685b796dd9b87d9aa85d3 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 3 Jan 2024 13:07:34 -0500 Subject: [PATCH 63/67] move migration up --- nexus/db-model/src/schema.rs | 14 +++++++------- schema/crdb/{21.0.0 => 22.0.0}/up1.sql | 0 schema/crdb/{21.0.0 => 22.0.0}/up2.sql | 0 schema/crdb/{21.0.0 => 22.0.0}/up3.sql | 0 schema/crdb/{21.0.0 => 22.0.0}/up4.sql | 0 schema/crdb/{21.0.0 => 22.0.0}/up5.sql | 0 schema/crdb/{21.0.1 => 22.0.1}/README.md | 0 schema/crdb/{21.0.1 => 22.0.1}/up1.sql | 0 schema/crdb/{21.0.1 => 22.0.1}/up2.sql | 0 schema/crdb/dbinit.sql | 2 +- 10 files changed, 8 insertions(+), 8 deletions(-) rename schema/crdb/{21.0.0 => 22.0.0}/up1.sql (100%) rename schema/crdb/{21.0.0 => 22.0.0}/up2.sql (100%) rename schema/crdb/{21.0.0 => 22.0.0}/up3.sql (100%) rename schema/crdb/{21.0.0 => 22.0.0}/up4.sql (100%) rename schema/crdb/{21.0.0 => 22.0.0}/up5.sql (100%) rename schema/crdb/{21.0.1 => 22.0.1}/README.md (100%) rename schema/crdb/{21.0.1 => 22.0.1}/up1.sql (100%) rename schema/crdb/{21.0.1 => 22.0.1}/up2.sql (100%) diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 4c42b26519..b157b1be5f 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -8,6 +8,13 @@ use omicron_common::api::external::SemverVersion; +/// The version of the database schema this particular version of Nexus was +/// built against. +/// +/// This should be updated whenever the schema is changed. For more details, +/// refer to: schema/crdb/README.adoc +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(22, 0, 1); + table! { disk (id) { id -> Uuid, @@ -1335,13 +1342,6 @@ table! { } } -/// The version of the database schema this particular version of Nexus was -/// built against. -/// -/// This should be updated whenever the schema is changed. 
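One detail worth calling out before the version bump below: both copy migrations now end in the same ON CONFLICT clause, and that clause is what keeps them safe to re-run. A hedged sketch with placeholder UUIDs:

```sql
-- Running the same copy twice is a no-op: the second INSERT hits the
-- composite primary key and DO NOTHING skips the row instead of failing.
INSERT INTO omicron.public.ip_pool_resource
    (ip_pool_id, resource_type, resource_id, is_default)
VALUES (
    '11111111-1111-1111-1111-111111111111', -- placeholder pool ID
    'silo',
    '22222222-2222-2222-2222-222222222222', -- placeholder silo ID
    true
)
ON CONFLICT (ip_pool_id, resource_type, resource_id) DO NOTHING;
```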
For more details, -/// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(21, 0, 1); - allow_tables_to_appear_in_same_query!( system_update, component_update, diff --git a/schema/crdb/21.0.0/up1.sql b/schema/crdb/22.0.0/up1.sql similarity index 100% rename from schema/crdb/21.0.0/up1.sql rename to schema/crdb/22.0.0/up1.sql diff --git a/schema/crdb/21.0.0/up2.sql b/schema/crdb/22.0.0/up2.sql similarity index 100% rename from schema/crdb/21.0.0/up2.sql rename to schema/crdb/22.0.0/up2.sql diff --git a/schema/crdb/21.0.0/up3.sql b/schema/crdb/22.0.0/up3.sql similarity index 100% rename from schema/crdb/21.0.0/up3.sql rename to schema/crdb/22.0.0/up3.sql diff --git a/schema/crdb/21.0.0/up4.sql b/schema/crdb/22.0.0/up4.sql similarity index 100% rename from schema/crdb/21.0.0/up4.sql rename to schema/crdb/22.0.0/up4.sql diff --git a/schema/crdb/21.0.0/up5.sql b/schema/crdb/22.0.0/up5.sql similarity index 100% rename from schema/crdb/21.0.0/up5.sql rename to schema/crdb/22.0.0/up5.sql diff --git a/schema/crdb/21.0.1/README.md b/schema/crdb/22.0.1/README.md similarity index 100% rename from schema/crdb/21.0.1/README.md rename to schema/crdb/22.0.1/README.md diff --git a/schema/crdb/21.0.1/up1.sql b/schema/crdb/22.0.1/up1.sql similarity index 100% rename from schema/crdb/21.0.1/up1.sql rename to schema/crdb/22.0.1/up1.sql diff --git a/schema/crdb/21.0.1/up2.sql b/schema/crdb/22.0.1/up2.sql similarity index 100% rename from schema/crdb/21.0.1/up2.sql rename to schema/crdb/22.0.1/up2.sql diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 37d1987514..22b7fb7d82 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3078,7 +3078,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '21.0.1', NULL) + ( TRUE, NOW(), NOW(), '22.0.1', NULL) ON CONFLICT DO NOTHING; COMMIT; From ea4cbfa5a36ddf5ba8a2126164739c6b859ab6ce Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 3 Jan 2024 13:49:26 -0500 Subject: [PATCH 64/67] resolve conflicts --- nexus/db-model/src/schema.rs | 11 --- .../src/db/datastore/external_ip.rs | 50 ---------- nexus/src/app/sagas/disk_create.rs | 16 ---- nexus/src/app/sagas/disk_delete.rs | 16 ---- nexus/src/app/sagas/snapshot_create.rs | 8 -- nexus/src/external_api/http_entrypoints.rs | 48 ++-------- nexus/test-utils/src/resource_helpers.rs | 9 -- nexus/tests/integration_tests/disks.rs | 3 +- nexus/tests/integration_tests/endpoints.rs | 92 +++---------------- nexus/tests/integration_tests/external_ips.rs | 28 ++---- nexus/tests/integration_tests/instances.rs | 10 +- nexus/tests/integration_tests/unauthorized.rs | 11 --- nexus/tests/integration_tests/utilization.rs | 4 +- openapi/nexus.json | 6 -- schema/crdb/dbinit.sql | 6 -- 15 files changed, 35 insertions(+), 283 deletions(-) diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 5633950320..f80b5b1c6a 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -8,7 +8,6 @@ use omicron_common::api::external::SemverVersion; -<<<<<<< HEAD /// The version of the database schema this particular version of Nexus was /// built against. /// @@ -16,16 +15,6 @@ use omicron_common::api::external::SemverVersion; /// refer to: schema/crdb/README.adoc pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(22, 0, 1); -||||||| 7c3cd6abe -======= -/// The version of the database schema this particular version of Nexus was -/// built against. 
-/// -/// This should be updated whenever the schema is changed. For more details, -/// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(21, 0, 0); - ->>>>>>> main table! { disk (id) { id -> Uuid, diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 220ea6dcb2..02ce950118 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -143,7 +143,6 @@ impl DataStore { ) -> CreateResult { let ip_id = Uuid::new_v4(); -<<<<<<< HEAD // TODO: NameOrId resolution should happen a level higher, in the nexus function let (.., authz_pool, pool) = match params.pool { Some(NameOrId::Name(name)) => { @@ -151,41 +150,7 @@ impl DataStore { .ip_pool_name(&Name(name)) .fetch_for(authz::Action::Read) .await? -||||||| 7c3cd6abe - let pool_id = match params.pool { - Some(NameOrId::Name(name)) => { - LookupPath::new(opctx, self) - .ip_pool_name(&Name(name)) - .fetch_for(authz::Action::Read) - .await? - .1 -======= - // See `allocate_instance_ephemeral_ip`: we're replicating - // its strucutre to prevent cross-silo pool access. - let pool_id = if let Some(name_or_id) = params.pool { - let (.., authz_pool, pool) = match name_or_id { - NameOrId::Name(name) => { - LookupPath::new(opctx, self) - .ip_pool_name(&Name(name)) - .fetch_for(authz::Action::CreateChild) - .await? - } - NameOrId::Id(id) => { - LookupPath::new(opctx, self) - .ip_pool_id(id) - .fetch_for(authz::Action::CreateChild) - .await? - } - }; - - let authz_silo_id = opctx.authn.silo_required()?.id(); - if let Some(pool_silo_id) = pool.silo_id { - if pool_silo_id != authz_silo_id { - return Err(authz_pool.not_found()); - } ->>>>>>> main } -<<<<<<< HEAD Some(NameOrId::Id(id)) => { LookupPath::new(opctx, self) .ip_pool_id(id) @@ -200,21 +165,6 @@ impl DataStore { // If this pool is not linked to the current silo, 404 if self.ip_pool_fetch_link(opctx, pool_id).await.is_err() { return Err(authz_pool.not_found()); -||||||| 7c3cd6abe - Some(NameOrId::Id(id)) => { - LookupPath::new(opctx, self) - .ip_pool_id(id) - .fetch_for(authz::Action::Read) - .await? - .1 - } - None => self.ip_pools_fetch_default(opctx).await?, -======= - - pool - } else { - self.ip_pools_fetch_default(opctx).await? 
->>>>>>> main } let data = if let Some(ip) = params.address { diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 9f5781bf96..9d52ec1501 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -851,22 +851,6 @@ pub(crate) mod test { const DISK_NAME: &str = "my-disk"; const PROJECT_NAME: &str = "springfield-squidport"; -<<<<<<< HEAD -||||||| 7c3cd6abe - async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None).await; - let project = create_project(client, PROJECT_NAME).await; - project.identity.id - } - -======= - async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None, None).await; - let project = create_project(client, PROJECT_NAME).await; - project.identity.id - } - ->>>>>>> main pub fn new_disk_create_params() -> params::DiskCreate { params::DiskCreate { identity: IdentityMetadataCreateParams { diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index a5f78d6a55..333e6c1672 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -198,22 +198,6 @@ pub(crate) mod test { const PROJECT_NAME: &str = "springfield-squidport"; -<<<<<<< HEAD -||||||| 7c3cd6abe - async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None).await; - let project = create_project(client, PROJECT_NAME).await; - project.identity.id - } - -======= - async fn create_org_and_project(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None, None).await; - let project = create_project(client, PROJECT_NAME).await; - project.identity.id - } - ->>>>>>> main pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index fb52a45652..ed8c8ccebf 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -1782,18 +1782,10 @@ mod test { const DISK_NAME: &str = "disky-mcdiskface"; const INSTANCE_NAME: &str = "base-instance"; -<<<<<<< HEAD async fn create_project_and_disk_and_pool( client: &ClientTestContext, ) -> Uuid { create_default_ip_pool(&client).await; -||||||| 7c3cd6abe - async fn create_org_project_and_disk(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None).await; -======= - async fn create_org_project_and_disk(client: &ClientTestContext) -> Uuid { - create_ip_pool(&client, "p0", None, None).await; ->>>>>>> main create_project(client, PROJECT_NAME).await; create_disk(client, PROJECT_NAME, DISK_NAME).await.identity.id } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 10e90eec87..21acb45ed3 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -6,13 +6,10 @@ use super::{ console_api, device_auth, params, - params::{ProjectSelector, UninitializedSledId}, - shared::UninitializedSled, views::{ self, Certificate, Group, IdentityProvider, Image, IpPool, IpPoolRange, - PhysicalDisk, Project, Rack, Role, Silo, SiloQuotas, SiloUtilization, - Sled, SledInstance, Snapshot, SshKey, Switch, User, UserBuiltin, Vpc, - VpcRouter, VpcSubnet, + PhysicalDisk, Project, Rack, Role, Silo, SiloUtilization, Sled, + Snapshot, SshKey, User, UserBuiltin, Vpc, VpcRouter, VpcSubnet, }, }; use 
crate::external_api::shared; @@ -40,32 +37,13 @@ use dropshot::{ use ipnetwork::IpNetwork; use nexus_db_queries::authz; use nexus_db_queries::db; -use nexus_db_queries::db::identity::AssetIdentityMetadata; use nexus_db_queries::db::identity::Resource; use nexus_db_queries::db::lookup::ImageLookup; use nexus_db_queries::db::lookup::ImageParentLookup; use nexus_db_queries::db::model::Name; -<<<<<<< HEAD use nexus_types::external_api::views::SiloQuotas; -use nexus_types::{ - external_api::views::{SledInstance, Switch}, - identity::AssetIdentityMetadata, -}; -||||||| 7c3cd6abe -use nexus_db_queries::{ - authz::ApiResource, db::fixed_data::silo::INTERNAL_SILO_ID, -}; -use nexus_types::external_api::{params::ProjectSelector, views::SiloQuotas}; -use nexus_types::{ - external_api::views::{SledInstance, Switch}, - identity::AssetIdentityMetadata, -}; -======= -use nexus_db_queries::{ - authz::ApiResource, db::fixed_data::silo::INTERNAL_SILO_ID, -}; use nexus_types::external_api::views::Utilization; ->>>>>>> main +use nexus_types::identity::AssetIdentityMetadata; use omicron_common::api::external::http_pagination::data_page_params_for; use omicron_common::api::external::http_pagination::marker_for_name; use omicron_common::api::external::http_pagination::marker_for_name_or_id; @@ -1315,13 +1293,7 @@ async fn project_policy_update( // IP Pools -<<<<<<< HEAD /// List all IP pools -||||||| 7c3cd6abe -/// List all IP Pools that can be used by a given project. -======= -/// List all IP pools that can be used by a given project ->>>>>>> main #[endpoint { method = GET, path = "/v1/ip-pools", @@ -4788,7 +4760,7 @@ async fn rack_view( async fn sled_list_uninitialized( rqctx: RequestContext>, query: Query>, -) -> Result>, HttpError> { +) -> Result>, HttpError> { let apictx = rqctx.context(); // We don't actually support real pagination let pag_params = query.into_inner(); @@ -4819,7 +4791,7 @@ async fn sled_list_uninitialized( }] async fn sled_add( rqctx: RequestContext>, - sled: TypedBody, + sled: TypedBody, ) -> Result { let apictx = rqctx.context(); let nexus = &apictx.nexus; @@ -4933,7 +4905,7 @@ async fn sled_instance_list( rqctx: RequestContext>, path_params: Path, query_params: Query, -) -> Result>, HttpError> { +) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let nexus = &apictx.nexus; @@ -4954,7 +4926,7 @@ async fn sled_instance_list( Ok(HttpResponseOk(ScanById::results_page( &query, sled_instances, - &|_, sled_instance: &SledInstance| sled_instance.identity.id, + &|_, sled_instance: &views::SledInstance| sled_instance.identity.id, )?)) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await @@ -5003,7 +4975,7 @@ async fn physical_disk_list( async fn switch_list( rqctx: RequestContext>, query_params: Query, -) -> Result>, HttpError> { +) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let nexus = &apictx.nexus; @@ -5018,7 +4990,7 @@ async fn switch_list( Ok(HttpResponseOk(ScanById::results_page( &query, switches, - &|_, switch: &Switch| switch.identity.id, + &|_, switch: &views::Switch| switch.identity.id, )?)) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await @@ -5033,7 +5005,7 @@ async fn switch_list( async fn switch_view( rqctx: RequestContext>, path_params: Path, -) -> Result, HttpError> { +) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let nexus = &apictx.nexus; diff --git a/nexus/test-utils/src/resource_helpers.rs 
b/nexus/test-utils/src/resource_helpers.rs index 12f5ab5c1b..c2516a1509 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -213,7 +213,6 @@ pub async fn create_ip_pool( client: &ClientTestContext, pool_name: &str, ip_range: Option, - silo: Option, ) -> (IpPool, IpPoolRange) { let pool = object_create( client, @@ -223,14 +222,6 @@ pub async fn create_ip_pool( name: pool_name.parse().unwrap(), description: String::from("an ip pool"), }, -<<<<<<< HEAD -||||||| 7c3cd6abe - silo: None, - is_default: false, -======= - silo: silo.map(|id| NameOrId::Id(id)), - is_default: false, ->>>>>>> main }, ) .await; diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index db5e7a0fcb..2d206db53a 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -1392,7 +1392,8 @@ async fn test_phantom_disk_rename(cptestctx: &ControlPlaneTestContext) { let _disk_test = DiskTest::new(&cptestctx).await; - populate_ip_pool(&client, "default", None).await; + // TODO + // populate_ip_pool(&client, "default", None).await; let _project_id1 = create_project(client, PROJECT_NAME).await.identity.id; // Create a 1 GB disk diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 931fdd74d0..b7b838ca50 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -579,87 +579,12 @@ pub static DEMO_PROJECT_PROMOTE_IMAGE_URL: Lazy = Lazy::new(|| { ) }); -<<<<<<< HEAD - // IP Pools - pub static ref DEMO_IP_POOLS_PROJ_URL: String = "/v1/ip-pools".to_string(); - pub static ref DEMO_IP_POOLS_URL: &'static str = "/v1/system/ip-pools"; - pub static ref DEMO_IP_POOL_NAME: Name = "default".parse().unwrap(); - pub static ref DEMO_IP_POOL_CREATE: params::IpPoolCreate = - params::IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: DEMO_IP_POOL_NAME.clone(), - description: String::from("an IP pool"), - }, - }; - pub static ref DEMO_IP_POOL_PROJ_URL: String = format!("/v1/ip-pools/{}", *DEMO_IP_POOL_NAME); - pub static ref DEMO_IP_POOL_URL: String = format!("/v1/system/ip-pools/{}", *DEMO_IP_POOL_NAME); - pub static ref DEMO_IP_POOL_UPDATE: params::IpPoolUpdate = - params::IpPoolUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: Some(String::from("a new IP pool")), - }, - }; - pub static ref DEMO_IP_POOL_SILOS_URL: String = format!("{}/silos", *DEMO_IP_POOL_URL); - pub static ref DEMO_IP_POOL_SILOS_BODY: params::IpPoolSiloLink = - params::IpPoolSiloLink { - silo: NameOrId::Id(DEFAULT_SILO.identity().id), - is_default: true, // necessary for demo instance create to go through - }; - - pub static ref DEMO_IP_POOL_SILO_URL: String = format!("{}/silos/{}", *DEMO_IP_POOL_URL, *DEMO_SILO_NAME); - pub static ref DEMO_IP_POOL_SILO_UPDATE_BODY: params::IpPoolSiloUpdate = - params::IpPoolSiloUpdate { - is_default: false, - }; - - pub static ref DEMO_IP_POOL_RANGE: IpRange = IpRange::V4(Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 0), - std::net::Ipv4Addr::new(10, 0, 0, 255), - ).unwrap()); - pub static ref DEMO_IP_POOL_RANGES_URL: String = format!("{}/ranges", *DEMO_IP_POOL_URL); - pub static ref DEMO_IP_POOL_RANGES_ADD_URL: String = format!("{}/add", *DEMO_IP_POOL_RANGES_URL); - pub static ref DEMO_IP_POOL_RANGES_DEL_URL: String = format!("{}/remove", *DEMO_IP_POOL_RANGES_URL); -||||||| 7c3cd6abe - // IP Pools - pub static ref DEMO_IP_POOLS_PROJ_URL: String = - 
format!("/v1/ip-pools?project={}", *DEMO_PROJECT_NAME);
- pub static ref DEMO_IP_POOLS_URL: &'static str = "/v1/system/ip-pools";
- pub static ref DEMO_IP_POOL_NAME: Name = "default".parse().unwrap();
- pub static ref DEMO_IP_POOL_CREATE: params::IpPoolCreate =
- params::IpPoolCreate {
- identity: IdentityMetadataCreateParams {
- name: DEMO_IP_POOL_NAME.clone(),
- description: String::from("an IP pool"),
- },
- silo: None,
- is_default: true,
- };
- pub static ref DEMO_IP_POOL_PROJ_URL: String =
- format!("/v1/ip-pools/{}?project={}", *DEMO_IP_POOL_NAME, *DEMO_PROJECT_NAME);
- pub static ref DEMO_IP_POOL_URL: String = format!("/v1/system/ip-pools/{}", *DEMO_IP_POOL_NAME);
- pub static ref DEMO_IP_POOL_UPDATE: params::IpPoolUpdate =
- params::IpPoolUpdate {
- identity: IdentityMetadataUpdateParams {
- name: None,
- description: Some(String::from("a new IP pool")),
- },
- };
- pub static ref DEMO_IP_POOL_RANGE: IpRange = IpRange::V4(Ipv4Range::new(
- std::net::Ipv4Addr::new(10, 0, 0, 0),
- std::net::Ipv4Addr::new(10, 0, 0, 255),
- ).unwrap());
- pub static ref DEMO_IP_POOL_RANGES_URL: String = format!("{}/ranges", *DEMO_IP_POOL_URL);
- pub static ref DEMO_IP_POOL_RANGES_ADD_URL: String = format!("{}/add", *DEMO_IP_POOL_RANGES_URL);
- pub static ref DEMO_IP_POOL_RANGES_DEL_URL: String = format!("{}/remove", *DEMO_IP_POOL_RANGES_URL);
=======
 pub static DEMO_SILO_DEMOTE_IMAGE_URL: Lazy<String> = Lazy::new(|| {
 format!(
 "/v1/images/{}/demote?project={}",
 *DEMO_IMAGE_NAME, *DEMO_PROJECT_NAME
 )
 });
->>>>>>> main

 pub static DEMO_IMAGE_CREATE: Lazy<params::ImageCreate> =
 Lazy::new(|| params::ImageCreate {
@@ -674,7 +599,7 @@ pub static DEMO_IMAGE_CREATE: Lazy<params::ImageCreate> =
 // IP Pools
 pub static DEMO_IP_POOLS_PROJ_URL: Lazy<String> =
- Lazy::new(|| format!("/v1/ip-pools?project={}", *DEMO_PROJECT_NAME));
+ Lazy::new(|| "/v1/ip-pools".to_string());
 pub const DEMO_IP_POOLS_URL: &'static str = "/v1/system/ip-pools";
 pub static DEMO_IP_POOL_NAME: Lazy<Name> =
 Lazy::new(|| "default".parse().unwrap());
@@ -684,8 +609,6 @@ pub static DEMO_IP_POOL_CREATE: Lazy<params::IpPoolCreate> =
 name: DEMO_IP_POOL_NAME.clone(),
 description: String::from("an IP pool"),
 },
- silo: None,
- is_default: true,
 });
 pub static DEMO_IP_POOL_PROJ_URL: Lazy<String> = Lazy::new(|| {
 format!(
@@ -702,6 +625,19 @@ pub static DEMO_IP_POOL_UPDATE: Lazy<params::IpPoolUpdate> =
 description: Some(String::from("a new IP pool")),
 },
 });
+pub static DEMO_IP_POOL_SILOS_URL: Lazy<String> =
+ Lazy::new(|| format!("{}/silos", *DEMO_IP_POOL_URL));
+pub static DEMO_IP_POOL_SILOS_BODY: Lazy<params::IpPoolSiloLink> =
+ Lazy::new(|| params::IpPoolSiloLink {
+ silo: NameOrId::Id(DEFAULT_SILO.identity().id),
+ is_default: true, // necessary for demo instance create to go through
+ });
+
+pub static DEMO_IP_POOL_SILO_URL: Lazy<String> =
+ Lazy::new(|| format!("{}/silos/{}", *DEMO_IP_POOL_URL, *DEMO_SILO_NAME));
+pub static DEMO_IP_POOL_SILO_UPDATE_BODY: Lazy<params::IpPoolSiloUpdate> =
+ Lazy::new(|| params::IpPoolSiloUpdate { is_default: false });
+
 pub static DEMO_IP_POOL_RANGE: Lazy<IpRange> = Lazy::new(|| {
 IpRange::V4(
 Ipv4Range::new(
diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs
index fb5aaa8992..3e6fee0a52 100644
--- a/nexus/tests/integration_tests/external_ips.rs
+++ b/nexus/tests/integration_tests/external_ips.rs
@@ -21,18 +21,12 @@ use nexus_test_utils::resource_helpers::create_floating_ip;
 use nexus_test_utils::resource_helpers::create_instance_with;
 use nexus_test_utils::resource_helpers::create_ip_pool;
 use nexus_test_utils::resource_helpers::create_project;
-<<<<<<< HEAD
+use nexus_test_utils::resource_helpers::create_silo;
 use
nexus_test_utils::resource_helpers::link_ip_pool; use nexus_test_utils::resource_helpers::object_create; use nexus_test_utils::resource_helpers::object_create_error; use nexus_test_utils::resource_helpers::object_delete; use nexus_test_utils::resource_helpers::object_delete_error; -||||||| 7c3cd6abe -use nexus_test_utils::resource_helpers::populate_ip_pool; -======= -use nexus_test_utils::resource_helpers::create_silo; -use nexus_test_utils::resource_helpers::populate_ip_pool; ->>>>>>> main use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use nexus_types::external_api::shared; @@ -126,14 +120,8 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { Ipv4Range::new(Ipv4Addr::new(10, 1, 0, 1), Ipv4Addr::new(10, 1, 0, 5)) .unwrap(), ); -<<<<<<< HEAD // not automatically linked to currently silo. see below create_ip_pool(&client, "other-pool", Some(other_pool_range)).await; -||||||| 7c3cd6abe - create_ip_pool(&client, "other-pool", Some(other_pool_range)).await; -======= - create_ip_pool(&client, "other-pool", Some(other_pool_range), None).await; ->>>>>>> main let project = create_project(client, PROJECT_NAME).await; @@ -168,13 +156,7 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(fip.instance_id, None); assert_eq!(fip.ip, ip_addr); -<<<<<<< HEAD // Creating with other-pool fails with 404 until it is linked to the current silo -||||||| 7c3cd6abe - // Create with no chosen IP from named pool. -======= - // Create with no chosen IP from fleet-scoped named pool. ->>>>>>> main let fip_name = FIP_NAMES[2]; let params = params::FloatingIpCreate { identity: IdentityMetadataCreateParams { @@ -223,12 +205,13 @@ async fn test_floating_ip_create_fails_in_other_silo_pool( ) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; + // TODO + // populate_ip_pool(&client, "default", None).await; let project = create_project(client, PROJECT_NAME).await; // Create other silo and pool linked to that silo - let other_silo = create_silo( + let _other_silo = create_silo( &client, "not-my-silo", true, @@ -243,7 +226,8 @@ async fn test_floating_ip_create_fails_in_other_silo_pool( &client, "external-silo-pool", Some(other_pool_range), - Some(other_silo.identity.id), + // TODO + // Some(other_silo.identity.id), ) .await; diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 0ba60b5058..99ef165188 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -882,7 +882,7 @@ async fn test_instance_failed_after_sled_agent_error( let instance_name = "losing-is-fun"; // Create and start the test instance. 
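An aside on the floating-IP test above: the 404-until-linked behavior is the heart of the new model, and the shape of the check is worth spelling out. A hedged sketch using the test suite's own helpers, with names borrowed from these tests:

```rust
// An unlinked pool is indistinguishable from a nonexistent one:
let error =
    object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await;
assert_eq!(error.message, "not found: ip-pool with name \"other-pool\"");

// ...until an ip_pool_resource row ties it to the caller's silo:
link_ip_pool(&client, "other-pool", &DEFAULT_SILO.id(), false).await;
// after which the same create succeeds against the now-visible pool
```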
- create_org_and_project(&client).await; + create_project_and_pool(&client).await; let instance_url = get_instance_url(instance_name); let instance = create_instance(client, PROJECT_NAME, instance_name).await; instance_simulate(nexus, &instance.identity.id).await; @@ -3643,7 +3643,6 @@ async fn test_instance_ephemeral_ip_from_correct_pool( ) .unwrap(), ); -<<<<<<< HEAD // make first pool the default for the priv user's silo create_ip_pool(&client, "pool1", Some(range1)).await; @@ -3652,13 +3651,6 @@ async fn test_instance_ephemeral_ip_from_correct_pool( // second pool is associated with the silo but not default create_ip_pool(&client, "pool2", Some(range2)).await; link_ip_pool(&client, "pool2", &DEFAULT_SILO.id(), /*default*/ false).await; -||||||| 7c3cd6abe - populate_ip_pool(&client, "default", Some(default_pool_range)).await; - create_ip_pool(&client, "other-pool", Some(other_pool_range)).await; -======= - populate_ip_pool(&client, "default", Some(default_pool_range)).await; - create_ip_pool(&client, "other-pool", Some(other_pool_range), None).await; ->>>>>>> main // Create an instance with pool name blank, expect IP from default pool create_instance_with_pool(client, "pool1-inst", None).await; diff --git a/nexus/tests/integration_tests/unauthorized.rs b/nexus/tests/integration_tests/unauthorized.rs index c5e543279e..3671564866 100644 --- a/nexus/tests/integration_tests/unauthorized.rs +++ b/nexus/tests/integration_tests/unauthorized.rs @@ -201,23 +201,12 @@ static SETUP_REQUESTS: Lazy> = Lazy::new(|| { &*DEMO_SILO_USER_ID_SET_PASSWORD_URL, ], }, -<<<<<<< HEAD // Create the default IP pool SetupReq::Post { url: &DEMO_IP_POOLS_URL, body: serde_json::to_value(&*DEMO_IP_POOL_CREATE).unwrap(), id_routes: vec!["/v1/ip-pools/{id}"], }, -||||||| 7c3cd6abe - // Get the default IP pool - SetupReq::Get { - url: &DEMO_IP_POOL_URL, - id_routes: vec![], - }, -======= - // Get the default IP pool - SetupReq::Get { url: &DEMO_IP_POOL_URL, id_routes: vec![] }, ->>>>>>> main // Create an IP pool range SetupReq::Post { url: &DEMO_IP_POOL_RANGES_ADD_URL, diff --git a/nexus/tests/integration_tests/utilization.rs b/nexus/tests/integration_tests/utilization.rs index 5ebf56f35a..e09e71a9e3 100644 --- a/nexus/tests/integration_tests/utilization.rs +++ b/nexus/tests/integration_tests/utilization.rs @@ -3,10 +3,10 @@ use http::StatusCode; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_instance; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::objects_list_page_authz; -use nexus_test_utils::resource_helpers::populate_ip_pool; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; @@ -27,7 +27,7 @@ type ControlPlaneTestContext = async fn test_utilization(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - populate_ip_pool(&client, "default", None).await; + create_default_ip_pool(&client).await; let current_util = objects_list_page_authz::( client, diff --git a/openapi/nexus.json b/openapi/nexus.json index bb4b653327..a4ba6cbb86 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -2154,13 +2154,7 @@ "tags": [ "projects" ], -<<<<<<< HEAD "summary": "List all IP pools", -||||||| 7c3cd6abe - "summary": "List all IP Pools that can be used by a given 
project.", -======= - "summary": "List all IP pools that can be used by a given project", ->>>>>>> main "operationId": "project_ip_pool_list", "parameters": [ { diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 48a3800d55..a2c901cb25 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3103,13 +3103,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES -<<<<<<< HEAD ( TRUE, NOW(), NOW(), '22.0.1', NULL) -||||||| 7c3cd6abe - ( TRUE, NOW(), NOW(), '20.0.0', NULL) -======= - ( TRUE, NOW(), NOW(), '21.0.0', NULL) ->>>>>>> main ON CONFLICT DO NOTHING; COMMIT; From 8aa01b9b5eebc39bc56b189a101ffacd2e9c4289 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Thu, 4 Jan 2024 10:59:32 -0500 Subject: [PATCH 65/67] add ip pool resource type to list of allowed enum names --- nexus/db-queries/src/db/pool_connection.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/nexus/db-queries/src/db/pool_connection.rs b/nexus/db-queries/src/db/pool_connection.rs index e96a15894d..5016dbd031 100644 --- a/nexus/db-queries/src/db/pool_connection.rs +++ b/nexus/db-queries/src/db/pool_connection.rs @@ -48,6 +48,7 @@ static CUSTOM_TYPE_KEYS: &'static [&'static str] = &[ "identity_type", "instance_state", "ip_kind", + "ip_pool_resource_type", "network_interface_kind", "physical_disk_kind", "producer_kind", From 584cb14bc803844c2c3801782d4cb480a565e3e3 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Thu, 4 Jan 2024 14:10:30 -0500 Subject: [PATCH 66/67] clean up merge leftovers and TODOs that are becoming issues --- nexus/db-queries/src/db/datastore/ip_pool.rs | 137 ++++++++++++------ nexus/src/app/ip_pool.rs | 3 - nexus/tests/integration_tests/disks.rs | 2 - nexus/tests/integration_tests/external_ips.rs | 36 ++--- nexus/tests/integration_tests/ip_pools.rs | 51 ++++++- 5 files changed, 151 insertions(+), 78 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index c7c1064a27..f51f54d592 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -445,7 +445,6 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } - // TODO: should this error on conflict instead of updating? pub async fn ip_pool_link_silo( &self, opctx: &OpContext, @@ -458,26 +457,6 @@ impl DataStore { diesel::insert_into(dsl::ip_pool_resource) .values(ip_pool_resource.clone()) - // We have two constraints that are relevant here, and we need to - // make this behave correctly with respect to both. If the entry - // matches an existing (ip pool, silo/fleet), we want to update - // is_default because we want to handle the case where someone is - // trying to change is_default on an existing association. But - // you can only have one default pool for a given resource, so - // if that update violates the unique index ensuring one default, - // the insert should still fail. - // note that this on_conflict has to have all three because that's - // how the pk is defined. if it only has the IDs and not the type, - // CRDB complains that the tuple doesn't match any constraints it's - // aware of - .on_conflict(( - dsl::ip_pool_id, - dsl::resource_type, - dsl::resource_id, - )) - .do_update() - .set(dsl::is_default.eq(ip_pool_resource.is_default)) - .returning(IpPoolResource::as_returning()) .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
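// With the on_conflict/do_update upsert gone, re-linking an already
// linked silo trips the composite primary key and surfaces as an
// ObjectAlreadyExists conflict (the tests later in this patch assert
// the resulting 400); changing is_default is now solely the job of
// ip_pool_set_default.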
.await .map_err(|e| { @@ -496,8 +475,6 @@ impl DataStore { }) } - // TODO: make default should fail when the association doesn't exist. - // should it also fail when it's already default? probably not? pub async fn ip_pool_set_default( &self, opctx: &OpContext, @@ -899,19 +876,20 @@ impl DataStore { #[cfg(test)] mod test { + use std::num::NonZeroU32; + use crate::authz; use crate::db::datastore::datastore_test; use crate::db::model::{IpPool, IpPoolResource, IpPoolResourceType}; use assert_matches::assert_matches; use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Resource; + use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::{ - Error, IdentityMetadataCreateParams, LookupType, + DataPageParams, Error, IdentityMetadataCreateParams, LookupType, }; use omicron_test_utils::dev; - // TODO: add calls to the list endpoint throughout all this - #[tokio::test] async fn test_default_ip_pools() { let logctx = dev::test_setup_log("test_default_ip_pools"); @@ -922,7 +900,26 @@ mod test { let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); assert_matches!(error, Error::ObjectNotFound { .. }); - let silo_id = opctx.authn.silo_required().unwrap().id(); + let pagparams_id = DataPageParams { + marker: None, + limit: NonZeroU32::new(100).unwrap(), + direction: dropshot::PaginationOrder::Ascending, + }; + let pagbyid = PaginatedBy::Id(pagparams_id); + + let all_pools = datastore + .ip_pools_list(&opctx, &pagbyid) + .await + .expect("Should list IP pools"); + assert_eq!(all_pools.len(), 0); + let silo_pools = datastore + .silo_ip_pools_list(&opctx, &pagbyid) + .await + .expect("Should list silo IP pools"); + assert_eq!(silo_pools.len(), 0); + + let authz_silo = opctx.authn.silo_required().unwrap(); + let silo_id = authz_silo.id(); // create a non-default pool for the silo let identity = IdentityMetadataCreateParams { @@ -933,16 +930,40 @@ mod test { .ip_pool_create(&opctx, IpPool::new(&identity)) .await .expect("Failed to create IP pool"); + + // shows up in full list but not silo list + let all_pools = datastore + .ip_pools_list(&opctx, &pagbyid) + .await + .expect("Should list IP pools"); + assert_eq!(all_pools.len(), 1); + let silo_pools = datastore + .silo_ip_pools_list(&opctx, &pagbyid) + .await + .expect("Should list silo IP pools"); + assert_eq!(silo_pools.len(), 0); + + // make default should fail when there is no link yet + let authz_pool = authz::IpPool::new( + authz::FLEET, + pool1_for_silo.id(), + LookupType::ById(pool1_for_silo.id()), + ); + let error = datastore + .ip_pool_set_default(&opctx, &authz_pool, &authz_silo, true) + .await + .expect_err("Should not be able to make non-existent link default"); + assert_matches!(error, Error::ObjectNotFound { .. }); + + // now link to silo + let link_body = IpPoolResource { + ip_pool_id: pool1_for_silo.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: silo_id, + is_default: false, + }; datastore - .ip_pool_link_silo( - &opctx, - IpPoolResource { - ip_pool_id: pool1_for_silo.id(), - resource_type: IpPoolResourceType::Silo, - resource_id: silo_id, - is_default: false, - }, - ) + .ip_pool_link_silo(&opctx, link_body.clone()) .await .expect("Failed to associate IP pool with silo"); @@ -951,20 +972,32 @@ mod test { let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); assert_matches!(error, Error::ObjectNotFound { .. 
}); - // now we can change that association to is_default=true and - // it should update rather than erroring out + // now it shows up in the silo list + let silo_pools = datastore + .silo_ip_pools_list(&opctx, &pagbyid) + .await + .expect("Should list silo IP pools"); + assert_eq!(silo_pools.len(), 1); + assert_eq!(silo_pools[0].id(), pool1_for_silo.id()); + + // linking an already linked silo errors due to PK conflict + let err = datastore + .ip_pool_link_silo(&opctx, link_body) + .await + .expect_err("Creating the same link again should conflict"); + assert_matches!(err, Error::ObjectAlreadyExists { .. }); + + // now make it default datastore - .ip_pool_link_silo( - &opctx, - IpPoolResource { - ip_pool_id: pool1_for_silo.id(), - resource_type: IpPoolResourceType::Silo, - resource_id: silo_id, - is_default: true, - }, - ) + .ip_pool_set_default(&opctx, &authz_pool, &authz_silo, true) .await - .expect("Failed to make IP pool default for silo"); + .expect("Should be able to make pool default"); + + // setting default if already default is allowed + datastore + .ip_pool_set_default(&opctx, &authz_pool, &authz_silo, true) + .await + .expect("Should be able to make pool default again"); // now when we ask for the default pool again, we get that one let (authz_pool1_for_silo, ip_pool) = datastore @@ -1004,9 +1037,17 @@ mod test { .await .expect("Failed to unlink IP pool from silo"); + // no default let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); assert_matches!(error, Error::ObjectNotFound { .. }); + // and silo pools list is empty again + let silo_pools = datastore + .silo_ip_pools_list(&opctx, &pagbyid) + .await + .expect("Should list silo IP pools"); + assert_eq!(silo_pools.len(), 0); + db.cleanup().await.unwrap(); logctx.cleanup_successful(); } diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 830876591e..1d9b3e515e 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -73,9 +73,6 @@ impl super::Nexus { self.db_datastore.ip_pool_create(opctx, pool).await } - // TODO: this is used by a developer user to see what IP pools they can use - // in their silo, so it would be nice to say which one is the default - /// List IP pools in current silo pub(crate) async fn silo_ip_pools_list( &self, diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index 2d206db53a..b9023a8212 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -1392,8 +1392,6 @@ async fn test_phantom_disk_rename(cptestctx: &ControlPlaneTestContext) { let _disk_test = DiskTest::new(&cptestctx).await; - // TODO - // populate_ip_pool(&client, "default", None).await; let _project_id1 = create_project(client, PROJECT_NAME).await.identity.id; // Create a 1 GB disk diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs index 3e6fee0a52..3b6127ceb1 100644 --- a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -205,13 +205,10 @@ async fn test_floating_ip_create_fails_in_other_silo_pool( ) { let client = &cptestctx.external_client; - // TODO - // populate_ip_pool(&client, "default", None).await; - let project = create_project(client, PROJECT_NAME).await; // Create other silo and pool linked to that silo - let _other_silo = create_silo( + let other_silo = create_silo( &client, "not-my-silo", true, @@ -222,14 +219,8 @@ async fn test_floating_ip_create_fails_in_other_silo_pool( 
         Ipv4Range::new(Ipv4Addr::new(10, 2, 0, 1), Ipv4Addr::new(10, 2, 0, 5))
             .unwrap(),
     );
-    create_ip_pool(
-        &client,
-        "external-silo-pool",
-        Some(other_pool_range),
-        // TODO
-        // Some(other_silo.identity.id),
-    )
-    .await;
+    create_ip_pool(&client, "external-silo-pool", Some(other_pool_range)).await;
+    // don't link pool to silo yet
 
     let fip_name = FIP_NAMES[4];
 
@@ -246,14 +237,19 @@
         pool: Some(NameOrId::Name("external-silo-pool".parse().unwrap())),
     };
 
-    let error = NexusRequest::new(
-        RequestBuilder::new(client, Method::POST, &url)
-            .body(Some(&body))
-            .expect_status(Some(StatusCode::NOT_FOUND)),
-    )
-    .authn_as(AuthnMode::PrivilegedUser)
-    .execute_and_parse_unwrap::<HttpErrorResponseBody>()
-    .await;
+    let error =
+        object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await;
+    assert_eq!(
+        error.message,
+        "not found: ip-pool with name \"external-silo-pool\""
+    );
+
+    // error is the same after linking the pool to the other silo
+    link_ip_pool(&client, "external-silo-pool", &other_silo.identity.id, false)
+        .await;
+
+    let error =
+        object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await;
     assert_eq!(
         error.message,
         "not found: ip-pool with name \"external-silo-pool\""
diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs
index 770796affc..5682df2c3a 100644
--- a/nexus/tests/integration_tests/ip_pools.rs
+++ b/nexus/tests/integration_tests/ip_pools.rs
@@ -253,18 +253,49 @@ async fn test_ip_pool_service_no_cud(cptestctx: &ControlPlaneTestContext) {
         "not found: ip-pool with name \"oxide-service-pool\""
     );
 
+    let not_found_id =
+        format!("not found: ip-pool with id \"{}\"", pool.identity.id);
     let error = object_delete_error(
         client,
         &internal_pool_id_url,
         StatusCode::NOT_FOUND,
     )
     .await;
-    assert_eq!(
-        error.message,
-        format!("not found: ip-pool with id \"{}\"", pool.identity.id)
-    );
+    assert_eq!(error.message, not_found_id);
 
-    // TODO: update, assoc, dissoc, add/remove range by name or ID should all fail
+    // Update not allowed
+    let put_body = params::IpPoolUpdate {
+        identity: IdentityMetadataUpdateParams {
+            name: Some("test".parse().unwrap()),
+            description: Some("test".to_string()),
+        },
+    };
+    let error = object_put_error(
+        client,
+        &internal_pool_id_url,
+        &put_body,
+        StatusCode::NOT_FOUND,
+    )
+    .await;
+    assert_eq!(error.message, not_found_id);
+
+    // linking not allowed
+
+    // let link_body = params::IpPoolSiloLink {
+    //     silo: NameOrId::Name(cptestctx.silo_name.clone()),
+    //     is_default: false,
+    // };
+    // let link_url = format!("{}/silos", internal_pool_id_url);
+    // let error = object_create_error(
+    //     client,
+    //     &link_url,
+    //     &link_body,
+    //     StatusCode::NOT_FOUND,
+    // )
+    // .await;
+    // assert_eq!(error.message, not_found_id);
+
+    // TODO: link, unlink, add/remove range by name or ID should all fail
 }
 
 #[nexus_test]
@@ -305,6 +336,16 @@ async fn test_ip_pool_silo_link(cptestctx: &ControlPlaneTestContext) {
     let _: IpPoolSilo =
         object_create(client, "/v1/system/ip-pools/p0/silos", &params).await;
 
+    // second attempt to create the same link errors due to conflict
+    let error = object_create_error(
+        client,
+        "/v1/system/ip-pools/p0/silos",
+        &params,
+        StatusCode::BAD_REQUEST,
+    )
+    .await;
+    assert_eq!(error.error_code.unwrap(), "ObjectAlreadyExists");
+
     // get silo ID so we can test association by ID as well
     let silo_url = format!("/v1/system/silos/{}", cptestctx.silo_name);
     let silo_id = object_get::<Silo>(client, &silo_url).await.identity.id;
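
The behaviors these tests pin down — a duplicate link surfacing as ObjectAlreadyExists via the join table's composite primary key, and at most one default pool per silo — are ultimately enforced by the database. As a rough illustration only (the UUIDs are placeholders and these are not necessarily the exact statements the datastore issues), a default swap has to clear the old default before setting the new one, or the one-default-per-resource partial unique index would reject the second row:

BEGIN;

-- hypothetical sketch: clear the silo's current default first
UPDATE omicron.public.ip_pool_resource
    SET is_default = false
    WHERE resource_id = '00000000-0000-4000-8000-000000000001' -- placeholder silo ID
      AND is_default = true;

-- then mark the newly chosen pool as default for that silo
UPDATE omicron.public.ip_pool_resource
    SET is_default = true
    WHERE ip_pool_id = '00000000-0000-4000-8000-000000000002'  -- placeholder pool ID
      AND resource_id = '00000000-0000-4000-8000-000000000001';

COMMIT;

Running both updates inside one transaction, in that order, means the unique index is never violated mid-swap, and an UPDATE that matches zero rows (no existing link) naturally yields the not-found behavior the test asserts.
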
From 497640416218df43446dcd3569c877c2978908a8 Mon Sep 17 00:00:00 2001
From: David Crespo
Date: Fri, 5 Jan 2024 11:39:03 -0500
Subject: [PATCH 67/67] move migrations up one

---
 nexus/db-model/src/schema.rs             | 2 +-
 schema/crdb/{22.0.0 => 23.0.0}/up1.sql   | 0
 schema/crdb/{22.0.0 => 23.0.0}/up2.sql   | 0
 schema/crdb/{22.0.0 => 23.0.0}/up3.sql   | 0
 schema/crdb/{22.0.0 => 23.0.0}/up4.sql   | 0
 schema/crdb/{22.0.0 => 23.0.0}/up5.sql   | 0
 schema/crdb/{22.0.1 => 23.0.1}/README.md | 0
 schema/crdb/{22.0.1 => 23.0.1}/up1.sql   | 0
 schema/crdb/{22.0.1 => 23.0.1}/up2.sql   | 0
 schema/crdb/dbinit.sql                   | 2 +-
 10 files changed, 2 insertions(+), 2 deletions(-)
 rename schema/crdb/{22.0.0 => 23.0.0}/up1.sql (100%)
 rename schema/crdb/{22.0.0 => 23.0.0}/up2.sql (100%)
 rename schema/crdb/{22.0.0 => 23.0.0}/up3.sql (100%)
 rename schema/crdb/{22.0.0 => 23.0.0}/up4.sql (100%)
 rename schema/crdb/{22.0.0 => 23.0.0}/up5.sql (100%)
 rename schema/crdb/{22.0.1 => 23.0.1}/README.md (100%)
 rename schema/crdb/{22.0.1 => 23.0.1}/up1.sql (100%)
 rename schema/crdb/{22.0.1 => 23.0.1}/up2.sql (100%)

diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs
index f80b5b1c6a..e7900b68ab 100644
--- a/nexus/db-model/src/schema.rs
+++ b/nexus/db-model/src/schema.rs
@@ -13,7 +13,7 @@ use omicron_common::api::external::SemverVersion;
 ///
 /// This should be updated whenever the schema is changed. For more details,
 /// refer to: schema/crdb/README.adoc
-pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(22, 0, 1);
+pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(23, 0, 1);
 
 table! {
     disk (id) {
diff --git a/schema/crdb/22.0.0/up1.sql b/schema/crdb/23.0.0/up1.sql
similarity index 100%
rename from schema/crdb/22.0.0/up1.sql
rename to schema/crdb/23.0.0/up1.sql
diff --git a/schema/crdb/22.0.0/up2.sql b/schema/crdb/23.0.0/up2.sql
similarity index 100%
rename from schema/crdb/22.0.0/up2.sql
rename to schema/crdb/23.0.0/up2.sql
diff --git a/schema/crdb/22.0.0/up3.sql b/schema/crdb/23.0.0/up3.sql
similarity index 100%
rename from schema/crdb/22.0.0/up3.sql
rename to schema/crdb/23.0.0/up3.sql
diff --git a/schema/crdb/22.0.0/up4.sql b/schema/crdb/23.0.0/up4.sql
similarity index 100%
rename from schema/crdb/22.0.0/up4.sql
rename to schema/crdb/23.0.0/up4.sql
diff --git a/schema/crdb/22.0.0/up5.sql b/schema/crdb/23.0.0/up5.sql
similarity index 100%
rename from schema/crdb/22.0.0/up5.sql
rename to schema/crdb/23.0.0/up5.sql
diff --git a/schema/crdb/22.0.1/README.md b/schema/crdb/23.0.1/README.md
similarity index 100%
rename from schema/crdb/22.0.1/README.md
rename to schema/crdb/23.0.1/README.md
diff --git a/schema/crdb/22.0.1/up1.sql b/schema/crdb/23.0.1/up1.sql
similarity index 100%
rename from schema/crdb/22.0.1/up1.sql
rename to schema/crdb/23.0.1/up1.sql
diff --git a/schema/crdb/22.0.1/up2.sql b/schema/crdb/23.0.1/up2.sql
similarity index 100%
rename from schema/crdb/22.0.1/up2.sql
rename to schema/crdb/23.0.1/up2.sql
diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql
index a2c901cb25..eaa7aa067c 100644
--- a/schema/crdb/dbinit.sql
+++ b/schema/crdb/dbinit.sql
@@ -3103,7 +3103,7 @@ INSERT INTO omicron.public.db_metadata (
     version,
     target_version
 ) VALUES
-    ( TRUE, NOW(), NOW(), '22.0.1', NULL)
+    ( TRUE, NOW(), NOW(), '23.0.1', NULL)
 ON CONFLICT DO NOTHING;
 
 COMMIT;
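
After this renumbering, the migration directories, SCHEMA_VERSION, and the version baked into dbinit.sql all agree at 23.0.1. A quick sanity check against a running database might look like the following sketch, which assumes only the db_metadata columns visible in the hunk above:

-- confirm the schema version a database reports after the upgrade runs
SELECT version, target_version
FROM omicron.public.db_metadata;

-- expected once the upgrade has completed:
--   version = '23.0.1', target_version = NULL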