From 9cbc4231893eba0599bdbff1082231d44cd8d200 Mon Sep 17 00:00:00 2001
From: David Crespo
Date: Thu, 19 Oct 2023 10:48:09 -0500
Subject: [PATCH] get conflict/update logic right around is_default

---
 common/src/api/external/mod.rs               |  1 +
 nexus/db-queries/src/db/datastore/ip_pool.rs | 61 +++++++++++---------
 nexus/db-queries/src/db/datastore/rack.rs    |  2 -
 schema/crdb/dbinit.sql                       | 48 ++++++++--------
 4 files changed, 58 insertions(+), 54 deletions(-)

diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs
index 53512408af..c7cabc5f97 100644
--- a/common/src/api/external/mod.rs
+++ b/common/src/api/external/mod.rs
@@ -711,6 +711,7 @@ pub enum ResourceType {
     LoopbackAddress,
     SwitchPortSettings,
     IpPool,
+    IpPoolResource,
     InstanceNetworkInterface,
     PhysicalDisk,
     Rack,
diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs
index abc5286c23..e1d4ec5dcc 100644
--- a/nexus/db-queries/src/db/datastore/ip_pool.rs
+++ b/nexus/db-queries/src/db/datastore/ip_pool.rs
@@ -322,19 +322,36 @@ impl DataStore {
             .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST)
             .await?;
 
-        // TODO: make sure on_conflict_do_nothing doesn't suppress the error for
-        // trying to give a silo two default pools
         diesel::insert_into(dsl::ip_pool_resource)
            .values(ip_pool_resource.clone())
+            // We have two constraints that are relevant here, and we need to
+            // make this behave correctly with respect to both. If the entry
+            // matches an existing (ip pool, silo/fleet) pair, we want to
+            // update is_default to handle the case where someone is trying
+            // to change is_default on an existing association. But you can
+            // only have one default pool for a given resource, so if that
+            // update violates the unique index ensuring one default, the
+            // insert should still fail.
+            // Note that this on_conflict has to list all three columns
+            // because that's how the pk is defined. If it only has the IDs
+            // and not the type, CRDB complains that the tuple doesn't match
+            // any constraints it's aware of.
+            .on_conflict((
+                dsl::ip_pool_id,
+                dsl::resource_type,
+                dsl::resource_id,
+            ))
+            .do_update()
+            .set(dsl::is_default.eq(ip_pool_resource.is_default))
             .returning(IpPoolResource::as_returning())
-            .on_conflict_do_nothing()
             .get_result_async(&*self.pool_connection_authorized(opctx).await?)
             .await
             .map_err(|e| {
                 public_error_from_diesel(
                     e,
                     ErrorHandler::Conflict(
-                        ResourceType::IpPool,
+                        ResourceType::IpPoolResource,
+                        // TODO: make string more useful
                         &ip_pool_resource.ip_pool_id.to_string(),
                     ),
                 )
@@ -530,8 +547,6 @@ mod test {
         assert_eq!(fleet_default_pool.identity.name.as_str(), "default");
 
         // unique index prevents second fleet-level default
-        // TODO: create the pool, and then the failure is on the attempt to
-        // associate it with the fleet as default
         let identity = IdentityMetadataCreateParams {
             name: "another-fleet-default".parse().unwrap(),
             description: "".to_string(),
@@ -552,27 +567,27 @@ mod test {
             )
             .await
             .expect_err("Failed to fail to make IP pool fleet default");
+        assert_matches!(err, Error::ObjectAlreadyExists { .. });
 
-        // when we fetch the default pool for a silo, if those scopes do not
-        // have a default IP pool, we will still get back the fleet default
+        // now test logic preferring most specific available default
         let silo_id = opctx.authn.silo_required().unwrap().id();
 
         // create a non-default pool for the silo
         let identity = IdentityMetadataCreateParams {
-            name: "non-default-for-silo".parse().unwrap(),
+            name: "pool1-for-silo".parse().unwrap(),
             description: "".to_string(),
         };
-        let default_for_silo = datastore
+        let pool1_for_silo = datastore
             .ip_pool_create(&opctx, IpPool::new(&identity))
             .await
-            .expect("Failed to create silo non-default IP pool");
+            .expect("Failed to create IP pool");
 
         datastore
             .ip_pool_associate_resource(
                 &opctx,
                 IpPoolResource {
-                    ip_pool_id: default_for_silo.id(),
+                    ip_pool_id: pool1_for_silo.id(),
                     resource_type: IpPoolResourceType::Silo,
                     resource_id: silo_id,
                     is_default: false,
                 },
@@ -589,37 +604,27 @@ mod test {
             .expect("Failed to get silo default IP pool");
         assert_eq!(ip_pool.id(), fleet_default_pool.id());
 
-        // TODO: instead of a separate pool, this could now be done by
-        // associating the same pool? Would be nice to test both
-
-        // now create a default pool for the silo
-        let identity = IdentityMetadataCreateParams {
-            name: "default-for-silo".parse().unwrap(),
-            description: "".to_string(),
-        };
-        let default_for_silo = datastore
-            .ip_pool_create(&opctx, IpPool::new(&identity))
-            .await
-            .expect("Failed to create silo default IP pool");
+        // now we can change that association to is_default=true and
+        // it should update rather than erroring out
         datastore
             .ip_pool_associate_resource(
                 &opctx,
                 IpPoolResource {
-                    ip_pool_id: default_for_silo.id(),
+                    ip_pool_id: pool1_for_silo.id(),
                     resource_type: IpPoolResourceType::Silo,
                     resource_id: silo_id,
                     is_default: true,
                 },
             )
             .await
-            .expect("Failed to associate IP pool with silo");
+            .expect("Failed to make IP pool default for silo");
 
-        // now when we ask for the default pool, we get the one we just made
+        // now when we ask for the default pool again, we get the one we just changed
         let ip_pool = datastore
             .ip_pools_fetch_default(&opctx)
             .await
             .expect("Failed to get silo's default IP pool");
-        assert_eq!(ip_pool.name().as_str(), "default-for-silo");
+        assert_eq!(ip_pool.name().as_str(), "pool1-for-silo");
 
         // and we can't create a second default pool for the silo
         let identity = IdentityMetadataCreateParams {
diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs
index 4aca246020..b7059b3db4 100644
--- a/nexus/db-queries/src/db/datastore/rack.rs
+++ b/nexus/db-queries/src/db/datastore/rack.rs
@@ -556,9 +556,7 @@ impl DataStore {
     ) -> Result<(), Error> {
         use omicron_common::api::external::Name;
 
-        dbg!("a");
         self.rack_insert(opctx, &db::model::Rack::new(rack_id)).await?;
-        dbg!("b");
 
         let internal_pool =
             db::model::IpPool::new(&IdentityMetadataCreateParams {
diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql
index 88454d1841..8bb35f4e13 100644
--- a/schema/crdb/dbinit.sql
+++ b/schema/crdb/dbinit.sql
@@ -1466,6 +1466,30 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_pool_by_name ON omicron.public.ip_pool (
 ) WHERE
     time_deleted IS NULL;
 
+CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_resource_type AS ENUM (
+    'fleet',
+    'silo'
+);
+
+-- join table associating IP pools with resources like fleet or silo
+CREATE TABLE IF NOT EXISTS omicron.public.ip_pool_resource (
+    ip_pool_id UUID NOT NULL,
+    resource_type ip_pool_resource_type NOT NULL,
+    resource_id UUID NOT NULL,
+    is_default BOOL NOT NULL,
+    -- TODO: timestamps for soft deletes?
+
+    -- resource_type is redundant because resource IDs are globally unique, but
+    -- logically it belongs here
+    PRIMARY KEY (ip_pool_id, resource_type, resource_id)
+);
+
+-- a given resource can only have one default ip pool
+CREATE UNIQUE INDEX IF NOT EXISTS one_default_ip_pool_per_resource ON omicron.public.ip_pool_resource (
+    resource_id
+) where
+    is_default = true;
+
 /*
  * IP Pools are made up of a set of IP ranges, which are start/stop addresses.
  * Note that these need not be CIDR blocks or well-behaved subnets with a
@@ -2539,30 +2563,6 @@ FROM
 WHERE
     instance.time_deleted IS NULL AND vmm.time_deleted IS NULL;
 
-CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_resource_type AS ENUM (
-    'fleet',
-    'silo'
-);
-
--- join table associating IP pools with resources like fleet or silo
-CREATE TABLE IF NOT EXISTS omicron.public.ip_pool_resource (
-    ip_pool_id UUID NOT NULL,
-    resource_type ip_pool_resource_type NOT NULL,
-    resource_id UUID NOT NULL,
-    is_default BOOL NOT NULL,
-    -- TODO: timestamps for soft deletes?
-
-    -- resource_type is redundant because resource IDs are globally unique, but
-    -- logically it belongs here
-    PRIMARY KEY (ip_pool_id, resource_type, resource_id)
-);
-
--- a given resource can only have one default ip pool
-CREATE UNIQUE INDEX IF NOT EXISTS one_default_ip_pool_per_resource ON omicron.public.ip_pool_resource (
-    resource_id
-) where
-    is_default = true;
-
 CREATE TABLE IF NOT EXISTS omicron.public.db_metadata (
     -- There should only be one row of this table for the whole DB.
     -- It's a little goofy, but filter on "singleton = true" before querying
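
Not part of the patch: a rough plain-SQL sketch of the behavior the comment in ip_pool_associate_resource describes, written directly against the ip_pool_resource table and one_default_ip_pool_per_resource index added above. The UUIDs are placeholders chosen for illustration.

-- Associate pool A with silo S as a non-default pool.
INSERT INTO omicron.public.ip_pool_resource
    (ip_pool_id, resource_type, resource_id, is_default)
VALUES
    ('11111111-1111-1111-1111-111111111111', 'silo',
     'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', false);

-- Re-associating the same (pool, resource) pair hits the primary key, so the
-- conflict turns into an update of is_default rather than a duplicate-row error.
INSERT INTO omicron.public.ip_pool_resource
    (ip_pool_id, resource_type, resource_id, is_default)
VALUES
    ('11111111-1111-1111-1111-111111111111', 'silo',
     'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', true)
ON CONFLICT (ip_pool_id, resource_type, resource_id)
    DO UPDATE SET is_default = excluded.is_default;

-- A different pool trying to become a second default for the same silo still
-- fails: the partial unique index on resource_id where is_default = true
-- rejects the new row even though the primary key does not conflict.
INSERT INTO omicron.public.ip_pool_resource
    (ip_pool_id, resource_type, resource_id, is_default)
VALUES
    ('22222222-2222-2222-2222-222222222222', 'silo',
     'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', true)
ON CONFLICT (ip_pool_id, resource_type, resource_id)
    DO UPDATE SET is_default = excluded.is_default;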