Skip to content

Commit

Permalink
Delete IP pool links on silo delete and IP pool delete (#4867)
Browse files Browse the repository at this point in the history
Closes #4849 

Before this change, IP pool delete is blocked if there are any
outstanding silo links, which would mean the user would have to unlink
every silo before deleting a pool. This is annoying. The main insight
here, discussed in #4849, is that once we get past the other checks to
ensure that the pool or silo is not in use (pool contains no IP ranges,
silo contains no projects), there is no need to block on links, and it
is fine for us to delete any associated links when we delete the thing.
  • Loading branch information
david-crespo authored Jan 23, 2024
1 parent 4cf2a69 commit 8183138
Show file tree
Hide file tree
Showing 7 changed files with 180 additions and 37 deletions.
2 changes: 1 addition & 1 deletion nexus/db-model/src/schema.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ use omicron_common::api::external::SemverVersion;
///
/// This should be updated whenever the schema is changed. For more details,
/// refer to: schema/crdb/README.adoc
pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(24, 0, 0);
pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(25, 0, 0);

table! {
disk (id) {
Expand Down
37 changes: 16 additions & 21 deletions nexus/db-queries/src/db/datastore/ip_pool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -224,15 +224,15 @@ impl DataStore {
use db::schema::ip_pool_resource;
opctx.authorize(authz::Action::Delete, authz_pool).await?;

let conn = self.pool_connection_authorized(opctx).await?;

// Verify there are no IP ranges still in this pool
let range = ip_pool_range::dsl::ip_pool_range
.filter(ip_pool_range::dsl::ip_pool_id.eq(authz_pool.id()))
.filter(ip_pool_range::dsl::time_deleted.is_null())
.select(ip_pool_range::dsl::id)
.limit(1)
.first_async::<Uuid>(
&*self.pool_connection_authorized(opctx).await?,
)
.first_async::<Uuid>(&*conn)
.await
.optional()
.map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
Expand All @@ -242,23 +242,6 @@ impl DataStore {
));
}

// Verify there are no linked silos
let silo_link = ip_pool_resource::table
.filter(ip_pool_resource::ip_pool_id.eq(authz_pool.id()))
.select(ip_pool_resource::resource_id)
.limit(1)
.first_async::<Uuid>(
&*self.pool_connection_authorized(opctx).await?,
)
.await
.optional()
.map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
if silo_link.is_some() {
return Err(Error::invalid_request(
"IP Pool cannot be deleted while it is linked to a silo",
));
}

// Delete the pool, conditional on the rcgen not having changed. This
// protects the delete from occurring if clients created a new IP range
// in between the above check for children and this query.
Expand All @@ -268,7 +251,7 @@ impl DataStore {
.filter(dsl::id.eq(authz_pool.id()))
.filter(dsl::rcgen.eq(db_pool.rcgen))
.set(dsl::time_deleted.eq(now))
.execute_async(&*self.pool_connection_authorized(opctx).await?)
.execute_async(&*conn)
.await
.map_err(|e| {
public_error_from_diesel(
Expand All @@ -282,6 +265,18 @@ impl DataStore {
"deletion failed due to concurrent modification",
));
}

// Rather than treating outstanding links as a blocker for pool delete,
// just delete them. If we've gotten this far, we know there are no
// ranges in the pool, which means it can't be in use.

// delete any links from this pool to any other resources (silos)
diesel::delete(ip_pool_resource::table)
.filter(ip_pool_resource::ip_pool_id.eq(authz_pool.id()))
.execute_async(&*conn)
.await
.map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;

Ok(())
}

Expand Down
18 changes: 18 additions & 0 deletions nexus/db-queries/src/db/datastore/silo.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ use crate::db::error::TransactionError;
use crate::db::fixed_data::silo::{DEFAULT_SILO, INTERNAL_SILO};
use crate::db::identity::Resource;
use crate::db::model::CollectionTypeProvisioned;
use crate::db::model::IpPoolResourceType;
use crate::db::model::Name;
use crate::db::model::Silo;
use crate::db::model::VirtualProvisioningCollection;
Expand Down Expand Up @@ -547,6 +548,23 @@ impl DataStore {

debug!(opctx.log, "deleted {} silo IdPs for silo {}", updated_rows, id);

// delete IP pool links (not IP pools, just the links)
use db::schema::ip_pool_resource;

let updated_rows = diesel::delete(ip_pool_resource::table)
.filter(ip_pool_resource::resource_id.eq(id))
.filter(
ip_pool_resource::resource_type.eq(IpPoolResourceType::Silo),
)
.execute_async(&*conn)
.await
.map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;

debug!(
opctx.log,
"deleted {} IP pool links for silo {}", updated_rows, id
);

Ok(())
}
}
38 changes: 26 additions & 12 deletions nexus/tests/integration_tests/ip_pools.rs
Original file line number Diff line number Diff line change
Expand Up @@ -275,6 +275,32 @@ async fn test_ip_pool_list_dedupe(cptestctx: &ControlPlaneTestContext) {
let silo3_pools = pools_for_silo(client, "silo3").await;
assert_eq!(silo3_pools.len(), 1);
assert_eq!(silo3_pools[0].identity.name, "pool1");

// this is a great spot to check that deleting a pool cleans up the links!

// first we have to delete the range, otherwise delete will fail
let url = "/v1/system/ip-pools/pool1/ranges/remove";
NexusRequest::new(
RequestBuilder::new(client, Method::POST, url)
.body(Some(&range1))
.expect_status(Some(StatusCode::NO_CONTENT)),
)
.authn_as(AuthnMode::PrivilegedUser)
.execute()
.await
.expect("Failed to delete IP range from a pool");

object_delete(client, "/v1/system/ip-pools/pool1").await;

let silo1_pools = pools_for_silo(client, "silo1").await;
assert_eq!(silo1_pools.len(), 1);
assert_eq!(silo1_pools[0].id(), pool2.id());

let silo2_pools = pools_for_silo(client, "silo2").await;
assert_eq!(silo2_pools.len(), 0);

let silo3_pools = pools_for_silo(client, "silo3").await;
assert_eq!(silo3_pools.len(), 0);
}

/// The internal IP pool, defined by its association with the internal silo,
Expand Down Expand Up @@ -474,18 +500,6 @@ async fn test_ip_pool_silo_link(cptestctx: &ControlPlaneTestContext) {
.await;
assert_eq!(error.error_code.unwrap(), "ObjectAlreadyExists");

// pool delete fails because it is linked to a silo
let error = object_delete_error(
client,
"/v1/system/ip-pools/p1",
StatusCode::BAD_REQUEST,
)
.await;
assert_eq!(
error.message,
"IP Pool cannot be deleted while it is linked to a silo",
);

// unlink p1 from silo (doesn't matter that it's a default)
let url = format!("/v1/system/ip-pools/p1/silos/{}", cptestctx.silo_name);
object_delete(client, &url).await;
Expand Down
105 changes: 103 additions & 2 deletions nexus/tests/integration_tests/silos.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,15 +16,17 @@ use nexus_db_queries::db::identity::Asset;
use nexus_db_queries::db::lookup::LookupPath;
use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder};
use nexus_test_utils::resource_helpers::{
create_local_user, create_project, create_silo, grant_iam, object_create,
objects_list_page_authz, projects_list,
create_ip_pool, create_local_user, create_project, create_silo, grant_iam,
link_ip_pool, object_create, object_delete, objects_list_page_authz,
projects_list,
};
use nexus_test_utils_macros::nexus_test;
use nexus_types::external_api::views::Certificate;
use nexus_types::external_api::views::{
self, IdentityProvider, Project, SamlIdentityProvider, Silo,
};
use nexus_types::external_api::{params, shared};
use omicron_common::address::{IpRange, Ipv4Range};
use omicron_common::api::external::ObjectIdentity;
use omicron_common::api::external::{
IdentityMetadataCreateParams, LookupType, Name,
Expand Down Expand Up @@ -2526,3 +2528,102 @@ async fn test_silo_admin_can_create_certs(cptestctx: &ControlPlaneTestContext) {
assert_eq!(silo_certs.len(), 1);
assert_eq!(silo_certs[0].identity.id, cert.identity.id);
}

// Test that silo delete cleans up associated IP pool links (only the links --
// the pools themselves must survive the silo delete)
#[nexus_test]
async fn test_silo_delete_cleans_up_ip_pool_links(
    cptestctx: &ControlPlaneTestContext,
) {
    let client = &cptestctx.external_client;

    // Create two silos
    let silo1 =
        create_silo(&client, "silo1", true, shared::SiloIdentityMode::SamlJit)
            .await;
    let silo2 =
        create_silo(&client, "silo2", true, shared::SiloIdentityMode::SamlJit)
            .await;

    // link pool1 to both, link pool2 to silo1 only
    let range1 = IpRange::V4(
        Ipv4Range::new(
            std::net::Ipv4Addr::new(10, 0, 0, 51),
            std::net::Ipv4Addr::new(10, 0, 0, 52),
        )
        .unwrap(),
    );
    create_ip_pool(client, "pool1", Some(range1)).await;
    link_ip_pool(client, "pool1", &silo1.identity.id, true).await;
    link_ip_pool(client, "pool1", &silo2.identity.id, true).await;

    let range2 = IpRange::V4(
        Ipv4Range::new(
            std::net::Ipv4Addr::new(10, 0, 0, 53),
            std::net::Ipv4Addr::new(10, 0, 0, 54),
        )
        .unwrap(),
    );
    create_ip_pool(client, "pool2", Some(range2)).await;
    link_ip_pool(client, "pool2", &silo1.identity.id, false).await;

    // we want to make sure the links are there before we make sure they're gone
    let url = "/v1/system/ip-pools/pool1/silos";
    let links =
        objects_list_page_authz::<views::IpPoolSiloLink>(client, &url).await;
    assert_eq!(links.items.len(), 2);

    let url = "/v1/system/ip-pools/pool2/silos";
    let links =
        objects_list_page_authz::<views::IpPoolSiloLink>(client, &url).await;
    assert_eq!(links.items.len(), 1);

    // Delete silo1 (linked to both pools; silo2 stays, so pool1 keeps one link)
    let url = format!("/v1/system/silos/{}", silo1.identity.id);
    object_delete(client, &url).await;

    // Now make sure the links are gone
    let url = "/v1/system/ip-pools/pool1/silos";
    let links =
        objects_list_page_authz::<views::IpPoolSiloLink>(client, &url).await;
    assert_eq!(links.items.len(), 1);

    let url = "/v1/system/ip-pools/pool2/silos";
    let links =
        objects_list_page_authz::<views::IpPoolSiloLink>(client, &url).await;
    assert_eq!(links.items.len(), 0);

    // but the pools are of course still there
    let url = "/v1/system/ip-pools";
    let pools = objects_list_page_authz::<views::IpPool>(client, &url).await;
    assert_eq!(pools.items.len(), 2);
    assert_eq!(pools.items[0].identity.name, "pool1");
    assert_eq!(pools.items[1].identity.name, "pool2");

    // nothing prevents us from deleting the pools (except the child ranges --
    // we do have to remove those)

    let url = "/v1/system/ip-pools/pool1/ranges/remove";
    NexusRequest::new(
        RequestBuilder::new(client, Method::POST, url)
            .body(Some(&range1))
            .expect_status(Some(StatusCode::NO_CONTENT)),
    )
    .authn_as(AuthnMode::PrivilegedUser)
    .execute()
    .await
    .expect("Failed to delete IP range from a pool");

    let url = "/v1/system/ip-pools/pool2/ranges/remove";
    NexusRequest::new(
        RequestBuilder::new(client, Method::POST, url)
            .body(Some(&range2))
            .expect_status(Some(StatusCode::NO_CONTENT)),
    )
    .authn_as(AuthnMode::PrivilegedUser)
    .execute()
    .await
    .expect("Failed to delete IP range from a pool");

    object_delete(client, "/v1/system/ip-pools/pool1").await;
    object_delete(client, "/v1/system/ip-pools/pool2").await;
}
7 changes: 7 additions & 0 deletions schema/crdb/25.0.0/up.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
-- created solely to prevent table scans when we delete links: the index on
-- resource_id covers link deletion on silo delete, and the index on
-- ip_pool_id covers link deletion on IP pool delete
CREATE INDEX IF NOT EXISTS ip_pool_resource_id ON omicron.public.ip_pool_resource (
    resource_id
);
CREATE INDEX IF NOT EXISTS ip_pool_resource_ip_pool_id ON omicron.public.ip_pool_resource (
    ip_pool_id
);
10 changes: 9 additions & 1 deletion schema/crdb/dbinit.sql
Original file line number Diff line number Diff line change
Expand Up @@ -1604,6 +1604,14 @@ CREATE UNIQUE INDEX IF NOT EXISTS one_default_ip_pool_per_resource ON omicron.pu
) where
is_default = true;

-- created solely to prevent table scans when we delete links: the index on
-- resource_id covers link deletion on silo delete, and the index on
-- ip_pool_id covers link deletion on IP pool delete
CREATE INDEX IF NOT EXISTS ip_pool_resource_id ON omicron.public.ip_pool_resource (
    resource_id
);
CREATE INDEX IF NOT EXISTS ip_pool_resource_ip_pool_id ON omicron.public.ip_pool_resource (
    ip_pool_id
);

/*
* IP Pools are made up of a set of IP ranges, which are start/stop addresses.
* Note that these need not be CIDR blocks or well-behaved subnets with a
Expand Down Expand Up @@ -3258,7 +3266,7 @@ INSERT INTO omicron.public.db_metadata (
version,
target_version
) VALUES
( TRUE, NOW(), NOW(), '24.0.0', NULL)
( TRUE, NOW(), NOW(), '25.0.0', NULL)
ON CONFLICT DO NOTHING;

COMMIT;

0 comments on commit 8183138

Please sign in to comment.