From 0c9dbaef15b4528760f64be31cac74f1d8b33899 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 14 May 2024 11:29:37 -0700 Subject: [PATCH 1/9] Update Rust crate ref-cast to v1.0.23 (#5762) --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6cad030295..9b3688be5e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7512,18 +7512,18 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4846d4c50d1721b1a3bef8af76924eef20d5e723647333798c1b519b3a9473f" +checksum = "ccf0a6f84d5f1d581da8b41b47ec8600871962f2a528115b542b362d4b744931" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fddb4f8d99b0a2ebafc65a87a69a7b9875e4b1ae1f00db265d300ef7f28bccc" +checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", From 559ac47c9152fc103f05b1b068da11b6ec6a619e Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 14 May 2024 11:29:47 -0700 Subject: [PATCH 2/9] Update Rust crate proc-macro2 to v1.0.82 (#5761) --- Cargo.lock | 4 ++-- workspace-hack/Cargo.toml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9b3688be5e..38cb54142e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6979,9 +6979,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" +checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" dependencies = [ "unicode-ident", ] diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 75af094200..43392665c7 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -83,7 +83,7 @@ petgraph = { version = "0.6.5", features = ["serde-1"] } postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] } predicates = { version = "3.1.0" } -proc-macro2 = { version = "1.0.81" } +proc-macro2 = { version = "1.0.82" } rand = { version = "0.8.5" } rand_chacha = { version = "0.3.1", default-features = false, features = ["std"] } regex = { version = "1.10.4" } @@ -190,7 +190,7 @@ petgraph = { version = "0.6.5", features = ["serde-1"] } postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] } predicates = { version = "3.1.0" } -proc-macro2 = { version = "1.0.81" } +proc-macro2 = { version = "1.0.82" } rand = { version = "0.8.5" } rand_chacha = { version = "0.3.1", default-features = false, features = ["std"] } regex = { version = "1.10.4" } From 3dd9ec322f19c3832ae4d8f0ca504ed2efb81f97 Mon Sep 17 00:00:00 2001 From: bnaecker Date: Tue, 14 May 2024 11:35:34 -0700 Subject: [PATCH 3/9] Add the server kind to the Nexus API context (#5751) - Adds a server kind enum, used to distinguish which API server is running any 
particular handler. - Wraps the existing `ServerContext` into a higher-level `ApiContext` type, which includes the former in addition to the kind of server handling the request. - Fixes #5735 --- .../reconfigurator-cli/tests/test_basic.rs | 2 +- .../reconfigurator/execution/src/datasets.rs | 2 +- nexus/reconfigurator/execution/src/dns.rs | 2 +- .../execution/src/external_networking.rs | 4 +- .../execution/src/omicron_physical_disks.rs | 2 +- .../execution/src/omicron_zones.rs | 2 +- nexus/src/app/allow_list.rs | 13 +- .../src/app/background/blueprint_execution.rs | 2 +- nexus/src/app/background/blueprint_load.rs | 2 +- nexus/src/app/background/common.rs | 6 +- nexus/src/app/background/dns_config.rs | 2 +- nexus/src/app/background/dns_propagation.rs | 2 +- .../src/app/background/external_endpoints.rs | 2 +- nexus/src/app/background/init.rs | 2 +- .../app/background/inventory_collection.rs | 4 +- .../src/app/background/metrics_producer_gc.rs | 2 +- nexus/src/app/external_endpoints.rs | 4 +- nexus/src/app/mod.rs | 10 + nexus/src/app/sagas/disk_create.rs | 14 +- nexus/src/app/sagas/disk_delete.rs | 8 +- nexus/src/app/sagas/instance_create.rs | 10 +- nexus/src/app/sagas/instance_delete.rs | 13 +- nexus/src/app/sagas/instance_ip_attach.rs | 10 +- nexus/src/app/sagas/instance_ip_detach.rs | 10 +- nexus/src/app/sagas/instance_migrate.rs | 4 +- nexus/src/app/sagas/instance_start.rs | 8 +- nexus/src/app/sagas/project_create.rs | 6 +- nexus/src/app/sagas/snapshot_create.rs | 14 +- nexus/src/app/sagas/test_helpers.rs | 20 +- nexus/src/app/sagas/test_saga.rs | 2 +- nexus/src/app/sagas/vpc_create.rs | 10 +- nexus/src/context.rs | 76 +- nexus/src/external_api/console_api.rs | 90 +- nexus/src/external_api/device_auth.rs | 25 +- nexus/src/external_api/http_entrypoints.rs | 1919 ++++++++++++----- nexus/src/internal_api/http_entrypoints.rs | 149 +- nexus/src/lib.rs | 75 +- nexus/test-interface/src/lib.rs | 1 + nexus/test-utils/src/lib.rs | 13 + nexus/tests/integration_tests/allow_list.rs | 7 +- nexus/tests/integration_tests/disks.rs | 30 +- nexus/tests/integration_tests/external_ips.rs | 8 +- nexus/tests/integration_tests/instances.rs | 42 +- nexus/tests/integration_tests/ip_pools.rs | 4 +- nexus/tests/integration_tests/metrics.rs | 8 +- nexus/tests/integration_tests/pantry.rs | 16 +- nexus/tests/integration_tests/rack.rs | 7 +- nexus/tests/integration_tests/saml.rs | 6 +- nexus/tests/integration_tests/silo_users.rs | 4 +- nexus/tests/integration_tests/silos.rs | 26 +- nexus/tests/integration_tests/sleds.rs | 2 +- nexus/tests/integration_tests/snapshots.rs | 16 +- .../integration_tests/subnet_allocation.rs | 8 +- .../integration_tests/volume_management.rs | 34 +- nexus/tests/integration_tests/vpc_subnets.rs | 2 +- 55 files changed, 1834 insertions(+), 928 deletions(-) diff --git a/dev-tools/reconfigurator-cli/tests/test_basic.rs b/dev-tools/reconfigurator-cli/tests/test_basic.rs index a8fd91f156..1ae78487a3 100644 --- a/dev-tools/reconfigurator-cli/tests/test_basic.rs +++ b/dev-tools/reconfigurator-cli/tests/test_basic.rs @@ -56,7 +56,7 @@ type ControlPlaneTestContext = #[nexus_test] async fn test_blueprint_edit(cptestctx: &ControlPlaneTestContext) { // Setup - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let log = &cptestctx.logctx.log; let opctx = OpContext::for_background( diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index 6e4286f9db..c4f5cbae82 100644 
--- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -147,7 +147,7 @@ mod tests { const TEST_NAME: &str = "test_ensure_crucible_dataset_records_exist"; // Set up. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index 1760421dee..4223652b00 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -1126,7 +1126,7 @@ mod test { async fn test_silos_external_dns_end_to_end( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let log = &cptestctx.logctx.log; let opctx = OpContext::for_background( diff --git a/nexus/reconfigurator/execution/src/external_networking.rs b/nexus/reconfigurator/execution/src/external_networking.rs index 40ad65816e..cff912c137 100644 --- a/nexus/reconfigurator/execution/src/external_networking.rs +++ b/nexus/reconfigurator/execution/src/external_networking.rs @@ -883,7 +883,7 @@ mod tests { cptestctx: &ControlPlaneTestContext, ) { // Set up. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), @@ -1141,7 +1141,7 @@ mod tests { cptestctx: &ControlPlaneTestContext, ) { // Set up. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs index 89287713c2..ab0c5cab45 100644 --- a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs +++ b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs @@ -149,7 +149,7 @@ mod test { #[nexus_test] async fn test_deploy_omicron_disks(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/reconfigurator/execution/src/omicron_zones.rs b/nexus/reconfigurator/execution/src/omicron_zones.rs index 3248269175..68c1455ee4 100644 --- a/nexus/reconfigurator/execution/src/omicron_zones.rs +++ b/nexus/reconfigurator/execution/src/omicron_zones.rs @@ -139,7 +139,7 @@ mod test { #[nexus_test] async fn test_deploy_omicron_zones(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/allow_list.rs b/nexus/src/app/allow_list.rs index 8f53db68a2..6b32f0c6f3 100644 --- a/nexus/src/app/allow_list.rs +++ b/nexus/src/app/allow_list.rs @@ -13,6 +13,8 @@ use omicron_common::api::external; use omicron_common::api::external::Error; use std::net::IpAddr; +use crate::context::ServerKind; + impl super::Nexus { /// Fetch the allowlist of source IPs that can reach user-facing services. 
pub async fn allow_list_view( @@ -30,6 +32,7 @@ impl super::Nexus { &self, opctx: &OpContext, remote_addr: IpAddr, + server_kind: ServerKind, params: params::AllowListUpdate, ) -> Result { if let external::AllowedSourceIps::List(list) = &params.allowed_ips { @@ -50,6 +53,14 @@ impl super::Nexus { // the request came from is on the allowlist. This is our only real // guardrail to prevent accidentally preventing any future access to // the rack! + // + // Note that we elide this check when handling a request proxied + // from `wicketd`. This is intentional and used as a safety + // mechanism in the event of lockout or other recovery scenarios. + let check_remote_addr = match server_kind { + ServerKind::External => true, + ServerKind::Techport | ServerKind::Internal => false, + }; let mut contains_remote = false; for entry in list.iter() { contains_remote |= entry.contains(remote_addr); @@ -67,7 +78,7 @@ impl super::Nexus { )); } } - if !contains_remote { + if check_remote_addr && !contains_remote { return Err(Error::invalid_request( "The source IP allow list would prevent access \ from the current client! Ensure that the allowlist \ diff --git a/nexus/src/app/background/blueprint_execution.rs b/nexus/src/app/background/blueprint_execution.rs index 1291e72a9b..2ac1b3fd35 100644 --- a/nexus/src/app/background/blueprint_execution.rs +++ b/nexus/src/app/background/blueprint_execution.rs @@ -178,7 +178,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_deploy_omicron_zones(cptestctx: &ControlPlaneTestContext) { // Set up the test. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_background( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/blueprint_load.rs b/nexus/src/app/background/blueprint_load.rs index 8334abecb5..cda1d07fcb 100644 --- a/nexus/src/app/background/blueprint_load.rs +++ b/nexus/src/app/background/blueprint_load.rs @@ -233,7 +233,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_load_blueprints(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/common.rs b/nexus/src/app/background/common.rs index 0fbfa9938d..da595dc4e1 100644 --- a/nexus/src/app/background/common.rs +++ b/nexus/src/app/background/common.rs @@ -533,7 +533,7 @@ mod test { // activated #[nexus_test(server = crate::Server)] async fn test_driver_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), @@ -698,7 +698,7 @@ mod test { // activated.
#[nexus_test(server = crate::Server)] async fn test_activation_in_progress(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), @@ -843,7 +843,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_saga_request_flow(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/dns_config.rs b/nexus/src/app/background/dns_config.rs index be18ac3612..71e0a812a7 100644 --- a/nexus/src/app/background/dns_config.rs +++ b/nexus/src/app/background/dns_config.rs @@ -175,7 +175,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/dns_propagation.rs b/nexus/src/app/background/dns_propagation.rs index cf7a399999..7d650f6f27 100644 --- a/nexus/src/app/background/dns_propagation.rs +++ b/nexus/src/app/background/dns_propagation.rs @@ -196,7 +196,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/external_endpoints.rs b/nexus/src/app/background/external_endpoints.rs index ed530e0775..1a587298d5 100644 --- a/nexus/src/app/background/external_endpoints.rs +++ b/nexus/src/app/background/external_endpoints.rs @@ -131,7 +131,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index 9d9a65c23b..d2f940018d 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -503,7 +503,7 @@ pub mod test { // the new DNS configuration #[nexus_test(server = crate::Server)] async fn test_dns_propagation_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/inventory_collection.rs b/nexus/src/app/background/inventory_collection.rs index 7455a14afb..52ee8f6e13 100644 --- a/nexus/src/app/background/inventory_collection.rs +++ b/nexus/src/app/background/inventory_collection.rs @@ -214,7 +214,7 @@ mod test { // collections, too. 
#[nexus_test(server = crate::Server)] async fn test_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), @@ -328,7 +328,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_db_sled_enumerator(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/metrics_producer_gc.rs b/nexus/src/app/background/metrics_producer_gc.rs index 7bd2bd6c8c..1e3b070249 100644 --- a/nexus/src/app/background/metrics_producer_gc.rs +++ b/nexus/src/app/background/metrics_producer_gc.rs @@ -170,7 +170,7 @@ mod tests { #[nexus_test(server = crate::Server)] async fn test_pruning(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/external_endpoints.rs b/nexus/src/app/external_endpoints.rs index db87632bbf..18d2399eb5 100644 --- a/nexus/src/app/external_endpoints.rs +++ b/nexus/src/app/external_endpoints.rs @@ -26,7 +26,7 @@ //! "certificate resolver" object that impls //! [`rustls::server::ResolvesServerCert`]. See [`NexusCertResolver`]. -use crate::ServerContext; +use crate::context::ApiContext; use anyhow::anyhow; use anyhow::bail; use anyhow::Context; @@ -674,7 +674,7 @@ impl super::Nexus { /// case, we'll choose an arbitrary Silo. pub fn endpoint_for_request( &self, - rqctx: &dropshot::RequestContext>, + rqctx: &dropshot::RequestContext, ) -> Result, Error> { let log = &rqctx.log; let rqinfo = &rqctx.request; diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 2807f77455..a7f12d30cd 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -657,6 +657,16 @@ impl Nexus { .map(|server| server.local_addr()) } + pub(crate) async fn get_techport_server_address( + &self, + ) -> Option { + self.techport_external_server + .lock() + .unwrap() + .as_ref() + .map(|server| server.local_addr()) + } + pub(crate) async fn get_internal_server_address( &self, ) -> Option { diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 165bf7573c..5c4f5bf1ee 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -882,7 +882,7 @@ pub(crate) mod test { pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -893,7 +893,7 @@ pub(crate) mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(&client, PROJECT_NAME).await.identity.id; @@ -1033,7 +1033,7 @@ pub(crate) mod test { test: &DiskTest, ) { let sled_agent = &cptestctx.sled_agent.sled_agent; - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); crate::app::sagas::test_helpers::assert_no_failed_undo_steps( &cptestctx.logctx.log, @@ 
-1063,7 +1063,7 @@ pub(crate) mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(&client, PROJECT_NAME).await.identity.id; let opctx = test_opctx(cptestctx); @@ -1093,7 +1093,7 @@ pub(crate) mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(&client, PROJECT_NAME).await.identity.id; let opctx = test_opctx(&cptestctx); @@ -1111,7 +1111,7 @@ pub(crate) mod test { } async fn destroy_disk(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let disk_selector = params::DiskSelector { project: Some( @@ -1134,7 +1134,7 @@ pub(crate) mod test { let test = DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(&client, PROJECT_NAME).await.identity.id; diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index 333e6c1672..24cf331a34 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -201,12 +201,12 @@ pub(crate) mod test { pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx.nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } async fn create_disk(cptestctx: &ControlPlaneTestContext) -> Disk { - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let project_selector = params::ProjectSelector { @@ -232,7 +232,7 @@ pub(crate) mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(client, PROJECT_NAME).await.identity.id; let disk = create_disk(&cptestctx).await; @@ -258,7 +258,7 @@ pub(crate) mod test { let test = DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(client, PROJECT_NAME).await.identity.id; let disk = create_disk(&cptestctx).await; diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 73fe910c76..a6df7183d1 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -1137,7 +1137,7 @@ pub mod test { ) { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters @@ -1264,7 +1264,7 @@ pub mod test { cptestctx: &ControlPlaneTestContext, ) { let sled_agent = &cptestctx.sled_agent.sled_agent; - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); // Check that no partial artifacts of instance 
creation exist assert!(no_instance_records_exist(datastore).await); @@ -1300,7 +1300,7 @@ pub mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters @@ -1329,7 +1329,7 @@ pub mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_project_and_disk(&client).await; let opctx = test_helpers::test_opctx(&cptestctx); @@ -1353,7 +1353,7 @@ pub mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters diff --git a/nexus/src/app/sagas/instance_delete.rs b/nexus/src/app/sagas/instance_delete.rs index 0e253913b0..d93c1455ad 100644 --- a/nexus/src/app/sagas/instance_delete.rs +++ b/nexus/src/app/sagas/instance_delete.rs @@ -210,7 +210,7 @@ mod test { instance_id: Uuid, ) -> Params { let opctx = test_opctx(&cptestctx); - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); let (.., authz_instance, instance) = LookupPath::new(&opctx, &datastore) @@ -253,7 +253,7 @@ mod test { pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -263,7 +263,7 @@ mod test { ) { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters @@ -290,7 +290,7 @@ mod test { cptestctx: &ControlPlaneTestContext, params: params::InstanceCreate, ) -> db::model::Instance { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let project_selector = params::ProjectSelector { @@ -304,7 +304,8 @@ mod test { .await .unwrap(); - let datastore = cptestctx.server.apictx().nexus.datastore().clone(); + let datastore = + cptestctx.server.server_context().nexus.datastore().clone(); let (.., db_instance) = LookupPath::new(&opctx, &datastore) .instance_id(instance_state.instance().id()) .fetch() @@ -321,7 +322,7 @@ mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters diff --git a/nexus/src/app/sagas/instance_ip_attach.rs b/nexus/src/app/sagas/instance_ip_attach.rs index 3cd6ac1c46..3332b71274 100644 --- a/nexus/src/app/sagas/instance_ip_attach.rs +++ b/nexus/src/app/sagas/instance_ip_attach.rs @@ -410,7 +410,7 @@ pub(crate) mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = 
&cptestctx.server.server_context(); let nexus = &apictx.nexus; let sled_agent = &cptestctx.sled_agent.sled_agent; @@ -460,7 +460,7 @@ pub(crate) mod test { use nexus_db_queries::db::schema::external_ip::dsl; let sled_agent = &cptestctx.sled_agent.sled_agent; - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); @@ -500,7 +500,7 @@ pub(crate) mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); @@ -526,7 +526,7 @@ pub(crate) mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); @@ -555,7 +555,7 @@ pub(crate) mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); diff --git a/nexus/src/app/sagas/instance_ip_detach.rs b/nexus/src/app/sagas/instance_ip_detach.rs index 7a71824376..2f1d76c853 100644 --- a/nexus/src/app/sagas/instance_ip_detach.rs +++ b/nexus/src/app/sagas/instance_ip_detach.rs @@ -381,7 +381,7 @@ pub(crate) mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let sled_agent = &cptestctx.sled_agent.sled_agent; @@ -425,7 +425,7 @@ pub(crate) mod test { let opctx = test_helpers::test_opctx(cptestctx); let sled_agent = &cptestctx.sled_agent.sled_agent; - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); @@ -475,7 +475,7 @@ pub(crate) mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); @@ -503,7 +503,7 @@ pub(crate) mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); @@ -534,7 +534,7 @@ pub(crate) mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); diff --git a/nexus/src/app/sagas/instance_migrate.rs b/nexus/src/app/sagas/instance_migrate.rs index a727debbea..1cfd170faf 100644 --- a/nexus/src/app/sagas/instance_migrate.rs +++ b/nexus/src/app/sagas/instance_migrate.rs @@ -614,7 +614,7 @@ mod tests { ) { let other_sleds = add_sleds(cptestctx, 1).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = 
setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); @@ -658,7 +658,7 @@ mod tests { let log = &cptestctx.logctx.log; let other_sleds = add_sleds(cptestctx, 1).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); diff --git a/nexus/src/app/sagas/instance_start.rs b/nexus/src/app/sagas/instance_start.rs index 55c00d8707..b76bc2e37d 100644 --- a/nexus/src/app/sagas/instance_start.rs +++ b/nexus/src/app/sagas/instance_start.rs @@ -767,7 +767,7 @@ mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); let instance = create_instance(client).await; @@ -806,7 +806,7 @@ mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); let instance = create_instance(client).await; @@ -868,7 +868,7 @@ mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); let instance = create_instance(client).await; @@ -910,7 +910,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_ensure_running_unwind(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); let instance = create_instance(client).await; diff --git a/nexus/src/app/sagas/project_create.rs b/nexus/src/app/sagas/project_create.rs index b31dd821f0..6893590519 100644 --- a/nexus/src/app/sagas/project_create.rs +++ b/nexus/src/app/sagas/project_create.rs @@ -188,7 +188,7 @@ mod test { fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -258,7 +258,7 @@ mod test { async fn test_saga_basic_usage_succeeds( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; // Before running the test, confirm we have no records of any projects. 
verify_clean_slate(nexus.datastore()).await; @@ -279,7 +279,7 @@ mod test { cptestctx: &ControlPlaneTestContext, ) { let log = &cptestctx.logctx.log; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); crate::app::sagas::test_helpers::action_failure_can_unwind::< SagaProjectCreate, diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index ff57470a5f..53e06e310d 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -1876,7 +1876,7 @@ mod test { pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -1889,7 +1889,7 @@ mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters @@ -1976,7 +1976,7 @@ mod test { // Verifies: // - No snapshot records exist // - No region snapshot records exist - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); assert!(no_snapshot_records_exist(datastore).await); assert!(no_region_snapshot_records_exist(datastore).await); } @@ -2016,7 +2016,7 @@ mod test { // Read out the instance's assigned sled, then poke the instance to get // it from the Starting state to the Running state so the test disk can // be snapshotted. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let (.., authz_instance) = LookupPath::new(&opctx, nexus.datastore()) .instance_id(instance.identity.id) @@ -2080,7 +2080,7 @@ mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters @@ -2219,7 +2219,7 @@ mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters @@ -2324,7 +2324,7 @@ mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters diff --git a/nexus/src/app/sagas/test_helpers.rs b/nexus/src/app/sagas/test_helpers.rs index 1b383d27bb..bacd0f1c9d 100644 --- a/nexus/src/app/sagas/test_helpers.rs +++ b/nexus/src/app/sagas/test_helpers.rs @@ -34,7 +34,7 @@ type ControlPlaneTestContext = pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -42,7 +42,7 @@ pub(crate) async fn instance_start( 
cptestctx: &ControlPlaneTestContext, id: &Uuid, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -62,7 +62,7 @@ pub(crate) async fn instance_stop( cptestctx: &ControlPlaneTestContext, id: &Uuid, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -83,7 +83,7 @@ pub(crate) async fn instance_stop_by_name( name: &str, project_name: &str, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -104,7 +104,7 @@ pub(crate) async fn instance_delete_by_name( name: &str, project_name: &str, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -126,7 +126,7 @@ pub(crate) async fn instance_simulate( ) { info!(&cptestctx.logctx.log, "Poking simulated instance"; "instance_id" => %instance_id); - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let sa = nexus .instance_sled_by_id(instance_id) .await @@ -145,7 +145,7 @@ pub(crate) async fn instance_simulate_by_name( "instance_name" => %name, "project_name" => %project_name); - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -168,7 +168,7 @@ pub async fn instance_fetch( cptestctx: &ControlPlaneTestContext, instance_id: Uuid, ) -> InstanceAndActiveVmm { - let datastore = cptestctx.server.apictx().nexus.datastore().clone(); + let datastore = cptestctx.server.server_context().nexus.datastore().clone(); let opctx = test_opctx(&cptestctx); let (.., authz_instance) = LookupPath::new(&opctx, &datastore) .instance_id(instance_id) @@ -194,7 +194,7 @@ pub async fn no_virtual_provisioning_resource_records_exist( use nexus_db_queries::db::model::VirtualProvisioningResource; use nexus_db_queries::db::schema::virtual_provisioning_resource::dsl; - let datastore = cptestctx.server.apictx().nexus.datastore().clone(); + let datastore = cptestctx.server.server_context().nexus.datastore().clone(); let conn = datastore.pool_connection_for_tests().await.unwrap(); datastore @@ -223,7 +223,7 @@ pub async fn no_virtual_provisioning_collection_records_using_instances( use nexus_db_queries::db::model::VirtualProvisioningCollection; use nexus_db_queries::db::schema::virtual_provisioning_collection::dsl; - let datastore = cptestctx.server.apictx().nexus.datastore().clone(); + let datastore = cptestctx.server.server_context().nexus.datastore().clone(); let conn = datastore.pool_connection_for_tests().await.unwrap(); datastore diff --git a/nexus/src/app/sagas/test_saga.rs b/nexus/src/app/sagas/test_saga.rs index 0520a17602..9ccdc4aebc 100644 --- a/nexus/src/app/sagas/test_saga.rs +++ b/nexus/src/app/sagas/test_saga.rs @@ -75,7 +75,7 @@ type ControlPlaneTestContext = #[nexus_test(server = crate::Server)] async fn test_saga_stuck(cptestctx: &ControlPlaneTestContext) { - let nexus = 
&cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let params = Params {}; let dag = create_saga_dag::(params).unwrap(); let runnable_saga = nexus.create_runnable_saga(dag.clone()).await.unwrap(); diff --git a/nexus/src/app/sagas/vpc_create.rs b/nexus/src/app/sagas/vpc_create.rs index 6b48e4087a..fdd117b850 100644 --- a/nexus/src/app/sagas/vpc_create.rs +++ b/nexus/src/app/sagas/vpc_create.rs @@ -496,7 +496,7 @@ pub(crate) mod test { fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -505,7 +505,7 @@ pub(crate) mod test { project_id: Uuid, action: authz::Action, ) -> authz::Project { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_selector = params::ProjectSelector { project: NameOrId::Id(project_id) }; let opctx = test_opctx(&cptestctx); @@ -523,7 +523,7 @@ pub(crate) mod test { project_id: Uuid, ) { let opctx = test_opctx(&cptestctx); - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); let default_name = Name::try_from("default".to_string()).unwrap(); let system_name = Name::try_from("system".to_string()).unwrap(); @@ -710,7 +710,7 @@ pub(crate) mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_and_project(&client).await; delete_project_vpc_defaults(&cptestctx, project_id).await; @@ -740,7 +740,7 @@ pub(crate) mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_and_project(&client).await; delete_project_vpc_defaults(&cptestctx, project_id).await; diff --git a/nexus/src/context.rs b/nexus/src/context.rs index cf2b9d6f17..72ecd6b8ac 100644 --- a/nexus/src/context.rs +++ b/nexus/src/context.rs @@ -29,6 +29,61 @@ use std::str::FromStr; use std::sync::Arc; use uuid::Uuid; +/// Indicates the kind of HTTP server. +#[derive(Clone, Copy)] +pub enum ServerKind { + /// This serves the internal API. + Internal, + /// This serves the external API over the normal public network. + External, + /// This serves the external API proxied over the technician port. + Techport, +} + +/// The API context for each distinct Dropshot server. +/// +/// This packages up the main server context, which is shared by all API servers +/// (e.g., internal, external, and techport). It also includes the +/// [`ServerKind`], which makes it possible to know which server is handling any +/// particular request. +#[derive(Clone)] +pub struct ApiContext { + /// The kind of server. + pub kind: ServerKind, + /// Shared state available to all endpoint handlers. + pub context: Arc, +} + +impl ApiContext { + /// Create a new context with a rack ID and logger. This creates the + /// underlying `Nexus` as well. + pub async fn for_internal( + rack_id: Uuid, + log: Logger, + config: &NexusConfig, + ) -> Result { + ServerContext::new(rack_id, log, config) + .await + .map(|context| Self { kind: ServerKind::Internal, context }) + } + + /// Clone self for use by the external Dropshot server. 
+ pub fn for_external(&self) -> Self { + Self { kind: ServerKind::External, context: self.context.clone() } + } + + /// Clone self for use by the techport Dropshot server. + pub fn for_techport(&self) -> Self { + Self { kind: ServerKind::Techport, context: self.context.clone() } + } +} + +impl std::borrow::Borrow for ApiContext { + fn borrow(&self) -> &ServerContext { + &self.context + } +} + /// Shared state available to all API request handlers pub struct ServerContext { /// reference to the underlying nexus @@ -262,18 +317,19 @@ impl ServerContext { /// Authenticates an incoming request to the external API and produces a new /// operation context for it pub(crate) async fn op_context_for_external_api( - rqctx: &dropshot::RequestContext>, + rqctx: &dropshot::RequestContext, ) -> Result { let apictx = rqctx.context(); OpContext::new_async( &rqctx.log, async { - let authn = - Arc::new(apictx.external_authn.authn_request(rqctx).await?); - let datastore = Arc::clone(apictx.nexus.datastore()); + let authn = Arc::new( + apictx.context.external_authn.authn_request(rqctx).await?, + ); + let datastore = Arc::clone(apictx.context.nexus.datastore()); let authz = authz::Context::new( Arc::clone(&authn), - Arc::clone(&apictx.authz), + Arc::clone(&apictx.context.authz), datastore, ); Ok((authn, authz)) @@ -285,17 +341,17 @@ pub(crate) async fn op_context_for_external_api( } pub(crate) async fn op_context_for_internal_api( - rqctx: &dropshot::RequestContext>, + rqctx: &dropshot::RequestContext, ) -> OpContext { - let apictx = rqctx.context(); + let apictx = &rqctx.context(); OpContext::new_async( &rqctx.log, async { - let authn = Arc::clone(&apictx.internal_authn); - let datastore = Arc::clone(apictx.nexus.datastore()); + let authn = Arc::clone(&apictx.context.internal_authn); + let datastore = Arc::clone(apictx.context.nexus.datastore()); let authz = authz::Context::new( Arc::clone(&authn), - Arc::clone(&apictx.authz), + Arc::clone(&apictx.context.authz), datastore, ); Ok::<_, std::convert::Infallible>((authn, authz)) diff --git a/nexus/src/external_api/console_api.rs b/nexus/src/external_api/console_api.rs index d49e7f3be4..caff195047 100644 --- a/nexus/src/external_api/console_api.rs +++ b/nexus/src/external_api/console_api.rs @@ -21,7 +21,7 @@ // toolchain; we can remove this attribute then. 
#![allow(clippy::declare_interior_mutable_const)] -use crate::ServerContext; +use crate::context::ApiContext; use anyhow::Context; use camino::{Utf8Path, Utf8PathBuf}; use dropshot::{ @@ -56,7 +56,6 @@ use serde_urlencoded; use std::collections::HashMap; use std::num::NonZeroU32; use std::str::FromStr; -use std::sync::Arc; use tokio::fs::File; use tokio_util::codec::{BytesCodec, FramedRead}; @@ -239,7 +238,7 @@ impl RelayState { unpublished = true, }] pub(crate) async fn login_saml_begin( - rqctx: RequestContext>, + rqctx: RequestContext, _path_params: Path, _query_params: Query, ) -> Result, HttpError> { @@ -258,13 +257,13 @@ pub(crate) async fn login_saml_begin( unpublished = true, }] pub(crate) async fn login_saml_redirect( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path_params = path_params.into_inner(); // Use opctx_external_authn because this request will be @@ -303,7 +302,11 @@ pub(crate) async fn login_saml_redirect( } }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Authenticate a user via SAML @@ -313,13 +316,13 @@ pub(crate) async fn login_saml_redirect( tags = ["login"], }] pub(crate) async fn login_saml( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, body_bytes: dropshot::UntypedBody, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path_params = path_params.into_inner(); // By definition, this request is not authenticated. These operations @@ -379,14 +382,18 @@ pub(crate) async fn login_saml( // use absolute timeout even though session might idle out first. 
// browser expiration is mostly for convenience, as the API will // reject requests with an expired session regardless - apictx.session_absolute_timeout(), - apictx.external_tls_enabled, + apictx.context.session_absolute_timeout(), + apictx.context.external_tls_enabled, )?; headers.append(header::SET_COOKIE, cookie); } Ok(response) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Deserialize, JsonSchema)] @@ -401,14 +408,14 @@ pub struct LoginPathParam { unpublished = true, }] pub(crate) async fn login_local_begin( - rqctx: RequestContext>, + rqctx: RequestContext, _path_params: Path, _query_params: Query, ) -> Result, HttpError> { // TODO: figure out why instrumenting doesn't work // let apictx = rqctx.context(); // let handler = async { serve_console_index(rqctx.context()).await }; - // apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + // apictx.context.external_latencies.instrument_dropshot_handler(&rqctx, handler).await serve_console_index(rqctx).await } @@ -419,13 +426,13 @@ pub(crate) async fn login_local_begin( tags = ["login"], }] pub(crate) async fn login_local( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, credentials: dropshot::TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let credentials = credentials.into_inner(); let silo = path.silo_name.into(); @@ -448,22 +455,26 @@ pub(crate) async fn login_local( // use absolute timeout even though session might idle out first. // browser expiration is mostly for convenience, as the API will // reject requests with an expired session regardless - apictx.session_absolute_timeout(), - apictx.external_tls_enabled, + apictx.context.session_absolute_timeout(), + apictx.context.external_tls_enabled, )?; headers.append(header::SET_COOKIE, cookie); } Ok(response) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } async fn create_session( opctx: &OpContext, - apictx: &ServerContext, + apictx: &ApiContext, user: Option, ) -> Result { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let session = match user { Some(user) => nexus.session_create(&opctx, user.id()).await?, None => Err(Error::Unauthenticated { @@ -483,12 +494,12 @@ async fn create_session( tags = ["hidden"], }] pub(crate) async fn logout( - rqctx: RequestContext>, + rqctx: RequestContext, cookies: Cookies, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await; let token = cookies.get(SESSION_COOKIE_COOKIE_NAME); @@ -513,14 +524,20 @@ pub(crate) async fn logout( let headers = response.headers_mut(); headers.append( header::SET_COOKIE, - clear_session_cookie_header_value(apictx.external_tls_enabled)?, + clear_session_cookie_header_value( + apictx.context.external_tls_enabled, + )?, ); }; Ok(response) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Deserialize, JsonSchema)] @@ -574,10 +591,10 @@ pub struct 
LoginUrlQuery { /// `redirect_uri` represents the URL to send the user back to after successful /// login, and is included in `state` query param if present async fn get_login_url( - rqctx: &RequestContext>, + rqctx: &RequestContext, redirect_uri: Option, ) -> Result { - let nexus = &rqctx.context().nexus; + let nexus = &rqctx.context().context.nexus; let endpoint = nexus.endpoint_for_request(rqctx)?; let silo = endpoint.silo(); @@ -643,7 +660,7 @@ async fn get_login_url( unpublished = true, }] pub(crate) async fn login_begin( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result { let apictx = rqctx.context(); @@ -652,11 +669,15 @@ pub(crate) async fn login_begin( let login_url = get_login_url(&rqctx, query.redirect_uri).await?; http_response_found(login_url) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } pub(crate) async fn console_index_or_login_redirect( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { let opctx = crate::context::op_context_for_external_api(&rqctx).await; @@ -692,7 +713,7 @@ macro_rules! console_page { ($name:ident, $path:literal) => { #[endpoint { method = GET, path = $path, unpublished = true, }] pub(crate) async fn $name( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { console_index_or_login_redirect(rqctx).await } @@ -704,7 +725,7 @@ macro_rules! console_page_wildcard { ($name:ident, $path:literal) => { #[endpoint { method = GET, path = $path, unpublished = true, }] pub(crate) async fn $name( - rqctx: RequestContext>, + rqctx: RequestContext, _path_params: Path, ) -> Result, HttpError> { console_index_or_login_redirect(rqctx).await @@ -784,12 +805,13 @@ const WEB_SECURITY_HEADERS: [(HeaderName, HeaderValue); 3] = [ /// file is present in the directory and `gzip` is listed in the request's /// `Accept-Encoding` header. async fn serve_static( - rqctx: RequestContext>, + rqctx: RequestContext, path: &Utf8Path, cache_control: HeaderValue, ) -> Result, HttpError> { let apictx = rqctx.context(); let static_dir = apictx + .context .console_config .static_dir .as_deref() @@ -854,7 +876,7 @@ async fn serve_static( unpublished = true, }] pub(crate) async fn asset( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { // asset URLs contain hashes, so cache for 1 year @@ -868,7 +890,7 @@ pub(crate) async fn asset( /// Serve `/index.html` via [`serve_static`]. Disallow caching. 
 pub(crate) async fn serve_console_index(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
 ) -> Result, HttpError> {
     // do not cache this response in browser
     const CACHE_CONTROL: HeaderValue = HeaderValue::from_static("no-store");
diff --git a/nexus/src/external_api/device_auth.rs b/nexus/src/external_api/device_auth.rs
index 1697722f6f..2aa1965e79 100644
--- a/nexus/src/external_api/device_auth.rs
+++ b/nexus/src/external_api/device_auth.rs
@@ -12,7 +12,7 @@
 use super::console_api::console_index_or_login_redirect;
 use super::views::DeviceAccessTokenGrant;
 use crate::app::external_endpoints::authority_for_request;
-use crate::ServerContext;
+use crate::ApiContext;
 use dropshot::{
     endpoint, HttpError, HttpResponseUpdatedNoContent, RequestContext,
     TypedBody,
@@ -23,7 +23,6 @@ use nexus_db_queries::db::model::DeviceAccessToken;
 use omicron_common::api::external::InternalContext;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
-use std::sync::Arc;
 use uuid::Uuid;
 
 // Token granting à la RFC 8628 (OAuth 2.0 Device Authorization Grant)
@@ -64,11 +63,11 @@ pub struct DeviceAuthRequest {
     tags = ["hidden"], // "token"
 }]
 pub(crate) async fn device_auth_request(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let params = params.into_inner();
     let handler = async {
         let opctx = nexus.opctx_external_authn();
@@ -116,7 +115,7 @@ pub struct DeviceAuthVerify {
     unpublished = true,
 }]
 pub(crate) async fn device_auth_verify(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
 ) -> Result, HttpError> {
     console_index_or_login_redirect(rqctx).await
 }
@@ -127,7 +126,7 @@ pub(crate) async fn device_auth_verify(
     unpublished = true,
 }]
 pub(crate) async fn device_auth_success(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
 ) -> Result, HttpError> {
     console_index_or_login_redirect(rqctx).await
 }
@@ -143,11 +142,11 @@ pub(crate) async fn device_auth_success(
     tags = ["hidden"], // "token"
 }]
 pub(crate) async fn device_auth_confirm(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     params: TypedBody,
 ) -> Result {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let params = params.into_inner();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -163,7 +162,11 @@ pub(crate) async fn device_auth_confirm(
             .await?;
         Ok(HttpResponseUpdatedNoContent())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
@@ -192,11 +195,11 @@ pub enum DeviceAccessTokenResponse {
     tags = ["hidden"], // "token"
 }]
 pub(crate) async fn device_access_token(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let params = params.into_inner();
     let handler = async {
         // RFC 8628 §3.4
diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index 0b09e2c9ab..350836441e 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -13,8 +13,7 @@ use super::{
         Utilization, Vpc, VpcRouter, VpcSubnet,
     },
 };
-use crate::external_api::shared;
-use crate::ServerContext;
+use crate::{context::ApiContext, external_api::shared};
 use dropshot::HttpError;
 use dropshot::HttpResponseAccepted;
 use dropshot::HttpResponseCreated;
@@ -95,10 +94,9 @@ use schemars::JsonSchema;
 use serde::Deserialize;
 use serde::Serialize;
 use std::net::IpAddr;
-use std::sync::Arc;
 use uuid::Uuid;
 
-type NexusApiDescription = ApiDescription<Arc<ServerContext>>;
+type NexusApiDescription = ApiDescription<ApiContext>;
 
 /// Returns a description of the external nexus API
 pub(crate) fn external_api() -> NexusApiDescription {
@@ -372,9 +370,9 @@ pub(crate) fn external_api() -> NexusApiDescription {
         endpoint: T,
     ) -> Result<(), String>
     where
-        T: Into<ApiEndpoint<Arc<ServerContext>>>,
+        T: Into<ApiEndpoint<ApiContext>>,
     {
-        let mut ep: ApiEndpoint<Arc<ServerContext>> = endpoint.into();
+        let mut ep: ApiEndpoint<ApiContext> = endpoint.into();
         // only one tag is allowed
         ep.tags = vec![String::from("hidden")];
         ep.path = String::from("/experimental") + &ep.path;
@@ -450,7 +448,7 @@ pub(crate) fn external_api() -> NexusApiDescription {
     tags = ["system/status"],
 }]
 async fn ping(
-    _rqctx: RequestContext<Arc<ServerContext>>,
+    _rqctx: RequestContext<ApiContext>,
 ) -> Result, HttpError> {
     Ok(HttpResponseOk(views::Ping { status: views::PingStatus::Ok }))
 }
@@ -462,16 +460,20 @@ async fn ping(
     tags = ["policy"],
 }]
 async fn system_policy_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let policy = nexus.fleet_fetch_policy(&opctx).await?;
         Ok(HttpResponseOk(policy))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Update top-level IAM policy
@@ -481,12 +483,12 @@ async fn system_policy_view(
     tags = ["policy"],
 }]
 async fn system_policy_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     new_policy: TypedBody>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let new_policy = new_policy.into_inner();
         let nasgns = new_policy.role_assignments.len();
         // This should have been validated during parsing.
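[The hunks above and below replace every `Arc<ServerContext>` type parameter with the new `ApiContext` wrapper, and handler bodies reach the Nexus state through an extra `.context` step. A minimal sketch of the wrapper's shape as these call sites imply it; only the `context` field is actually visible in this diff, so the `kind` field name and the `ServerKind` variant names are assumptions:

    use std::sync::Arc;

    // Sketch only: which API server is running the handler.
    // Variant names are assumed, not taken from this patch.
    #[derive(Clone, Copy, Debug)]
    pub enum ServerKind {
        External,
        Internal,
    }

    // Sketch only: the Dropshot context type these hunks migrate to,
    // pairing the pre-existing Arc<ServerContext> with the server kind.
    #[derive(Clone)]
    pub struct ApiContext {
        pub kind: ServerKind,
        pub context: Arc<ServerContext>,
    }

Under that shape, `rqctx.context()` yields the wrapper rather than the server state itself, which is why `apictx.nexus` becomes `apictx.context.nexus` throughout the remaining hunks.]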
@@ -495,7 +497,11 @@ async fn system_policy_update(
         let policy = nexus.fleet_update_policy(&opctx, &new_policy).await?;
         Ok(HttpResponseOk(policy))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch current silo's IAM policy
@@ -505,11 +511,11 @@ async fn system_policy_update(
     tags = ["silos"],
 }]
 pub(crate) async fn policy_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let silo: NameOrId = opctx
             .authn
@@ -522,7 +528,11 @@ pub(crate) async fn policy_view(
         let policy = nexus.silo_fetch_policy(&opctx, &silo_lookup).await?;
         Ok(HttpResponseOk(policy))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Update current silo's IAM policy
@@ -532,12 +542,12 @@ pub(crate) async fn policy_view(
     tags = ["silos"],
 }]
 async fn policy_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     new_policy: TypedBody>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let new_policy = new_policy.into_inner();
         let nasgns = new_policy.role_assignments.len();
         // This should have been validated during parsing.
@@ -554,7 +564,11 @@ async fn policy_update(
             nexus.silo_update_policy(&opctx, &silo_lookup, &new_policy).await?;
         Ok(HttpResponseOk(policy))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch resource utilization for user's current silo
@@ -564,11 +578,11 @@ async fn policy_update(
     tags = ["silos"],
 }]
 async fn utilization_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let silo_lookup = nexus.current_silo_lookup(&opctx)?;
         let utilization =
@@ -576,7 +590,11 @@ async fn utilization_view(
         Ok(HttpResponseOk(utilization.into()))
     };
 
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch current utilization for given silo
@@ -586,12 +604,12 @@ async fn utilization_view(
     tags = ["system/silos"],
 }]
 async fn silo_utilization_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let silo_lookup =
@@ -600,7 +618,11 @@ async fn silo_utilization_view(
         Ok(HttpResponseOk(quotas.into()))
     };
 
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 /// List current utilization state for all silos
 #[endpoint {
     method = GET,
     path = "/v1/system/utilization/silos",
     tags = ["system/silos"],
 }]
 async fn silo_utilization_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
 
         let pagparams = data_page_params_for(&rqctx, &query)?;
@@ -635,7 +657,11 @@ async fn silo_utilization_list(
             &marker_for_name_or_id,
         )?))
    };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Lists resource quotas for all silos
@@ -645,12 +671,12 @@ async fn silo_utilization_list(
     tags = ["system/silos"],
 }]
 async fn system_quotas_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
 
         let pagparams = data_page_params_for(&rqctx, &query)?;
@@ -669,7 +695,11 @@ async fn system_quotas_list(
             &|_, quota: &SiloQuotas| quota.silo_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch resource quotas for silo
@@ -679,12 +709,12 @@ async fn system_quotas_list(
     tags = ["system/silos"],
 }]
 async fn silo_quotas_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let silo_lookup =
@@ -692,7 +722,11 @@ async fn silo_quotas_view(
         let quota = nexus.silo_quotas_view(&opctx, &silo_lookup).await?;
         Ok(HttpResponseOk(quota.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Update resource quotas for silo
@@ -704,13 +738,13 @@ async fn silo_quotas_view(
     tags = ["system/silos"],
 }]
 async fn silo_quotas_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     new_quota: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let silo_lookup =
@@ -720,7 +754,11 @@ async fn silo_quotas_update(
             .await?;
         Ok(HttpResponseOk(quota.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// List silos
@@ -732,12 +770,12 @@ async fn silo_quotas_update(
     tags = ["system/silos"],
 }]
 async fn silo_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -755,7 +793,11 @@ async fn silo_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Create a silo
@@ -765,18 +807,22 @@ async fn silo_list(
     tags = ["system/silos"],
 }]
 async fn silo_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     new_silo_params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let silo =
             nexus.silo_create(&opctx, new_silo_params.into_inner()).await?;
         Ok(HttpResponseCreated(silo.try_into()?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch silo
@@ -788,19 +834,23 @@ async fn silo_create(
     tags = ["system/silos"],
 }]
 async fn silo_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?;
         let (.., silo) = silo_lookup.fetch().await?;
         Ok(HttpResponseOk(silo.try_into()?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// List IP pools linked to silo
@@ -814,14 +864,14 @@ async fn silo_view(
     tags = ["system/silos"],
 }]
 async fn silo_ip_pool_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
@@ -846,7 +896,11 @@ async fn silo_ip_pool_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Delete a silo
@@ -858,19 +912,23 @@ async fn silo_ip_pool_list(
     tags = ["system/silos"],
 }]
 async fn silo_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let params = path_params.into_inner();
         let silo_lookup = nexus.silo_lookup(&opctx, params.silo)?;
         nexus.silo_delete(&opctx, &silo_lookup).await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch silo IAM policy
@@ -880,19 +938,23 @@ async fn silo_delete(
     tags = ["system/silos"],
 }]
 async fn silo_policy_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?;
         let policy = nexus.silo_fetch_policy(&opctx, &silo_lookup).await?;
         Ok(HttpResponseOk(policy))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Update silo IAM policy
@@ -902,7 +964,7 @@ async fn silo_policy_view(
     tags = ["system/silos"],
 }]
 async fn silo_policy_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     new_policy: TypedBody>,
 ) -> Result>, HttpError> {
@@ -913,14 +975,18 @@ async fn silo_policy_update(
         // This should have been validated during parsing.
         bail_unless!(nasgns <= shared::MAX_ROLE_ASSIGNMENTS_PER_RESOURCE);
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?;
         let policy =
             nexus.silo_update_policy(&opctx, &silo_lookup, &new_policy).await?;
         Ok(HttpResponseOk(policy))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 // Silo-specific user endpoints
@@ -932,13 +998,13 @@ async fn silo_policy_update(
     tags = ["system/silos"],
 }]
 async fn silo_user_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanById::from_query(&query)?;
@@ -956,7 +1022,11 @@ async fn silo_user_list(
             &|_, user: &User| user.id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Path parameters for Silo User requests
@@ -973,14 +1043,14 @@ struct UserParam {
     tags = ["system/silos"],
 }]
 async fn silo_user_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?;
         let user =
@@ -988,7 +1058,11 @@ async fn silo_user_view(
             nexus.silo_user_fetch(&opctx, &silo_lookup, path.user_id).await?;
         Ok(HttpResponseOk(user.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 // Silo identity providers
@@ -1000,13 +1074,13 @@ async fn silo_user_view(
     tags = ["system/silos"],
 }]
 async fn silo_identity_provider_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -1025,7 +1099,11 @@ async fn silo_identity_provider_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 // Silo SAML identity providers
@@ -1037,14 +1115,14 @@ async fn silo_identity_provider_list(
     tags = ["system/silos"],
 }]
 async fn saml_identity_provider_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     new_provider: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?;
         let provider = nexus
@@ -1056,7 +1134,11 @@ async fn saml_identity_provider_create(
             .await?;
         Ok(HttpResponseCreated(provider.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch SAML IdP
@@ -1066,14 +1148,14 @@ async fn saml_identity_provider_create(
     tags = ["system/silos"],
 }]
 async fn saml_identity_provider_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let saml_identity_provider_selector =
@@ -1090,7 +1172,11 @@ async fn saml_identity_provider_view(
             .await?;
         Ok(HttpResponseOk(provider.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 // TODO: no DELETE for identity providers?
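[Every handler in this file receives the same two mechanical edits seen above, so the remaining hunks can be reviewed against a single template. A condensed sketch of the before/after pattern; the endpoint body is a placeholder, while the names and the shape of the tail chain match the diff:

    // Before:
    //     rqctx: RequestContext<Arc<ServerContext>>,
    //     let nexus = &apictx.nexus;
    // After:
    async fn example_endpoint(
        rqctx: RequestContext<ApiContext>,
    ) -> Result<HttpResponseOk<()>, HttpError> {
        let apictx = rqctx.context();
        let handler = async {
            // Nexus now sits one field deeper, behind the wrapper.
            let _nexus = &apictx.context.nexus;
            Ok(HttpResponseOk(()))
        };
        // The latency tracker moved with it, hence the reflowed tail chain.
        apictx
            .context
            .external_latencies
            .instrument_dropshot_handler(&rqctx, handler)
            .await
    }
]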
@@ -1108,14 +1194,14 @@ async fn saml_identity_provider_view(
     tags = ["system/silos"],
 }]
 async fn local_idp_user_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     new_user_params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?;
         let user = nexus
@@ -1127,7 +1213,11 @@ async fn local_idp_user_create(
             .await?;
         Ok(HttpResponseCreated(user.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Delete user
@@ -1137,21 +1227,25 @@ async fn local_idp_user_create(
     tags = ["system/silos"],
 }]
 async fn local_idp_user_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?;
         nexus.local_idp_delete_user(&opctx, &silo_lookup, path.user_id).await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Set or invalidate user's password
@@ -1164,7 +1258,7 @@ async fn local_idp_user_delete(
     tags = ["system/silos"],
 }]
 async fn local_idp_user_set_password(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
     update: TypedBody,
@@ -1172,7 +1266,7 @@ async fn local_idp_user_set_password(
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?;
@@ -1186,7 +1280,11 @@ async fn local_idp_user_set_password(
             .await?;
         Ok(HttpResponseUpdatedNoContent())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// List projects
@@ -1196,12 +1294,12 @@ async fn local_idp_user_set_password(
     tags = ["projects"],
 }]
 async fn project_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -1219,7 +1317,11 @@ async fn project_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Create project
@@ -1229,18 +1331,22 @@ async fn project_list(
     tags = ["projects"],
 }]
 async fn project_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     new_project: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let project =
             nexus.project_create(&opctx, &new_project.into_inner()).await?;
         Ok(HttpResponseCreated(project.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch project
@@ -1250,11 +1356,11 @@ async fn project_create(
     tags = ["projects"],
 }]
 async fn project_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let path = path_params.into_inner();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -1264,7 +1370,11 @@ async fn project_view(
             nexus.project_lookup(&opctx, project_selector)?.fetch().await?;
         Ok(HttpResponseOk(project.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Delete project
@@ -1274,11 +1384,11 @@ async fn project_view(
     tags = ["projects"],
 }]
 async fn project_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let path = path_params.into_inner();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -1288,7 +1398,11 @@ async fn project_delete(
         nexus.project_delete(&opctx, &project_lookup).await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 // TODO-correctness: Is it valid for PUT to accept application/json that's a
@@ -1303,12 +1417,12 @@ async fn project_delete(
     tags = ["projects"],
 }]
 async fn project_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     updated_project: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let path = path_params.into_inner();
     let updated_project = updated_project.into_inner();
     let handler = async {
@@ -1321,7 +1435,11 @@ async fn project_update(
             .await?;
         Ok(HttpResponseOk(project.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch project's IAM policy
@@ -1331,11 +1449,11 @@ async fn project_update(
     tags = ["projects"],
 }]
 async fn project_policy_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let path = path_params.into_inner();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -1346,7 +1464,11 @@ async fn project_policy_view(
             nexus.project_fetch_policy(&opctx, &project_lookup).await?;
         Ok(HttpResponseOk(policy))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Update project's IAM policy
@@ -1356,12 +1478,12 @@ async fn project_policy_view(
     tags = ["projects"],
 }]
 async fn project_policy_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     new_policy: TypedBody>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let path = path_params.into_inner();
     let new_policy = new_policy.into_inner();
     let handler = async {
@@ -1374,7 +1496,11 @@ async fn project_policy_update(
             .await?;
         Ok(HttpResponseOk(new_policy))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 // IP Pools
@@ -1386,12 +1512,12 @@ async fn project_policy_update(
     tags = ["projects"],
 }]
 async fn project_ip_pool_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -1412,7 +1538,11 @@ async fn project_ip_pool_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch IP pool
@@ -1422,13 +1552,13 @@ async fn project_ip_pool_list(
     tags = ["projects"],
 }]
 async fn project_ip_pool_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let pool_selector = path_params.into_inner().pool;
         let (pool, silo_link) =
             nexus.silo_ip_pool_fetch(&opctx, &pool_selector).await?;
@@ -1437,7 +1567,11 @@ async fn project_ip_pool_view(
             is_default: silo_link.is_default,
         }))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// List IP pools
@@ -1447,12 +1581,12 @@ async fn project_ip_pool_view(
     tags = ["system/networking"],
 }]
 async fn ip_pool_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -1470,7 +1604,11 @@ async fn ip_pool_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 #[derive(Deserialize, JsonSchema)]
 pub struct IpPoolPathParam {
@@ -1485,18 +1623,22 @@ pub struct IpPoolPathParam {
     tags = ["system/networking"],
 }]
 async fn ip_pool_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     pool_params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let pool_params = pool_params.into_inner();
     let handler = async {
         let opctx =
            crate::context::op_context_for_external_api(&rqctx).await?;
         let pool = nexus.ip_pool_create(&opctx, &pool_params).await?;
         Ok(HttpResponseCreated(IpPool::from(pool)))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch IP pool
@@ -1506,13 +1648,13 @@ async fn ip_pool_create(
     tags = ["system/networking"],
 }]
 async fn ip_pool_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let pool_selector = path_params.into_inner().pool;
         // We do not prevent the service pool from being fetched by name or ID
         // like we do for update, delete, associate.
@@ -1520,7 +1662,11 @@ async fn ip_pool_view(
             nexus.ip_pool_lookup(&opctx, &pool_selector)?.fetch().await?;
         Ok(HttpResponseOk(IpPool::from(pool)))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Delete IP pool
@@ -1530,19 +1676,23 @@ async fn ip_pool_view(
     tags = ["system/networking"],
 }]
 async fn ip_pool_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?;
         nexus.ip_pool_delete(&opctx, &pool_lookup).await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Update IP pool
@@ -1552,21 +1702,25 @@ async fn ip_pool_delete(
     tags = ["system/networking"],
 }]
 async fn ip_pool_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     updates: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let updates = updates.into_inner();
         let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?;
         let pool = nexus.ip_pool_update(&opctx, &pool_lookup, &updates).await?;
         Ok(HttpResponseOk(pool.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch IP pool utilization
@@ -1576,13 +1730,13 @@ async fn ip_pool_update(
     tags = ["system/networking"],
 }]
 async fn ip_pool_utilization_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let pool_selector = path_params.into_inner().pool;
         // We do not prevent the service pool from being fetched by name or ID
         // like we do for update, delete, associate.
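[The IP-pool handlers in these hunks share one lookup idiom that the context change leaves intact: resolve the pool reference from the path once, then hand the typed lookup to the app layer. A condensed sketch under that reading; the type of the stripped `Path` parameter is an assumption, since the generic was lost in the text above:

    async fn ip_pool_like_handler(
        rqctx: RequestContext<ApiContext>,
        path_params: Path<IpPoolPathParam>, // assumed; generic not recoverable
    ) -> Result<HttpResponseDeleted, HttpError> {
        let apictx = rqctx.context();
        let handler = async {
            let opctx =
                crate::context::op_context_for_external_api(&rqctx).await?;
            let nexus = &apictx.context.nexus;
            let path = path_params.into_inner();
            // One lookup, reused for authz checks and the operation itself.
            let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?;
            nexus.ip_pool_delete(&opctx, &pool_lookup).await?;
            Ok(HttpResponseDeleted())
        };
        apictx
            .context
            .external_latencies
            .instrument_dropshot_handler(&rqctx, handler)
            .await
    }
]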
@@ -1591,7 +1745,11 @@ async fn ip_pool_utilization_view(
             nexus.ip_pool_utilization_view(&opctx, &pool_lookup).await?;
         Ok(HttpResponseOk(utilization.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// List IP pool's linked silos
@@ -1601,7 +1759,7 @@ async fn ip_pool_utilization_view(
     tags = ["system/networking"],
 }]
 async fn ip_pool_silo_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     // paginating by resource_id because they're unique per pool. most robust
     // option would be to paginate by a composite key representing the (pool,
@@ -1618,7 +1776,7 @@ async fn ip_pool_silo_list(
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
 
         let pag_params = data_page_params_for(&rqctx, &query)?;
@@ -1639,7 +1797,11 @@ async fn ip_pool_silo_list(
             &|_, x: &views::IpPoolSiloLink| x.silo_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Link IP pool to silo
@@ -1653,14 +1815,14 @@ async fn ip_pool_silo_list(
     tags = ["system/networking"],
 }]
 async fn ip_pool_silo_link(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     resource_assoc: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let resource_assoc = resource_assoc.into_inner();
         let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?;
@@ -1669,7 +1831,11 @@ async fn ip_pool_silo_link(
             .await?;
         Ok(HttpResponseCreated(assoc.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Unlink IP pool from silo
@@ -1681,20 +1847,24 @@ async fn ip_pool_silo_link(
     tags = ["system/networking"],
 }]
 async fn ip_pool_silo_unlink(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?;
         let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?;
         nexus.ip_pool_unlink_silo(&opctx, &pool_lookup, &silo_lookup).await?;
         Ok(HttpResponseUpdatedNoContent())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Make IP pool default for silo
@@ -1709,14 +1879,14 @@ async fn ip_pool_silo_unlink(
     tags = ["system/networking"],
 }]
 async fn ip_pool_silo_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     update: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let update = update.into_inner();
         let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?;
@@ -1726,7 +1896,11 @@ async fn ip_pool_silo_update(
             .await?;
         Ok(HttpResponseOk(assoc.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch Oxide service IP pool
@@ -1736,16 +1910,20 @@ async fn ip_pool_silo_update(
     tags = ["system/networking"],
 }]
 async fn ip_pool_service_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let pool = nexus.ip_pool_service_fetch(&opctx).await?;
         Ok(HttpResponseOk(IpPool::from(pool)))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 type IpPoolRangePaginationParams = PaginationParams;
@@ -1759,14 +1937,14 @@ type IpPoolRangePaginationParams = PaginationParams;
     tags = ["system/networking"],
 }]
 async fn ip_pool_range_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let path = path_params.into_inner();
         let marker = match query.page {
@@ -1793,7 +1971,11 @@ async fn ip_pool_range_list(
             },
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Add range to IP pool
@@ -1805,21 +1987,25 @@ async fn ip_pool_range_list(
     tags = ["system/networking"],
 }]
 async fn ip_pool_range_add(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     range_params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = &rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let range = range_params.into_inner();
         let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?;
         let out = nexus.ip_pool_add_range(&opctx, &pool_lookup, &range).await?;
         Ok(HttpResponseCreated(out.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Remove range from IP pool
@@ -1829,21 +2015,25 @@ async fn ip_pool_range_add(
     tags = ["system/networking"],
 }]
 async fn ip_pool_range_remove(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     range_params: TypedBody,
 ) -> Result {
     let apictx = &rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let range = range_params.into_inner();
         let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?;
         nexus.ip_pool_delete_range(&opctx, &pool_lookup, &range).await?;
         Ok(HttpResponseUpdatedNoContent())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// List IP ranges for the Oxide service pool
@@ -1855,13 +2045,13 @@ async fn ip_pool_range_remove(
     tags = ["system/networking"],
 }]
 async fn ip_pool_service_range_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let marker = match query.page {
             WhichPage::First(_) => None,
@@ -1886,7 +2076,11 @@ async fn ip_pool_service_range_list(
             },
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Add IP range to Oxide service pool
@@ -1898,18 +2092,22 @@ async fn ip_pool_service_range_list(
     tags = ["system/networking"],
 }]
 async fn ip_pool_service_range_add(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     range_params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = &rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let range = range_params.into_inner();
         let out = nexus.ip_pool_service_add_range(&opctx, &range).await?;
         Ok(HttpResponseCreated(out.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Remove IP range from Oxide service pool
@@ -1919,18 +2117,22 @@ async fn ip_pool_service_range_add(
     tags = ["system/networking"],
 }]
 async fn ip_pool_service_range_remove(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     range_params: TypedBody,
 ) -> Result {
     let apictx = &rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let range = range_params.into_inner();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         nexus.ip_pool_service_delete_range(&opctx, &range).await?;
         Ok(HttpResponseUpdatedNoContent())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 // Floating IP Addresses
 
 /// List floating IPs
@@ -1942,12 +2144,12 @@ async fn ip_pool_service_range_remove(
     tags = ["floating-ips"],
 }]
 async fn floating_ip_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
@@ -1964,7 +2166,11 @@ async fn floating_ip_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Create floating IP
@@ -1974,12 +2180,12 @@ async fn floating_ip_list(
     tags = ["floating-ips"],
 }]
 async fn floating_ip_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     floating_params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let floating_params = floating_params.into_inner();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -1990,7 +2196,11 @@ async fn floating_ip_create(
             .await?;
         Ok(HttpResponseCreated(ip))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Update floating IP
@@ -2000,14 +2210,14 @@ async fn floating_ip_create(
     tags = ["floating-ips"],
 }]
 async fn floating_ip_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
     updated_floating_ip: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let updated_floating_ip_params = updated_floating_ip.into_inner();
@@ -2027,7 +2237,11 @@ async fn floating_ip_update(
             .await?;
         Ok(HttpResponseOk(floating_ip))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Delete floating IP
@@ -2037,14 +2251,14 @@ async fn floating_ip_update(
     tags = ["floating-ips"],
 }]
 async fn floating_ip_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let floating_ip_selector = params::FloatingIpSelector {
@@ -2057,7 +2271,11 @@ async fn floating_ip_delete(
         nexus.floating_ip_delete(&opctx, fip_lookup).await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch floating IP
@@ -2067,14 +2285,14 @@ async fn floating_ip_delete(
     tags = ["floating-ips"]
 }]
 async fn floating_ip_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let floating_ip_selector = params::FloatingIpSelector {
@@ -2087,7 +2305,11 @@ async fn floating_ip_view(
             .await?;
         Ok(HttpResponseOk(fip.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Attach floating IP
@@ -2099,7 +2321,7 @@ async fn floating_ip_view(
     tags = ["floating-ips"],
 }]
 async fn floating_ip_attach(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
     target: TypedBody,
@@ -2107,7 +2329,7 @@ async fn floating_ip_attach(
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let floating_ip_selector = params::FloatingIpSelector {
@@ -2123,7 +2345,11 @@ async fn floating_ip_attach(
             .await?;
         Ok(HttpResponseAccepted(ip))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Detach floating IP
@@ -2135,14 +2361,14 @@ async fn floating_ip_attach(
     tags = ["floating-ips"],
 }]
 async fn floating_ip_detach(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let floating_ip_selector = params::FloatingIpSelector {
@@ -2154,7 +2380,11 @@ async fn floating_ip_detach(
         let ip = nexus.floating_ip_detach(&opctx, fip_lookup).await?;
         Ok(HttpResponseAccepted(ip))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 // Disks
 
 /// List disks
@@ -2166,13 +2396,13 @@ async fn floating_ip_detach(
     tags = ["disks"],
 }]
 async fn disk_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -2191,7 +2421,11 @@ async fn disk_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 // TODO-correctness See note about instance create. This should be async.
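[The floating-IP and disk handlers above share one more shape worth calling out: build a selector struct from the path and query parameters, resolve it to a typed lookup, then act on the lookup. A condensed sketch of that flow; the selector fields and lookup call follow the diff where visible, while the stripped `Path` and `Query` parameter types are assumptions:

    async fn floating_ip_like_view(
        rqctx: RequestContext<ApiContext>,
        path_params: Path<params::FloatingIpPath>,       // assumed
        query_params: Query<params::OptionalProjectSelector>, // assumed
    ) -> Result<HttpResponseOk<views::FloatingIp>, HttpError> {
        let apictx = rqctx.context();
        let handler = async {
            let opctx =
                crate::context::op_context_for_external_api(&rqctx).await?;
            let nexus = &apictx.context.nexus;
            let path = path_params.into_inner();
            let query = query_params.into_inner();
            // Selector + lookup: resolve (project, floating_ip) once, then
            // pass the typed lookup to the app layer.
            let floating_ip_selector = params::FloatingIpSelector {
                project: query.project,
                floating_ip: path.floating_ip,
            };
            let (.., fip) = nexus
                .floating_ip_lookup(&opctx, floating_ip_selector)?
                .fetch()
                .await?;
            Ok(HttpResponseOk(fip.into()))
        };
        apictx
            .context
            .external_latencies
            .instrument_dropshot_handler(&rqctx, handler)
            .await
    }
]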
@@ -2202,14 +2436,14 @@ async fn disk_list(
     tags = ["disks"]
 }]
 async fn disk_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     new_disk: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let params = new_disk.into_inner();
         let project_lookup = nexus.project_lookup(&opctx, query)?;
@@ -2217,7 +2451,11 @@ async fn disk_create(
             nexus.project_create_disk(&opctx, &project_lookup, &params).await?;
         Ok(HttpResponseCreated(disk.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch disk
@@ -2227,14 +2465,14 @@ async fn disk_create(
     tags = ["disks"]
 }]
 async fn disk_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let disk_selector =
@@ -2243,7 +2481,11 @@ async fn disk_view(
             nexus.disk_lookup(&opctx, disk_selector)?.fetch().await?;
         Ok(HttpResponseOk(disk.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Delete disk
@@ -2253,14 +2495,14 @@ async fn disk_view(
     tags = ["disks"],
 }]
 async fn disk_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let disk_selector =
@@ -2269,7 +2511,11 @@ async fn disk_delete(
         nexus.project_delete_disk(&opctx, &disk_lookup).await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 #[derive(Display, Serialize, Deserialize, JsonSchema)]
@@ -2297,7 +2543,7 @@ struct DiskMetricsPath {
     tags = ["disks"],
 }]
 async fn disk_metrics_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query<
         PaginationParams,
@@ -2306,7 +2552,7 @@ async fn disk_metrics_list(
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
 
@@ -2331,7 +2577,11 @@ async fn disk_metrics_list(
 
         Ok(HttpResponseOk(result))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Start importing blocks into disk
@@ -2343,14 +2593,14 @@ async fn disk_metrics_list(
     tags = ["disks"],
 }]
 async fn disk_bulk_write_import_start(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
 
@@ -2362,7 +2612,11 @@ async fn disk_bulk_write_import_start(
 
         Ok(HttpResponseUpdatedNoContent())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Import blocks into disk
@@ -2372,7 +2626,7 @@ async fn disk_bulk_write_import_start(
     tags = ["disks"],
 }]
 async fn disk_bulk_write_import(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
     import_params: TypedBody,
@@ -2380,7 +2634,7 @@ async fn disk_bulk_write_import(
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let params = import_params.into_inner();
@@ -2393,7 +2647,11 @@ async fn disk_bulk_write_import(
 
         Ok(HttpResponseUpdatedNoContent())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Stop importing blocks into disk
@@ -2405,14 +2663,14 @@ async fn disk_bulk_write_import(
     tags = ["disks"],
 }]
 async fn disk_bulk_write_import_stop(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
 
@@ -2424,7 +2682,11 @@ async fn disk_bulk_write_import_stop(
 
         Ok(HttpResponseUpdatedNoContent())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Confirm disk block import completion
@@ -2434,7 +2696,7 @@ async fn disk_bulk_write_import_stop(
     tags = ["disks"],
 }]
 async fn disk_finalize_import(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
     finalize_params: TypedBody,
@@ -2442,7 +2704,7 @@ async fn disk_finalize_import(
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let params = finalize_params.into_inner();
@@ -2454,7 +2716,11 @@ async fn disk_finalize_import(
 
         Ok(HttpResponseUpdatedNoContent())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 // Instances
 
 /// List instances
@@ -2466,12 +2732,12 @@ async fn disk_finalize_import(
     tags = ["instances"],
 }]
 async fn instance_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -2491,7 +2757,11 @@ async fn instance_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Create instance
@@ -2501,12 +2771,12 @@ async fn instance_list(
     tags = ["instances"],
 }]
 async fn instance_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     new_instance: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let project_selector = query_params.into_inner();
     let new_instance_params = &new_instance.into_inner();
     let handler = async {
@@ -2521,7 +2791,11 @@ async fn instance_create(
             .await?;
         Ok(HttpResponseCreated(instance.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch instance
@@ -2531,12 +2805,12 @@ async fn instance_create(
     tags = ["instances"],
 }]
 async fn instance_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     path_params: Path,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let path = path_params.into_inner();
     let query = query_params.into_inner();
     let handler = async {
@@ -2555,7 +2829,11 @@ async fn instance_view(
             .await?;
         Ok(HttpResponseOk(instance_and_vmm.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Delete instance
@@ -2565,12 +2843,12 @@ async fn instance_view(
     tags = ["instances"],
 }]
 async fn instance_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     path_params: Path,
 ) -> Result {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let path = path_params.into_inner();
     let query = query_params.into_inner();
     let instance_selector = params::InstanceSelector {
@@ -2584,7 +2862,11 @@ async fn instance_delete(
         nexus.project_destroy_instance(&opctx, &instance_lookup).await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 // TODO should this be in the public API?
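[The instance endpoints that follow differ from the earlier template in two small ways visible in the diff: the selector is built before the instrumented block, and the state-changing verbs return 202 Accepted rather than 200 OK. A condensed sketch of that variant; the stripped `Query`/`Path` parameter types are assumptions:

    async fn instance_stop_like(
        rqctx: RequestContext<ApiContext>,
        query_params: Query<params::OptionalProjectSelector>, // assumed
        path_params: Path<params::InstancePath>,              // assumed
    ) -> Result<HttpResponseAccepted<Instance>, HttpError> {
        let apictx = rqctx.context();
        let nexus = &apictx.context.nexus;
        let path = path_params.into_inner();
        let query = query_params.into_inner();
        // Built eagerly, outside the latency-instrumented future.
        let instance_selector = params::InstanceSelector {
            project: query.project,
            instance: path.instance,
        };
        let handler = async {
            let opctx =
                crate::context::op_context_for_external_api(&rqctx).await?;
            let instance_lookup =
                nexus.instance_lookup(&opctx, instance_selector)?;
            let instance = nexus.instance_stop(&opctx, &instance_lookup).await?;
            // 202: the stop is initiated, not necessarily complete.
            Ok(HttpResponseAccepted(instance.into()))
        };
        apictx
            .context
            .external_latencies
            .instrument_dropshot_handler(&rqctx, handler)
            .await
    }
]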
@@ -2595,13 +2877,13 @@ async fn instance_delete( tags = ["instances"], }] async fn instance_migrate( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, migrate_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let migrate_instance_params = migrate_params.into_inner(); @@ -2622,7 +2904,11 @@ async fn instance_migrate( .await?; Ok(HttpResponseOk(instance.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Reboot an instance @@ -2632,12 +2918,12 @@ async fn instance_migrate( tags = ["instances"], }] async fn instance_reboot( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -2651,7 +2937,11 @@ async fn instance_reboot( let instance = nexus.instance_reboot(&opctx, &instance_lookup).await?; Ok(HttpResponseAccepted(instance.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Boot instance @@ -2661,12 +2951,12 @@ async fn instance_reboot( tags = ["instances"], }] async fn instance_start( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -2680,7 +2970,11 @@ async fn instance_start( let instance = nexus.instance_start(&opctx, &instance_lookup).await?; Ok(HttpResponseAccepted(instance.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Stop instance @@ -2690,12 +2984,12 @@ async fn instance_start( tags = ["instances"], }] async fn instance_stop( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -2709,7 +3003,11 @@ async fn instance_stop( let instance = nexus.instance_stop(&opctx, &instance_lookup).await?; Ok(HttpResponseAccepted(instance.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch instance serial console @@ -2719,14 +3017,14 @@ async fn instance_stop( tags = ["instances"], }] async fn instance_serial_console( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let 
nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -2740,7 +3038,11 @@ async fn instance_serial_console( .await?; Ok(HttpResponseOk(data)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Stream instance serial console @@ -2750,13 +3052,13 @@ async fn instance_serial_console( tags = ["instances"], }] async fn instance_serial_console_stream( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, conn: WebsocketConnection, ) -> WebsocketChannelResult { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -2806,13 +3108,13 @@ async fn instance_serial_console_stream( tags = ["instances"], }] async fn instance_ssh_public_key_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; @@ -2837,7 +3139,11 @@ async fn instance_ssh_public_key_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List disks for instance @@ -2847,13 +3153,13 @@ async fn instance_ssh_public_key_list( tags = ["instances"], }] async fn instance_disk_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, path_params: Path, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; @@ -2878,7 +3184,11 @@ async fn instance_disk_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Attach disk to instance @@ -2888,13 +3198,13 @@ async fn instance_disk_list( tags = ["instances"], }] async fn instance_disk_attach( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, disk_to_attach: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let disk = disk_to_attach.into_inner().disk; @@ -2910,7 +3220,11 @@ async fn instance_disk_attach( nexus.instance_attach_disk(&opctx, &instance_lookup, disk).await?; Ok(HttpResponseAccepted(disk.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Detach disk from instance @@ -2920,7 +3234,7 @@ async fn instance_disk_attach( tags = ["instances"], }] async fn instance_disk_detach( - rqctx: 
RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, disk_to_detach: TypedBody, @@ -2928,7 +3242,7 @@ async fn instance_disk_detach( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let disk = disk_to_detach.into_inner().disk; @@ -2942,7 +3256,11 @@ async fn instance_disk_detach( nexus.instance_detach_disk(&opctx, &instance_lookup, disk).await?; Ok(HttpResponseAccepted(disk.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Certificates @@ -2958,12 +3276,12 @@ async fn instance_disk_detach( tags = ["silos"], }] async fn certificate_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -2981,7 +3299,11 @@ async fn certificate_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create new system-wide x.509 certificate @@ -2994,18 +3316,22 @@ async fn certificate_list( tags = ["silos"] }] async fn certificate_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_cert: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let new_cert_params = new_cert.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let cert = nexus.certificate_create(&opctx, new_cert_params).await?; Ok(HttpResponseCreated(cert.try_into()?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Path parameters for Certificate requests @@ -3023,19 +3349,23 @@ struct CertificatePathParam { tags = ["silos"], }] async fn certificate_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let (.., cert) = nexus.certificate_lookup(&opctx, &path.certificate).fetch().await?; Ok(HttpResponseOk(cert.try_into()?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete certificate @@ -3047,12 +3377,12 @@ async fn certificate_view( tags = ["silos"], }] async fn certificate_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus @@ 
-3063,7 +3393,11 @@ async fn certificate_delete( .await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create address lot @@ -3073,12 +3407,12 @@ async fn certificate_delete( tags = ["system/networking"], }] async fn networking_address_lot_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_address_lot: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = new_address_lot.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus.address_lot_create(&opctx, params).await?; @@ -3089,7 +3423,11 @@ async fn networking_address_lot_create( Ok(HttpResponseCreated(AddressLotCreateResponse { lot, blocks })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete address lot @@ -3099,20 +3437,24 @@ async fn networking_address_lot_create( tags = ["system/networking"], }] async fn networking_address_lot_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let address_lot_lookup = nexus.address_lot_lookup(&opctx, path.address_lot)?; nexus.address_lot_delete(&opctx, &address_lot_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List address lots @@ -3122,12 +3464,12 @@ async fn networking_address_lot_delete( tags = ["system/networking"], }] async fn networking_address_lot_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -3146,7 +3488,11 @@ async fn networking_address_lot_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List blocks in address lot @@ -3156,13 +3502,13 @@ async fn networking_address_lot_list( tags = ["system/networking"], }] async fn networking_address_lot_block_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let path = path_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; @@ -3182,7 +3528,11 @@ async fn networking_address_lot_block_list( &|_, x: &AddressLotBlock| x.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, 
handler) + .await } /// Create loopback address @@ -3192,12 +3542,12 @@ async fn networking_address_lot_block_list( tags = ["system/networking"], }] async fn networking_loopback_address_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_loopback_address: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = new_loopback_address.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus.loopback_address_create(&opctx, params).await?; @@ -3206,7 +3556,11 @@ async fn networking_loopback_address_create( Ok(HttpResponseCreated(addr)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Serialize, Deserialize, JsonSchema)] @@ -3233,12 +3587,12 @@ pub struct LoopbackAddressPath { tags = ["system/networking"], }] async fn networking_loopback_address_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let addr = match IpNetwork::new(path.address, path.subnet_mask) { @@ -3258,7 +3612,11 @@ async fn networking_loopback_address_delete( .await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List loopback addresses @@ -3268,12 +3626,12 @@ async fn networking_loopback_address_delete( tags = ["system/networking"], }] async fn networking_loopback_address_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -3290,7 +3648,11 @@ async fn networking_loopback_address_list( &|_, x: &LoopbackAddress| x.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create switch port settings @@ -3300,12 +3662,12 @@ async fn networking_loopback_address_list( tags = ["system/networking"], }] async fn networking_switch_port_settings_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_settings: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = new_settings.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus.switch_port_settings_post(&opctx, params).await?; @@ -3313,7 +3675,11 @@ async fn networking_switch_port_settings_create( let settings: SwitchPortSettingsView = result.into(); Ok(HttpResponseCreated(settings)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete switch port settings @@ -3323,18 +3689,22 @@ async fn 
networking_switch_port_settings_create( tags = ["system/networking"], }] async fn networking_switch_port_settings_delete( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let selector = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus.switch_port_settings_delete(&opctx, &selector).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List switch port settings @@ -3344,14 +3714,14 @@ async fn networking_switch_port_settings_delete( tags = ["system/networking"], }] async fn networking_switch_port_settings_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query< PaginatedByNameOrId, >, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -3370,7 +3740,11 @@ async fn networking_switch_port_settings_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Get information about switch port @@ -3380,18 +3754,22 @@ async fn networking_switch_port_settings_list( tags = ["system/networking"], }] async fn networking_switch_port_settings_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = path_params.into_inner().port; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let settings = nexus.switch_port_settings_get(&opctx, &query).await?; Ok(HttpResponseOk(settings.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List switch ports @@ -3401,12 +3779,12 @@ async fn networking_switch_port_settings_view( tags = ["system/hardware"], }] async fn networking_switch_port_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -3423,7 +3801,11 @@ async fn networking_switch_port_list( &|_, x: &SwitchPort| x.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Get switch port status @@ -3433,13 +3815,13 @@ async fn networking_switch_port_list( tags = ["system/hardware"], }] async fn networking_switch_port_status( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + 
let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -3449,7 +3831,11 @@ async fn networking_switch_port_status( .await?, )) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Apply switch port settings @@ -3459,14 +3845,14 @@ async fn networking_switch_port_status( tags = ["system/hardware"], }] async fn networking_switch_port_apply_settings( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, settings_body: TypedBody, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let port = path_params.into_inner().port; let query = query_params.into_inner(); let settings = settings_body.into_inner(); @@ -3476,7 +3862,11 @@ async fn networking_switch_port_apply_settings( .await?; Ok(HttpResponseUpdatedNoContent {}) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Clear switch port settings @@ -3486,20 +3876,24 @@ async fn networking_switch_port_apply_settings( tags = ["system/hardware"], }] async fn networking_switch_port_clear_settings( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let port = path_params.into_inner().port; let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus.switch_port_clear_settings(&opctx, &port, &query).await?; Ok(HttpResponseUpdatedNoContent {}) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create new BGP configuration @@ -3509,18 +3903,22 @@ async fn networking_switch_port_clear_settings( tags = ["system/networking"], }] async fn networking_bgp_config_create( - rqctx: RequestContext>, + rqctx: RequestContext, config: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let config = config.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus.bgp_config_set(&opctx, &config).await?; Ok(HttpResponseCreated::(result.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List BGP configurations @@ -3530,12 +3928,12 @@ async fn networking_bgp_config_create( tags = ["system/networking"], }] async fn networking_bgp_config_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -3554,7 +3952,11 @@ async fn networking_bgp_config_list( &marker_for_name_or_id, )?)) }; - 
apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } //TODO pagination? the normal by-name/by-id stuff does not work here @@ -3565,16 +3967,20 @@ async fn networking_bgp_config_list( tags = ["system/networking"], }] async fn networking_bgp_status( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result>, HttpError> { let apictx = rqctx.context(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let result = nexus.bgp_peer_status(&opctx).await?; Ok(HttpResponseOk(result)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Get BGP router message history @@ -3584,18 +3990,22 @@ async fn networking_bgp_status( tags = ["system/networking"], }] async fn networking_bgp_message_history( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let sel = query_params.into_inner(); let result = nexus.bgp_message_history(&opctx, &sel).await?; Ok(HttpResponseOk(AggregateBgpMessageHistory::new(result))) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } //TODO pagination? the normal by-name/by-id stuff does not work here @@ -3606,18 +4016,22 @@ async fn networking_bgp_message_history( tags = ["system/networking"], }] async fn networking_bgp_imported_routes_ipv4( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let sel = query_params.into_inner(); let result = nexus.bgp_imported_routes_ipv4(&opctx, &sel).await?; Ok(HttpResponseOk(result)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete BGP configuration @@ -3627,18 +4041,22 @@ async fn networking_bgp_imported_routes_ipv4( tags = ["system/networking"], }] async fn networking_bgp_config_delete( - rqctx: RequestContext>, + rqctx: RequestContext, sel: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let sel = sel.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus.bgp_config_delete(&opctx, &sel).await?; Ok(HttpResponseUpdatedNoContent {}) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create new BGP announce set @@ -3648,18 +4066,22 @@ async fn networking_bgp_config_delete( tags = ["system/networking"], }] async fn networking_bgp_announce_set_create( - rqctx: RequestContext>, + rqctx: RequestContext, config: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = 
async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let config = config.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus.bgp_create_announce_set(&opctx, &config).await?; Ok(HttpResponseCreated::(result.0.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } //TODO pagination? the normal by-name/by-id stuff does not work here @@ -3670,12 +4092,12 @@ async fn networking_bgp_announce_set_create( tags = ["system/networking"], }] async fn networking_bgp_announce_set_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let sel = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus @@ -3686,7 +4108,11 @@ async fn networking_bgp_announce_set_list( .collect(); Ok(HttpResponseOk(result)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete BGP announce set @@ -3696,18 +4122,22 @@ async fn networking_bgp_announce_set_list( tags = ["system/networking"], }] async fn networking_bgp_announce_set_delete( - rqctx: RequestContext>, + rqctx: RequestContext, selector: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let sel = selector.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus.bgp_delete_announce_set(&opctx, &sel).await?; Ok(HttpResponseUpdatedNoContent {}) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Enable a BFD session @@ -3717,18 +4147,22 @@ async fn networking_bgp_announce_set_delete( tags = ["system/networking"], }] async fn networking_bfd_enable( - rqctx: RequestContext>, + rqctx: RequestContext, session: TypedBody, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; nexus.bfd_enable(&opctx, session.into_inner()).await?; Ok(HttpResponseUpdatedNoContent {}) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Disable a BFD session @@ -3738,18 +4172,22 @@ async fn networking_bfd_enable( tags = ["system/networking"], }] async fn networking_bfd_disable( - rqctx: RequestContext>, + rqctx: RequestContext, session: TypedBody, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; nexus.bfd_disable(&opctx, session.into_inner()).await?; Ok(HttpResponseUpdatedNoContent {}) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + 
.instrument_dropshot_handler(&rqctx, handler) + .await } /// Get BFD status @@ -3759,17 +4197,21 @@ async fn networking_bfd_disable( tags = ["system/networking"], }] async fn networking_bfd_status( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; let status = nexus.bfd_status(&opctx).await?; Ok(HttpResponseOk(status)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Get user-facing services IP allowlist @@ -3779,11 +4221,11 @@ async fn networking_bfd_status( tags = ["system/networking"], }] async fn networking_allow_list_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus .allow_list_view(&opctx) @@ -3791,7 +4233,11 @@ async fn networking_allow_list_view( .map(HttpResponseOk) .map_err(HttpError::from) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update user-facing services IP allowlist @@ -3801,22 +4247,27 @@ async fn networking_allow_list_view( tags = ["system/networking"], }] async fn networking_allow_list_update( - rqctx: RequestContext>, + rqctx: RequestContext, params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; + let server_kind = apictx.kind; let params = params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let remote_addr = rqctx.request.remote_addr().ip(); nexus - .allow_list_upsert(&opctx, remote_addr, params) + .allow_list_upsert(&opctx, remote_addr, server_kind, params) .await .map(HttpResponseOk) .map_err(HttpError::from) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Images @@ -3831,13 +4282,13 @@ async fn networking_allow_list_update( tags = ["images"], }] async fn image_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -3867,7 +4318,11 @@ async fn image_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create image @@ -3879,14 +4334,14 @@ async fn image_list( tags = ["images"] }] async fn image_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, new_image: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); 
let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let params = &new_image.into_inner(); let parent_lookup = match query.project.clone() { @@ -3905,7 +4360,11 @@ async fn image_create( let image = nexus.image_create(&opctx, &parent_lookup, ¶ms).await?; Ok(HttpResponseCreated(image.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch image @@ -3917,14 +4376,14 @@ async fn image_create( tags = ["images"], }] async fn image_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let image: nexus_db_model::Image = match nexus @@ -3948,7 +4407,11 @@ async fn image_view( }; Ok(HttpResponseOk(image.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete image @@ -3962,14 +4425,14 @@ async fn image_view( tags = ["images"], }] async fn image_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let image_lookup = nexus @@ -3984,7 +4447,11 @@ async fn image_delete( nexus.image_delete(&opctx, &image_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Promote project image @@ -3996,14 +4463,14 @@ async fn image_delete( tags = ["images"] }] async fn image_promote( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let image_lookup = nexus @@ -4018,7 +4485,11 @@ async fn image_promote( let image = nexus.image_promote(&opctx, &image_lookup).await?; Ok(HttpResponseAccepted(image.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Demote silo image @@ -4030,14 +4501,14 @@ async fn image_promote( tags = ["images"] }] async fn image_demote( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query 
= query_params.into_inner(); let image_lookup = nexus @@ -4053,7 +4524,11 @@ async fn image_demote( nexus.image_demote(&opctx, &image_lookup, &project_lookup).await?; Ok(HttpResponseAccepted(image.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List network interfaces @@ -4063,13 +4538,13 @@ async fn image_demote( tags = ["instances"], }] async fn instance_network_interface_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -4092,7 +4567,11 @@ async fn instance_network_interface_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create network interface @@ -4102,14 +4581,14 @@ async fn instance_network_interface_list( tags = ["instances"], }] async fn instance_network_interface_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, interface_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let instance_lookup = nexus.instance_lookup(&opctx, query)?; let iface = nexus @@ -4121,7 +4600,11 @@ async fn instance_network_interface_create( .await?; Ok(HttpResponseCreated(iface.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete network interface @@ -4136,14 +4619,14 @@ async fn instance_network_interface_create( tags = ["instances"], }] async fn instance_network_interface_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let interface_selector = params::InstanceNetworkInterfaceSelector { @@ -4158,7 +4641,11 @@ async fn instance_network_interface_delete( .await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch network interface @@ -4168,14 +4655,14 @@ async fn instance_network_interface_delete( tags = ["instances"], }] async fn instance_network_interface_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = 
query_params.into_inner(); let interface_selector = params::InstanceNetworkInterfaceSelector { @@ -4189,7 +4676,11 @@ async fn instance_network_interface_view( .await?; Ok(HttpResponseOk(interface.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update network interface @@ -4199,7 +4690,7 @@ async fn instance_network_interface_view( tags = ["instances"], }] async fn instance_network_interface_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, updated_iface: TypedBody, @@ -4207,7 +4698,7 @@ async fn instance_network_interface_update( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let updated_iface = updated_iface.into_inner(); @@ -4231,7 +4722,11 @@ async fn instance_network_interface_update( .await?; Ok(HttpResponseOk(InstanceNetworkInterface::from(interface))) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // External IP addresses for instances @@ -4243,13 +4738,13 @@ async fn instance_network_interface_update( tags = ["instances"], }] async fn instance_external_ip_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4263,7 +4758,11 @@ async fn instance_external_ip_list( nexus.instance_list_external_ips(&opctx, &instance_lookup).await?; Ok(HttpResponseOk(ResultsPage { items: ips, next_page: None })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Allocate and attach ephemeral IP to instance @@ -4273,7 +4772,7 @@ async fn instance_external_ip_list( tags = ["instances"], }] async fn instance_ephemeral_ip_attach( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ip_to_create: TypedBody, @@ -4281,7 +4780,7 @@ async fn instance_ephemeral_ip_attach( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -4299,7 +4798,11 @@ async fn instance_ephemeral_ip_attach( .await?; Ok(HttpResponseAccepted(ip)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Detach and deallocate ephemeral IP from instance @@ -4309,14 +4812,14 @@ async fn instance_ephemeral_ip_attach( tags = ["instances"], }] async fn instance_ephemeral_ip_detach( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = 
rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -4334,7 +4837,11 @@ async fn instance_ephemeral_ip_detach( .await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Snapshots @@ -4346,13 +4853,13 @@ async fn instance_ephemeral_ip_detach( tags = ["snapshots"], }] async fn snapshot_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -4371,7 +4878,11 @@ async fn snapshot_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create snapshot @@ -4383,14 +4894,14 @@ async fn snapshot_list( tags = ["snapshots"], }] async fn snapshot_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, new_snapshot: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let new_snapshot_params = &new_snapshot.into_inner(); let project_lookup = nexus.project_lookup(&opctx, query)?; @@ -4399,7 +4910,11 @@ async fn snapshot_create( .await?; Ok(HttpResponseCreated(snapshot.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch snapshot @@ -4409,14 +4924,14 @@ async fn snapshot_create( tags = ["snapshots"], }] async fn snapshot_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let snapshot_selector = params::SnapshotSelector { @@ -4427,7 +4942,11 @@ async fn snapshot_view( nexus.snapshot_lookup(&opctx, snapshot_selector)?.fetch().await?; Ok(HttpResponseOk(snapshot.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete snapshot @@ -4437,14 +4956,14 @@ async fn snapshot_view( tags = ["snapshots"], }] async fn snapshot_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = 
&apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let snapshot_selector = params::SnapshotSelector { @@ -4456,7 +4975,11 @@ async fn snapshot_delete( nexus.snapshot_delete(&opctx, &snapshot_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // VPCs @@ -4468,12 +4991,12 @@ async fn snapshot_delete( tags = ["vpcs"], }] async fn vpc_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -4494,7 +5017,11 @@ async fn vpc_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create VPC @@ -4504,12 +5031,12 @@ async fn vpc_list( tags = ["vpcs"], }] async fn vpc_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, body: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let new_vpc_params = body.into_inner(); let handler = async { @@ -4520,7 +5047,11 @@ async fn vpc_create( .await?; Ok(HttpResponseCreated(vpc.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch VPC @@ -4530,13 +5061,13 @@ async fn vpc_create( tags = ["vpcs"], }] async fn vpc_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4545,7 +5076,11 @@ async fn vpc_view( let (.., vpc) = nexus.vpc_lookup(&opctx, vpc_selector)?.fetch().await?; Ok(HttpResponseOk(vpc.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update a VPC @@ -4555,14 +5090,14 @@ async fn vpc_view( tags = ["vpcs"], }] async fn vpc_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, updated_vpc: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let updated_vpc_params = &updated_vpc.into_inner(); @@ -4575,7 +5110,11 @@ async fn vpc_update( .await?; Ok(HttpResponseOk(vpc.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete VPC @@ -4585,13 +5124,13 @@ async fn vpc_update( tags = ["vpcs"], }] async fn vpc_delete( - rqctx: 
RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4601,7 +5140,11 @@ async fn vpc_delete( nexus.project_delete_vpc(&opctx, &vpc_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List subnets @@ -4611,12 +5154,12 @@ async fn vpc_delete( tags = ["vpcs"], }] async fn vpc_subnet_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -4636,7 +5179,11 @@ async fn vpc_subnet_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create subnet @@ -4646,13 +5193,13 @@ async fn vpc_subnet_list( tags = ["vpcs"], }] async fn vpc_subnet_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, create_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let create = create_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4661,7 +5208,11 @@ async fn vpc_subnet_create( nexus.vpc_create_subnet(&opctx, &vpc_lookup, &create).await?; Ok(HttpResponseCreated(subnet.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch subnet @@ -4671,13 +5222,13 @@ async fn vpc_subnet_create( tags = ["vpcs"], }] async fn vpc_subnet_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4690,7 +5241,11 @@ async fn vpc_subnet_view( nexus.vpc_subnet_lookup(&opctx, subnet_selector)?.fetch().await?; Ok(HttpResponseOk(subnet.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete subnet @@ -4700,13 +5255,13 @@ async fn vpc_subnet_view( tags = ["vpcs"], }] async fn vpc_subnet_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = 
crate::context::op_context_for_external_api(&rqctx).await?; @@ -4719,7 +5274,11 @@ async fn vpc_subnet_delete( nexus.vpc_delete_subnet(&opctx, &subnet_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update subnet @@ -4729,14 +5288,14 @@ async fn vpc_subnet_delete( tags = ["vpcs"], }] async fn vpc_subnet_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, subnet_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let subnet_params = subnet_params.into_inner(); @@ -4752,7 +5311,11 @@ async fn vpc_subnet_update( .await?; Ok(HttpResponseOk(subnet.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // This endpoint is likely temporary. We would rather list all IPs allocated in @@ -4766,13 +5329,13 @@ async fn vpc_subnet_update( tags = ["vpcs"], }] async fn vpc_subnet_list_network_interfaces( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let path = path_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; @@ -4801,7 +5364,11 @@ async fn vpc_subnet_list_network_interfaces( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // VPC Firewalls @@ -4814,7 +5381,7 @@ async fn vpc_subnet_list_network_interfaces( tags = ["vpcs"], }] async fn vpc_firewall_rules_view( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result, HttpError> { // TODO: Check If-Match and fail if the ETag doesn't match anymore. 
@@ -4823,7 +5390,7 @@ async fn vpc_firewall_rules_view( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let vpc_lookup = nexus.vpc_lookup(&opctx, query)?; let rules = nexus.vpc_list_firewall_rules(&opctx, &vpc_lookup).await?; @@ -4831,7 +5398,11 @@ async fn vpc_firewall_rules_view( rules: rules.into_iter().map(|rule| rule.into()).collect(), })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Replace firewall rules @@ -4841,7 +5412,7 @@ async fn vpc_firewall_rules_view( tags = ["vpcs"], }] async fn vpc_firewall_rules_update( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, router_params: TypedBody, ) -> Result, HttpError> { @@ -4850,7 +5421,7 @@ async fn vpc_firewall_rules_update( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let router_params = router_params.into_inner(); let vpc_lookup = nexus.vpc_lookup(&opctx, query)?; @@ -4861,7 +5432,11 @@ async fn vpc_firewall_rules_update( rules: rules.into_iter().map(|rule| rule.into()).collect(), })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // VPC Routers @@ -4874,13 +5449,13 @@ async fn vpc_firewall_rules_update( unpublished = true, }] async fn vpc_router_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -4899,7 +5474,11 @@ async fn vpc_router_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch router @@ -4910,13 +5489,13 @@ async fn vpc_router_list( unpublished = true, }] async fn vpc_router_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4929,7 +5508,11 @@ async fn vpc_router_view( nexus.vpc_router_lookup(&opctx, router_selector)?.fetch().await?; Ok(HttpResponseOk(vpc_router.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create VPC router @@ -4940,13 +5523,13 @@ async fn vpc_router_view( unpublished = true, }] async fn vpc_router_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, create_params: TypedBody, ) -> 
Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let create = create_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4961,7 +5544,11 @@ async fn vpc_router_create( .await?; Ok(HttpResponseCreated(router.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete router @@ -4972,13 +5559,13 @@ async fn vpc_router_create( unpublished = true, }] async fn vpc_router_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4991,7 +5578,11 @@ async fn vpc_router_delete( nexus.vpc_delete_router(&opctx, &router_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update router @@ -5002,14 +5593,14 @@ async fn vpc_router_delete( unpublished = true, }] async fn vpc_router_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, router_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let router_params = router_params.into_inner(); @@ -5025,7 +5616,11 @@ async fn vpc_router_update( .await?; Ok(HttpResponseOk(router.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List routes @@ -5038,13 +5633,13 @@ async fn vpc_router_update( unpublished = true, }] async fn vpc_router_route_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -5063,7 +5658,11 @@ async fn vpc_router_route_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Vpc Router Routes @@ -5076,14 +5675,14 @@ async fn vpc_router_route_list( unpublished = true, }] async fn vpc_router_route_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let route_selector = params::RouteSelector { 
@@ -5098,7 +5697,11 @@ async fn vpc_router_route_view( .await?; Ok(HttpResponseOk(route.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create router @@ -5109,14 +5712,14 @@ async fn vpc_router_route_view( unpublished = true, }] async fn vpc_router_route_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, create_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let create = create_params.into_inner(); let router_lookup = nexus.vpc_router_lookup(&opctx, query)?; @@ -5130,7 +5733,11 @@ async fn vpc_router_route_create( .await?; Ok(HttpResponseCreated(route.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete route @@ -5141,14 +5748,14 @@ async fn vpc_router_route_create( unpublished = true, }] async fn vpc_router_route_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let route_selector = params::RouteSelector { @@ -5162,7 +5769,11 @@ async fn vpc_router_route_delete( nexus.router_delete_route(&opctx, &route_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update route @@ -5173,14 +5784,14 @@ async fn vpc_router_route_delete( unpublished = true, }] async fn vpc_router_route_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, router_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let router_params = router_params.into_inner(); @@ -5198,7 +5809,11 @@ async fn vpc_router_route_update( .await?; Ok(HttpResponseOk(route.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Racks @@ -5210,12 +5825,12 @@ async fn vpc_router_route_update( tags = ["system/hardware"], }] async fn rack_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let racks = nexus @@ -5230,7 +5845,11 @@ async fn rack_list( &|_, rack: &Rack| rack.identity.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } 
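(Annotation, hedged: not part of the patch itself.) Every external handler in this file receives the same two mechanical changes: the request context parameter becomes the new ApiContext wrapper, and the latency tracker is reached through its `context` field. Below is a standalone sketch of the wrapper these hunks imply; the `kind` field's variant names are inferred from the commit description and the `for_external()`/`for_techport()` calls later in this patch, not copied from the real `nexus/src/context.rs`.

// Simplified, self-contained model of the ApiContext wrapper assumed by
// these hunks; names marked "inferred" are assumptions, not the real code.
use std::sync::Arc;

/// Which server is handling the request (variant names inferred).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ServerKind {
    Internal,
    External,
    Techport,
}

/// Stand-in for the real nexus ServerContext (holds `nexus`,
/// `external_latencies`, etc. in the actual code).
pub struct ServerContext;

#[derive(Clone)]
pub struct ApiContext {
    /// The kind of server handling the request.
    pub kind: ServerKind,
    /// The shared state formerly passed to handlers directly.
    pub context: Arc<ServerContext>,
}

impl ApiContext {
    /// Re-wrap the same shared state for the external server.
    pub fn for_external(&self) -> Self {
        Self { kind: ServerKind::External, context: self.context.clone() }
    }

    /// Re-wrap the same shared state for the techport proxy server.
    pub fn for_techport(&self) -> Self {
        Self { kind: ServerKind::Techport, context: self.context.clone() }
    }
}

Cloning the wrapper is cheap because the heavy state stays behind one shared Arc; only the kind tag differs between the servers, which is what makes the repeated `apictx.context.external_latencies` delegation in the hunks above essentially free.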
/// Path parameters for Rack requests @@ -5247,18 +5866,22 @@ struct RackPathParam { tags = ["system/hardware"], }] async fn rack_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let rack_info = nexus.rack_lookup(&opctx, &path.rack_id).await?; Ok(HttpResponseOk(rack_info.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List uninitialized sleds @@ -5268,7 +5891,7 @@ async fn rack_view( tags = ["system/hardware"] }] async fn sled_list_uninitialized( - rqctx: RequestContext>, + rqctx: RequestContext, query: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); @@ -5280,12 +5903,16 @@ async fn sled_list_uninitialized( ); } let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let sleds = nexus.sled_list_uninitialized(&opctx).await?; Ok(HttpResponseOk(ResultsPage { items: sleds, next_page: None })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// The unique ID of a sled. @@ -5306,11 +5933,11 @@ pub struct SledId { tags = ["system/hardware"] }] async fn sled_add( - rqctx: RequestContext>, + rqctx: RequestContext, sled: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let id = nexus @@ -5319,7 +5946,11 @@ async fn sled_add( .into_untyped_uuid(); Ok(HttpResponseCreated(SledId { id })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Sleds @@ -5331,12 +5962,12 @@ async fn sled_add( tags = ["system/hardware"], }] async fn sled_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let sleds = nexus @@ -5351,7 +5982,11 @@ async fn sled_list( &|_, sled: &Sled| sled.identity.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch sled @@ -5361,19 +5996,23 @@ async fn sled_list( tags = ["system/hardware"], }] async fn sled_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let (.., sled) = nexus.sled_lookup(&opctx, &path.sled_id)?.fetch().await?; Ok(HttpResponseOk(sled.into())) }; - 
apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Set sled provision policy @@ -5383,13 +6022,13 @@ async fn sled_view( tags = ["system/hardware"], }] async fn sled_set_provision_policy( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, new_provision_state: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let new_state = new_provision_state.into_inner().state; @@ -5407,7 +6046,11 @@ async fn sled_set_provision_policy( Ok(HttpResponseOk(response)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List instances running on given sled @@ -5417,13 +6060,13 @@ async fn sled_set_provision_policy( tags = ["system/hardware"], }] async fn sled_instance_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -5444,7 +6087,11 @@ async fn sled_instance_list( &|_, sled_instance: &views::SledInstance| sled_instance.identity.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Physical disks @@ -5456,12 +6103,12 @@ async fn sled_instance_list( tags = ["system/hardware"], }] async fn physical_disk_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let disks = nexus @@ -5476,7 +6123,11 @@ async fn physical_disk_list( &|_, disk: &PhysicalDisk| disk.identity.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Get a physical disk @@ -5486,12 +6137,12 @@ async fn physical_disk_list( tags = ["system/hardware"], }] async fn physical_disk_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -5499,7 +6150,11 @@ async fn physical_disk_view( nexus.physical_disk_lookup(&opctx, &path).await?.fetch().await?; Ok(HttpResponseOk(physical_disk.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Switches @@ -5511,12 +6166,12 @@ async fn physical_disk_view( tags = ["system/hardware"], }] async fn switch_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, 
HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let switches = nexus @@ -5531,7 +6186,11 @@ async fn switch_list( &|_, switch: &views::Switch| switch.identity.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch switch @@ -5541,12 +6200,12 @@ async fn switch_list( tags = ["system/hardware"], }] async fn switch_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let (.., switch) = nexus @@ -5558,7 +6217,11 @@ async fn switch_view( .await?; Ok(HttpResponseOk(switch.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List physical disks attached to sleds @@ -5568,13 +6231,13 @@ async fn switch_view( tags = ["system/hardware"], }] async fn sled_physical_disk_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -5594,7 +6257,11 @@ async fn sled_physical_disk_list( &|_, disk: &PhysicalDisk| disk.identity.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Metrics @@ -5635,7 +6302,7 @@ struct SystemMetricsPathParam { tags = ["system/metrics"], }] async fn system_metric( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, pag_params: Query< PaginationParams, @@ -5644,7 +6311,7 @@ async fn system_metric( ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let metric_name = path_params.into_inner().metric_name; let pagination = pag_params.into_inner(); let limit = rqctx.page_limit(&pagination)?; @@ -5667,7 +6334,11 @@ async fn system_metric( Ok(HttpResponseOk(result)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// View metrics @@ -5679,7 +6350,7 @@ async fn system_metric( tags = ["metrics"], }] async fn silo_metric( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, pag_params: Query< PaginationParams, @@ -5688,7 +6359,7 @@ async fn silo_metric( ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let metric_name = path_params.into_inner().metric_name; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -5716,7 +6387,11 @@ async fn silo_metric( Ok(HttpResponseOk(result)) }; - 
apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List timeseries schemas @@ -5726,13 +6401,13 @@ async fn silo_metric( tags = ["metrics"], }] async fn timeseries_schema_list( - rqctx: RequestContext>, + rqctx: RequestContext, pag_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let pagination = pag_params.into_inner(); let limit = rqctx.page_limit(&pagination)?; @@ -5742,7 +6417,11 @@ async fn timeseries_schema_list( .map(HttpResponseOk) .map_err(HttpError::from) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // TODO: can we link to an OxQL reference? Do we have one? Can we even do links? @@ -5756,12 +6435,12 @@ async fn timeseries_schema_list( tags = ["metrics"], }] async fn timeseries_query( - rqctx: RequestContext>, + rqctx: RequestContext, body: TypedBody, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let query = body.into_inner().query; nexus @@ -5770,7 +6449,11 @@ async fn timeseries_query( .map(HttpResponseOk) .map_err(HttpError::from) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Updates @@ -5783,12 +6466,12 @@ async fn timeseries_query( unpublished = true, }] async fn system_update_put_repository( - rqctx: RequestContext>, + rqctx: RequestContext, query: Query, body: StreamingBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let query = query.into_inner(); @@ -5797,7 +6480,11 @@ async fn system_update_put_repository( nexus.updates_put_repository(&opctx, body, query.file_name).await?; Ok(HttpResponseOk(update)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch TUF repository description @@ -5810,11 +6497,11 @@ async fn system_update_put_repository( unpublished = true, }] async fn system_update_get_repository( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let params = path_params.into_inner(); @@ -5824,7 +6511,11 @@ async fn system_update_get_repository( description: description.into_external(), })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Silo users @@ -5836,12 +6527,12 @@ async fn system_update_get_repository( tags = ["silos"], }] async fn user_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> 
{ let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -5865,7 +6556,11 @@ async fn user_list( &|_, user: &User| user.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Silo groups @@ -5877,11 +6572,11 @@ async fn user_list( tags = ["silos"], }] async fn group_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; let handler = async { @@ -5898,7 +6593,11 @@ async fn group_list( &|_, group: &Group| group.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch group @@ -5908,19 +6607,23 @@ async fn group_list( tags = ["silos"], }] async fn group_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let (.., group) = nexus.silo_group_lookup(&opctx, &path.group_id).fetch().await?; Ok(HttpResponseOk(group.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Built-in (system) users @@ -5932,11 +6635,11 @@ async fn group_view( tags = ["system/silos"], }] async fn user_builtin_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?.map_name(|n| Name::ref_cast(n)); @@ -5954,7 +6657,11 @@ async fn user_builtin_list( &marker_for_name, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch built-in user @@ -5964,19 +6671,23 @@ async fn user_builtin_list( tags = ["system/silos"], }] async fn user_builtin_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let user_selector = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let (.., user) = nexus.user_builtin_lookup(&opctx, &user_selector)?.fetch().await?; Ok(HttpResponseOk(user.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Built-in roles @@ -6002,11 +6713,11 @@ struct RolePathParam { tags = ["roles"], }] async fn role_list( - rqctx: RequestContext>, + rqctx: 
RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -6036,7 +6747,11 @@ async fn role_list( |role: &Role, _| RolePage { last_seen: role.name.to_string() }, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch built-in role @@ -6046,11 +6761,11 @@ async fn role_list( tags = ["roles"], }] async fn role_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let role_name = &path.role_name; let handler = async { @@ -6058,7 +6773,11 @@ async fn role_view( let role = nexus.role_builtin_fetch(&opctx, &role_name).await?; Ok(HttpResponseOk(role.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Current user @@ -6070,10 +6789,10 @@ async fn role_view( tags = ["session"], }] pub(crate) async fn current_user_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let user = nexus.silo_user_fetch_self(&opctx).await?; @@ -6083,7 +6802,11 @@ pub(crate) async fn current_user_view( silo_name: silo.name().clone(), })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch current user's groups @@ -6093,13 +6816,13 @@ pub(crate) async fn current_user_view( tags = ["session"], }] pub(crate) async fn current_user_groups( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let groups = nexus .silo_user_fetch_groups_for_self( @@ -6116,7 +6839,11 @@ pub(crate) async fn current_user_groups( &|_, group: &views::Group| group.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Per-user SSH public keys @@ -6130,13 +6857,13 @@ pub(crate) async fn current_user_groups( tags = ["session"], }] async fn current_user_ssh_key_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -6157,7 +6884,11 @@ async fn current_user_ssh_key_list( &marker_for_name_or_id, )?)) }; - 
apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create SSH public key @@ -6169,13 +6900,13 @@ async fn current_user_ssh_key_list( tags = ["session"], }] async fn current_user_ssh_key_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_key: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let &actor = opctx .authn .actor_required() @@ -6185,7 +6916,11 @@ async fn current_user_ssh_key_create( .await?; Ok(HttpResponseCreated(ssh_key.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch SSH public key @@ -6197,13 +6932,13 @@ async fn current_user_ssh_key_create( tags = ["session"], }] async fn current_user_ssh_key_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let &actor = opctx .authn @@ -6219,7 +6954,11 @@ async fn current_user_ssh_key_view( assert_eq!(silo_user.id(), actor.actor_id()); Ok(HttpResponseOk(ssh_key.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete SSH public key @@ -6231,13 +6970,13 @@ async fn current_user_ssh_key_view( tags = ["session"], }] async fn current_user_ssh_key_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let &actor = opctx .authn @@ -6251,7 +6990,11 @@ async fn current_user_ssh_key_delete( nexus.ssh_key_delete(&opctx, actor.actor_id(), &ssh_key_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List instrumentation probes @@ -6261,7 +7004,7 @@ async fn current_user_ssh_key_delete( tags = ["system/probes"], }] async fn probe_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); @@ -6269,7 +7012,7 @@ async fn probe_list( let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -6289,7 +7032,11 @@ async fn probe_list( }, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// View instrumentation probe @@ -6299,7 +7046,7 @@ 
async fn probe_list( tags = ["system/probes"], }] async fn probe_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { @@ -6308,7 +7055,7 @@ async fn probe_view( let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let project_selector = query_params.into_inner(); let project_lookup = nexus.project_lookup(&opctx, project_selector)?; @@ -6316,7 +7063,11 @@ async fn probe_view( nexus.probe_get(&opctx, &project_lookup, &path.probe).await?; Ok(HttpResponseOk(probe)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create instrumentation probe @@ -6326,7 +7077,7 @@ async fn probe_view( tags = ["system/probes"], }] async fn probe_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, new_probe: TypedBody, ) -> Result, HttpError> { @@ -6335,7 +7086,7 @@ async fn probe_create( let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let new_probe_params = &new_probe.into_inner(); let project_selector = query_params.into_inner(); let project_lookup = nexus.project_lookup(&opctx, project_selector)?; @@ -6344,7 +7095,11 @@ async fn probe_create( .await?; Ok(HttpResponseCreated(probe.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete instrumentation probe @@ -6354,7 +7109,7 @@ async fn probe_create( tags = ["system/probes"], }] async fn probe_delete( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result { @@ -6363,14 +7118,18 @@ async fn probe_delete( let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let project_selector = query_params.into_inner(); let project_lookup = nexus.project_lookup(&opctx, project_selector)?; nexus.probe_delete(&opctx, &project_lookup, path.probe).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[cfg(test)] diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs index 81b63d0d89..ceafe7f103 100644 --- a/nexus/src/internal_api/http_entrypoints.rs +++ b/nexus/src/internal_api/http_entrypoints.rs @@ -5,7 +5,7 @@ //! 
Handler functions (entrypoints) for HTTP APIs internal to the control plane use super::params::{OximeterInfo, RackInitializationRequest}; -use crate::ServerContext; +use crate::context::ApiContext; use dropshot::endpoint; use dropshot::ApiDescription; use dropshot::FreeformBody; @@ -60,10 +60,9 @@ use serde::Deserialize; use serde::Serialize; use std::collections::BTreeMap; use std::collections::BTreeSet; -use std::sync::Arc; use uuid::Uuid; -type NexusApiDescription = ApiDescription>; +type NexusApiDescription = ApiDescription; /// Returns a description of the internal nexus API pub(crate) fn internal_api() -> NexusApiDescription { @@ -134,10 +133,10 @@ struct SledAgentPathParam { path = "/sled-agents/{sled_id}", }] async fn sled_agent_get( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let path = path_params.into_inner(); @@ -155,11 +154,11 @@ async fn sled_agent_get( path = "/sled-agents/{sled_id}", }] async fn sled_agent_put( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, sled_info: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let path = path_params.into_inner(); @@ -182,10 +181,10 @@ async fn sled_agent_put( path = "/sled-agents/{sled_id}/firewall-rules-update", }] async fn sled_firewall_rules_request( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let path = path_params.into_inner(); @@ -211,11 +210,11 @@ struct RackPathParam { path = "/racks/{rack_id}/initialization-complete", }] async fn rack_initialization_complete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, info: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); let request = info.into_inner(); @@ -237,11 +236,11 @@ struct SwitchPathParam { path = "/switch/{switch_id}", }] async fn switch_put( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, body: TypedBody, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -264,11 +263,11 @@ struct InstancePathParam { path = "/instances/{instance_id}", }] async fn cpapi_instances_put( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, new_runtime_state: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); let new_state = new_runtime_state.into_inner(); @@ -294,11 +293,11 @@ struct DiskPathParam { path = "/disks/{disk_id}", }] async fn cpapi_disks_put( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, new_runtime_state: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); let new_state = new_runtime_state.into_inner(); @@ -329,10 +328,10 @@ struct 
VolumePathParam { path = "/volume/{volume_id}/remove-read-only-parent", }] async fn cpapi_volume_remove_read_only_parent( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -354,10 +353,10 @@ async fn cpapi_volume_remove_read_only_parent( path = "/disk/{disk_id}/remove-read-only-parent", }] async fn cpapi_disk_remove_read_only_parent( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -375,10 +374,10 @@ async fn cpapi_disk_remove_read_only_parent( path = "/metrics/producers", }] async fn cpapi_producers_post( - request_context: RequestContext>, + request_context: RequestContext, producer_info: TypedBody, ) -> Result, HttpError> { - let context = request_context.context(); + let context = &request_context.context().context; let handler = async { let nexus = &context.nexus; let producer_info = producer_info.into_inner(); @@ -413,11 +412,11 @@ pub struct CollectorIdPathParams { path = "/metrics/collectors/{collector_id}/producers", }] async fn cpapi_assigned_producers_list( - request_context: RequestContext>, + request_context: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { - let context = request_context.context(); + let context = &request_context.context().context; let handler = async { let nexus = &context.nexus; let collector_id = path_params.into_inner().collector_id; @@ -446,10 +445,10 @@ async fn cpapi_assigned_producers_list( path = "/metrics/collectors", }] async fn cpapi_collectors_post( - request_context: RequestContext>, + request_context: RequestContext, oximeter_info: TypedBody, ) -> Result { - let context = request_context.context(); + let context = &request_context.context().context; let handler = async { let nexus = &context.nexus; let oximeter_info = oximeter_info.into_inner(); @@ -470,10 +469,10 @@ async fn cpapi_collectors_post( path = "/artifacts/{kind}/{name}/{version}", }] async fn cpapi_artifact_download( - request_context: RequestContext>, + request_context: RequestContext, path_params: Path, ) -> Result, HttpError> { - let context = request_context.context(); + let context = &request_context.context().context; let nexus = &context.nexus; let opctx = crate::context::op_context_for_internal_api(&request_context).await; @@ -497,11 +496,11 @@ struct UpstairsPathParam { path = "/crucible/0/upstairs/{upstairs_id}/repair-start", }] async fn cpapi_upstairs_repair_start( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, repair_start_info: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -525,11 +524,11 @@ async fn cpapi_upstairs_repair_start( path = "/crucible/0/upstairs/{upstairs_id}/repair-finish", }] async fn cpapi_upstairs_repair_finish( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, repair_finish_info: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -560,11 +559,11 @@ struct UpstairsRepairPathParam { path = "/crucible/0/upstairs/{upstairs_id}/repair/{repair_id}/progress", }] async fn cpapi_upstairs_repair_progress( - rqctx: 
RequestContext>, + rqctx: RequestContext, path_params: Path, repair_progress: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -597,11 +596,11 @@ struct UpstairsDownstairsPathParam { path = "/crucible/0/upstairs/{upstairs_id}/downstairs/{downstairs_id}/stop-request", }] async fn cpapi_downstairs_client_stop_request( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, downstairs_client_stop_request: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -627,11 +626,11 @@ async fn cpapi_downstairs_client_stop_request( path = "/crucible/0/upstairs/{upstairs_id}/downstairs/{downstairs_id}/stopped", }] async fn cpapi_downstairs_client_stopped( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, downstairs_client_stopped: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -658,10 +657,10 @@ async fn cpapi_downstairs_client_stopped( path = "/sagas", }] async fn saga_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let query = query_params.into_inner(); @@ -690,10 +689,10 @@ struct SagaPathParam { path = "/sagas/{saga_id}", }] async fn saga_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -715,9 +714,9 @@ async fn saga_view( path = "/bgtasks", }] async fn bgtask_list( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let opctx = crate::context::op_context_for_internal_api(&rqctx).await; @@ -747,10 +746,10 @@ struct BackgroundTasksActivateRequest { path = "/bgtasks/view/{bgtask_name}", }] async fn bgtask_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -768,10 +767,10 @@ async fn bgtask_view( path = "/bgtasks/activate", }] async fn bgtask_activate( - rqctx: RequestContext>, + rqctx: RequestContext, body: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -810,11 +809,11 @@ struct RpwNatQueryParam { path = "/nat/ipv4/changeset/{from_gen}" }] async fn ipv4_nat_changeset( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -843,10 +842,10 @@ async fn ipv4_nat_changeset( path = 
"/deployment/blueprints/all", }] async fn blueprint_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let query = query_params.into_inner(); @@ -869,10 +868,10 @@ async fn blueprint_list( path = "/deployment/blueprints/all/{blueprint_id}", }] async fn blueprint_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -889,10 +888,10 @@ async fn blueprint_view( path = "/deployment/blueprints/all/{blueprint_id}", }] async fn blueprint_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -911,9 +910,9 @@ async fn blueprint_delete( path = "/deployment/blueprints/target", }] async fn blueprint_target_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -929,10 +928,10 @@ async fn blueprint_target_view( path = "/deployment/blueprints/target", }] async fn blueprint_target_set( - rqctx: RequestContext>, + rqctx: RequestContext, target: TypedBody, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -949,10 +948,10 @@ async fn blueprint_target_set( path = "/deployment/blueprints/target/enabled", }] async fn blueprint_target_set_enabled( - rqctx: RequestContext>, + rqctx: RequestContext, target: TypedBody, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -972,9 +971,9 @@ async fn blueprint_target_set_enabled( path = "/deployment/blueprints/regenerate", }] async fn blueprint_regenerate( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -992,10 +991,10 @@ async fn blueprint_regenerate( path = "/deployment/blueprints/import", }] async fn blueprint_import( - rqctx: RequestContext>, + rqctx: RequestContext, blueprint: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -1012,9 +1011,9 @@ async fn blueprint_import( path = "/sleds/uninitialized", }] async fn sled_list_uninitialized( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let opctx = 
            crate::context::op_context_for_internal_api(&rqctx).await;
@@ -1040,10 +1039,10 @@ pub struct SledId {
     path = "/sleds/add",
 }]
 async fn sled_add(
-    rqctx: RequestContext>,
+    rqctx: RequestContext,
     sled: TypedBody,
 ) -> Result, HttpError> {
-    let apictx = rqctx.context();
+    let apictx = &rqctx.context().context;
     let nexus = &apictx.nexus;
     let handler = async {
         let opctx = crate::context::op_context_for_internal_api(&rqctx).await;
@@ -1064,10 +1063,10 @@ async fn sled_add(
     path = "/sleds/expunge",
 }]
 async fn sled_expunge(
-    rqctx: RequestContext>,
+    rqctx: RequestContext,
     sled: TypedBody,
 ) -> Result, HttpError> {
-    let apictx = rqctx.context();
+    let apictx = &rqctx.context().context;
     let nexus = &apictx.nexus;
     let handler = async {
         let opctx = crate::context::op_context_for_internal_api(&rqctx).await;
@@ -1090,11 +1089,11 @@ struct ProbePathParam {
     path = "/probes/{sled}"
 }]
 async fn probes_get(
-    rqctx: RequestContext>,
+    rqctx: RequestContext,
     path_params: Path,
     query_params: Query,
 ) -> Result>, HttpError> {
-    let apictx = rqctx.context();
+    let apictx = &rqctx.context().context;
     let handler = async {
         let query = query_params.into_inner();
         let path = path_params.into_inner();
diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs
index e34b694e52..e1b327de91 100644
--- a/nexus/src/lib.rs
+++ b/nexus/src/lib.rs
@@ -20,6 +20,7 @@ mod saga_interface;
 pub use app::test_interfaces::TestInterfaces;
 pub use app::Nexus;
+use context::ApiContext;
 use context::ServerContext;
 use dropshot::ConfigDropshot;
 use external_api::http_entrypoints::external_api;
@@ -77,11 +78,10 @@ pub fn run_openapi_internal() -> Result<(), String> {
 /// A partially-initialized Nexus server, which exposes an internal interface,
 /// but is not ready to receive external requests.
 pub struct InternalServer {
-    /// shared state used by API request handlers
-    apictx: Arc,
+    /// Shared server state.
+    apictx: ApiContext,
     /// dropshot server for internal API
-    http_server_internal: dropshot::HttpServer>,
-
+    http_server_internal: dropshot::HttpServer,
     config: NexusConfig,
     log: Logger,
 }
@@ -97,31 +97,39 @@ impl InternalServer {
         let ctxlog = log.new(o!("component" => "ServerContext"));
-        let apictx =
-            ServerContext::new(config.deployment.rack_id, ctxlog, &config)
-                .await?;
+        let context = ApiContext::for_internal(
+            config.deployment.rack_id,
+            ctxlog,
+            &config,
+        )
+        .await?;
         // Launch the internal server.
         let server_starter_internal = dropshot::HttpServerStarter::new(
             &config.deployment.dropshot_internal,
             internal_api(),
-            Arc::clone(&apictx),
+            context.clone(),
             &log.new(o!("component" => "dropshot_internal")),
         )
         .map_err(|error| format!("initializing internal server: {}", error))?;
         let http_server_internal = server_starter_internal.start();
-        Ok(Self { apictx, http_server_internal, config: config.clone(), log })
+        Ok(Self {
+            apictx: context,
+            http_server_internal,
+            config: config.clone(),
+            log,
+        })
     }
 }
-type DropshotServer = dropshot::HttpServer>;
+type DropshotServer = dropshot::HttpServer;
 /// Packages up a [`Nexus`], running both external and internal HTTP API servers
 /// wired up to Nexus
 pub struct Server {
     /// shared state used by API request handlers
-    apictx: Arc,
+    apictx: ApiContext,
 }
 impl Server {
@@ -132,16 +140,17 @@ impl Server {
         let config = internal.config;
         // Wait until RSS handoff completes.
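// (Hedged sketch, not part of the patch.) Why carry a server kind at all:
// the same external_api() description is served by both the external and
// the techport server (see the two HttpServerStarter calls just below), so
// a handler needs the kind to tell the two apart. Variant names here are
// assumptions; the allow_list test at the end of this series demonstrates
// the behavior this enables.
fn must_enforce_allowlist_lockout_check(kind: ServerKind) -> bool {
    // The techport proxy stays reachable even when an allowlist update
    // would lock the requester out of the external server, so it alone may
    // skip the "would prevent access from the current client" guard.
    !matches!(kind, ServerKind::Techport)
}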
-        let opctx = apictx.nexus.opctx_for_service_balancer();
-        apictx.nexus.await_rack_initialization(&opctx).await;
+        let opctx = apictx.context.nexus.opctx_for_service_balancer();
+        apictx.context.nexus.await_rack_initialization(&opctx).await;
         // While we've started our internal server, we need to wait until we've
         // definitely implemented our source IP allowlist for making requests to
         // the external server we're about to start.
-        apictx.nexus.await_ip_allowlist_plumbing().await;
+        apictx.context.nexus.await_ip_allowlist_plumbing().await;
         // Launch the external server.
         let tls_config = apictx
+            .context
             .nexus
             .external_tls_config(config.deployment.dropshot_external.tls)
             .await;
@@ -167,7 +176,7 @@ impl Server {
             dropshot::HttpServerStarter::new_with_tls(
                 &config.deployment.dropshot_external.dropshot,
                 external_api(),
-                Arc::clone(&apictx),
+                apictx.for_external(),
                 &log.new(o!("component" => "dropshot_external")),
                 tls_config.clone().map(dropshot::ConfigTls::Dynamic),
             )
@@ -181,7 +190,7 @@ impl Server {
             dropshot::HttpServerStarter::new_with_tls(
                 &techport_server_config,
                 external_api(),
-                Arc::clone(&apictx),
+                apictx.for_techport(),
                 &log.new(o!("component" => "dropshot_external_techport")),
                 tls_config.map(dropshot::ConfigTls::Dynamic),
             )
@@ -195,11 +204,12 @@ impl Server {
         // metric data.
         let producer_server = start_producer_server(
             &log,
-            &apictx.producer_registry,
+            &apictx.context.producer_registry,
             http_server_internal.local_addr(),
         )?;
         apictx
+            .context
             .nexus
             .set_servers(
                 http_server_external,
@@ -212,8 +222,8 @@ impl Server {
         Ok(server)
     }
-    pub fn apictx(&self) -> &Arc {
-        &self.apictx
+    pub fn server_context(&self) -> &Arc {
+        &self.apictx.context
     }
     /// Wait for the given server to shut down
     ///
     /// immediately after calling `start()`, the program will block indefinitely
     /// or until something else initiates a graceful shutdown.
     pub(crate) async fn wait_for_finish(self) -> Result<(), String> {
-        self.apictx.nexus.wait_for_shutdown().await
+        self.server_context().nexus.wait_for_shutdown().await
     }
 }
@@ -236,7 +246,7 @@ impl nexus_test_interface::NexusServer for Server {
     ) -> (InternalServer, SocketAddr) {
         let internal_server = InternalServer::start(config, &log).await.unwrap();
-        internal_server.apictx.nexus.wait_for_populate().await.unwrap();
+        internal_server.apictx.context.nexus.wait_for_populate().await.unwrap();
         let addr = internal_server.http_server_internal.local_addr();
         (internal_server, addr)
     }
@@ -259,7 +269,8 @@ impl nexus_test_interface::NexusServer for Server {
         // Perform the "handoff from RSS".
         //
         // However, RSS isn't running, so we'll do the handoff ourselves.
-        let opctx = internal_server.apictx.nexus.opctx_for_internal_api();
+        let opctx =
+            internal_server.apictx.context.nexus.opctx_for_internal_api();
         // Allocation of the initial Nexus's external IP is a little funny. In
         // a real system, it'd be allocated by RSS and provided with the rack
@@ -290,6 +301,7 @@ impl nexus_test_interface::NexusServer for Server {
         internal_server
             .apictx
+            .context
             .nexus
             .rack_initialize(
                 &opctx,
@@ -332,7 +344,7 @@ impl nexus_test_interface::NexusServer for Server {
         // Historically, tests have assumed that there's only one provisionable
         // sled, and that's convenient for a lot of purposes. Mark our second
         // sled non-provisionable.
- let nexus = &rv.apictx().nexus; + let nexus = &rv.server_context().nexus; nexus .sled_set_provision_policy( &opctx, @@ -349,11 +361,15 @@ impl nexus_test_interface::NexusServer for Server { } async fn get_http_server_external_address(&self) -> SocketAddr { - self.apictx.nexus.get_external_server_address().await.unwrap() + self.apictx.context.nexus.get_external_server_address().await.unwrap() + } + + async fn get_http_server_techport_address(&self) -> SocketAddr { + self.apictx.context.nexus.get_techport_server_address().await.unwrap() } async fn get_http_server_internal_address(&self) -> SocketAddr { - self.apictx.nexus.get_internal_server_address().await.unwrap() + self.apictx.context.nexus.get_internal_server_address().await.unwrap() } async fn upsert_crucible_dataset( @@ -363,8 +379,9 @@ impl nexus_test_interface::NexusServer for Server { dataset_id: Uuid, address: SocketAddrV6, ) { - let opctx = self.apictx.nexus.opctx_for_internal_api(); + let opctx = self.apictx.context.nexus.opctx_for_internal_api(); self.apictx + .context .nexus .upsert_physical_disk(&opctx, physical_disk) .await @@ -372,9 +389,10 @@ impl nexus_test_interface::NexusServer for Server { let zpool_id = zpool.id; - self.apictx.nexus.upsert_zpool(&opctx, zpool).await.unwrap(); + self.apictx.context.nexus.upsert_zpool(&opctx, zpool).await.unwrap(); self.apictx + .context .nexus .upsert_dataset( dataset_id, @@ -389,7 +407,7 @@ impl nexus_test_interface::NexusServer for Server { async fn inventory_collect_and_get_latest_collection( &self, ) -> Result, Error> { - let nexus = &self.apictx.nexus; + let nexus = &self.apictx.context.nexus; nexus.activate_inventory_collection(); @@ -399,6 +417,7 @@ impl nexus_test_interface::NexusServer for Server { async fn close(mut self) { self.apictx + .context .nexus .close_servers() .await diff --git a/nexus/test-interface/src/lib.rs b/nexus/test-interface/src/lib.rs index 2c7f0989ea..06c5570b7b 100644 --- a/nexus/test-interface/src/lib.rs +++ b/nexus/test-interface/src/lib.rs @@ -68,6 +68,7 @@ pub trait NexusServer: Send + Sync + 'static { ) -> Self; async fn get_http_server_external_address(&self) -> SocketAddr; + async fn get_http_server_techport_address(&self) -> SocketAddr; async fn get_http_server_internal_address(&self) -> SocketAddr; // Previously, as a dataset was created (within the sled agent), diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 23d84ee702..8bbb6ef38c 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -104,6 +104,7 @@ pub const TEST_SUITE_PASSWORD: &str = "oxide"; pub struct ControlPlaneTestContext { pub start_time: chrono::DateTime, pub external_client: ClientTestContext, + pub techport_client: ClientTestContext, pub internal_client: ClientTestContext, pub server: N, pub database: dev::db::CockroachInstance, @@ -257,6 +258,7 @@ pub struct ControlPlaneTestContextBuilder<'a, N: NexusServer> { pub logctx: LogContext, pub external_client: Option, + pub techport_client: Option, pub internal_client: Option, pub server: Option, @@ -307,6 +309,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { start_time, logctx, external_client: None, + techport_client: None, internal_client: None, server: None, database: None, @@ -832,6 +835,8 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { let external_server_addr = server.get_http_server_external_address().await; + let techport_external_server_addr = + server.get_http_server_techport_address().await; let internal_server_addr = 
server.get_http_server_internal_address().await; let testctx_external = ClientTestContext::new( @@ -840,6 +845,12 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .log .new(o!("component" => "external client test context")), ); + let testctx_techport = ClientTestContext::new( + techport_external_server_addr, + self.logctx.log.new( + o!("component" => "techport external client test context"), + ), + ); let testctx_internal = ClientTestContext::new( internal_server_addr, self.logctx @@ -849,6 +860,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { self.external_dns_zone_name = Some(external_dns_zone_name); self.external_client = Some(testctx_external); + self.techport_client = Some(testctx_techport); self.internal_client = Some(testctx_internal); self.silo_name = Some(silo_name); self.user_name = Some(user_name); @@ -1086,6 +1098,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { start_time: self.start_time, server: self.server.unwrap(), external_client: self.external_client.unwrap(), + techport_client: self.techport_client.unwrap(), internal_client: self.internal_client.unwrap(), database: self.database.unwrap(), clickhouse: self.clickhouse.unwrap(), diff --git a/nexus/tests/integration_tests/allow_list.rs b/nexus/tests/integration_tests/allow_list.rs index fde0fe5db7..dc206843f7 100644 --- a/nexus/tests/integration_tests/allow_list.rs +++ b/nexus/tests/integration_tests/allow_list.rs @@ -104,7 +104,7 @@ async fn test_allow_list(cptestctx: &ControlPlaneTestContext) { let addrs = vec![IpNet::single(IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1)))]; let allowed_ips = AllowedSourceIps::try_from(addrs.clone()) .expect("Expected a valid IP list"); - let new_list = params::AllowListUpdate { allowed_ips }; + let new_list = params::AllowListUpdate { allowed_ips: allowed_ips.clone() }; let err: dropshot::HttpErrorResponseBody = NexusRequest::expect_failure_with_body( client, @@ -122,4 +122,9 @@ async fn test_allow_list(cptestctx: &ControlPlaneTestContext) { assert!(err .message .contains("would prevent access from the current client")); + + // But we _should_ be able to make this self-defeating request through the + // techport proxy server. + let client = &cptestctx.techport_client; + update_list_and_compare(client, allowed_ips).await; } diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index c74cfb5c50..886504a83b 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -191,7 +191,7 @@ async fn test_disk_create_attach_detach_delete( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; let project_id = create_project_and_pool(client).await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disks_url = get_disks_url(); // Create a disk. 
@@ -365,7 +365,7 @@ async fn test_disk_slot_assignment(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_names = ["a", "b", "c", "d"]; let mut disks = Vec::new(); @@ -391,7 +391,7 @@ async fn test_disk_slot_assignment(cptestctx: &ControlPlaneTestContext) { get_disk_attach_url(&instance.identity.id.into()); async fn get_disk_slot(ctx: &ControlPlaneTestContext, disk_id: Uuid) -> u8 { - let apictx = &ctx.server.apictx(); + let apictx = &ctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); let opctx = @@ -469,7 +469,7 @@ async fn test_disk_slot_assignment(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(&client).await; let disks_url = get_disks_url(); @@ -1043,7 +1043,7 @@ async fn test_disk_virtual_provisioning_collection( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let _test = DiskTest::new(&cptestctx).await; @@ -1251,7 +1251,7 @@ async fn test_disk_virtual_provisioning_collection_failed_delete( ) { // Confirm that there's no panic deleting a project if a disk deletion fails let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let disk_test = DiskTest::new(&cptestctx).await; @@ -1391,7 +1391,7 @@ async fn test_phantom_disk_rename(cptestctx: &ControlPlaneTestContext) { // faulted let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let _disk_test = DiskTest::new(&cptestctx).await; @@ -1512,7 +1512,7 @@ async fn test_phantom_disk_rename(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_disk_size_accounting(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); // Create three 10 GiB zpools, each with one dataset. @@ -1973,7 +1973,7 @@ async fn test_project_delete_disk_no_auth_idempotent( // Call project_delete_disk_no_auth twice, ensuring that the disk is either // there before deleting and not afterwards. 
- let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -2013,7 +2013,7 @@ async fn test_project_delete_disk_no_auth_idempotent( // Test allocating a single region #[nexus_test] async fn test_single_region_allocate(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -2085,7 +2085,7 @@ async fn test_single_region_allocate(cptestctx: &ControlPlaneTestContext) { async fn test_region_allocation_strategy_random_is_idempotent( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -2152,7 +2152,7 @@ async fn test_region_allocation_strategy_random_is_idempotent( async fn test_region_allocation_strategy_random_is_idempotent_arbitrary( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -2208,7 +2208,7 @@ async fn test_region_allocation_strategy_random_is_idempotent_arbitrary( async fn test_single_region_allocate_for_replace( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -2297,7 +2297,7 @@ async fn test_single_region_allocate_for_replace( async fn test_single_region_allocate_for_replace_not_enough_zpools( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -2387,7 +2387,7 @@ async fn test_single_region_allocate_for_replace_not_enough_zpools( async fn test_region_allocation_after_delete( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs index 9d7ef34b35..396edddc41 100644 --- a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -628,7 +628,7 @@ async fn test_floating_ip_create_attachment( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; create_default_ip_pool(&client).await; @@ -725,7 +725,7 @@ async fn test_external_ip_live_attach_detach( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; 
create_default_ip_pool(&client).await; @@ -934,7 +934,7 @@ async fn test_floating_ip_attach_fail_between_projects( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let _nexus = &apictx.nexus; create_default_ip_pool(&client).await; @@ -1009,7 +1009,7 @@ async fn test_external_ip_attach_fail_if_in_use_by_other( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; create_default_ip_pool(&client).await; diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 0b5947ef7e..7ad52b9919 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -280,7 +280,7 @@ async fn test_instances_create_reboot_halt( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; @@ -585,7 +585,7 @@ async fn test_instance_start_creates_networking_state( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "series-of-tubes"; @@ -688,7 +688,7 @@ async fn test_instance_start_creates_networking_state( #[nexus_test] async fn test_instance_migrate(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "bird-ecology"; @@ -784,7 +784,7 @@ async fn test_instance_migrate(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); let opctx = @@ -936,7 +936,7 @@ async fn test_instance_failed_after_sled_agent_error( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "losing-is-fun"; @@ -1063,7 +1063,7 @@ async fn assert_metrics( #[nexus_test] async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); @@ -1143,14 +1143,14 @@ async fn test_instance_metrics_with_migration( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "bird-ecology"; // Wait until Nexus registers as a producer with Oximeter. 
wait_for_producer( &cptestctx.oximeter, - cptestctx.server.apictx().nexus.id(), + cptestctx.server.server_context().nexus.id(), ) .await; @@ -1276,7 +1276,7 @@ async fn test_instances_create_stopped_start( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; @@ -1327,7 +1327,7 @@ async fn test_instances_delete_fails_when_running_succeeds_when_stopped( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; @@ -1849,7 +1849,7 @@ async fn test_instance_create_delete_network_interface( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let instance_name = "nic-attach-test-inst"; create_project_and_pool(&client).await; @@ -2090,7 +2090,7 @@ async fn test_instance_update_network_interfaces( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let instance_name = "nic-update-test-inst"; create_project_and_pool(&client).await; @@ -2710,7 +2710,7 @@ async fn test_instance_create_attach_disks_undo( let faulted_disk = create_disk(&client, PROJECT_NAME, "faulted-disk").await; // set `faulted_disk` to the faulted state - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; assert!(nexus .set_disk_as_faulted(&faulted_disk.identity.id) @@ -2971,7 +2971,7 @@ async fn test_cannot_attach_faulted_disks(cptestctx: &ControlPlaneTestContext) { assert_eq!(disks.len(), 8); // Set the 7th to FAULTED - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; assert!(nexus.set_disk_as_faulted(&disks[6].identity.id).await.unwrap()); @@ -3129,7 +3129,7 @@ async fn test_disks_detached_when_instance_destroyed( // sled. let instance_url = format!("/v1/instances/nfs?project={}", PROJECT_NAME); let instance = instance_get(&client, &instance_url).await; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let sa = nexus .instance_sled_by_id(&instance.identity.id) @@ -3656,7 +3656,7 @@ async fn test_cannot_provision_instance_beyond_cpu_capacity( // Make the started instance transition to Running, shut it down, and verify // that the other reasonably-sized instance can now start. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; instance_simulate(nexus, &instances[1].identity.id).await; instances[1] = instance_post(client, configs[1].0, InstanceOp::Stop).await; instance_simulate(nexus, &instances[1].identity.id).await; @@ -3762,7 +3762,7 @@ async fn test_cannot_provision_instance_beyond_ram_capacity( // Make the started instance transition to Running, shut it down, and verify // that the other reasonably-sized instance can now start. 
- let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; instance_simulate(nexus, &instances[1].identity.id).await; instances[1] = instance_post(client, configs[1].0, InstanceOp::Stop).await; instance_simulate(nexus, &instances[1].identity.id).await; @@ -3772,7 +3772,7 @@ async fn test_cannot_provision_instance_beyond_ram_capacity( #[nexus_test] async fn test_instance_serial(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "kris-picks"; @@ -4052,7 +4052,7 @@ async fn stop_and_delete_instance( let client = &cptestctx.external_client; let instance = instance_post(&client, instance_name, InstanceOp::Stop).await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; instance_simulate(nexus, &instance.identity.id).await; let url = format!("/v1/instances/{}?project={}", instance_name, PROJECT_NAME); @@ -4436,7 +4436,7 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { // Make sure the instance can actually start even though a collaborator // created it. - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let authn = AuthnMode::SiloUser(user_id); let instance_url = get_instance_url(instance_name); @@ -4533,7 +4533,7 @@ async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { // Validate that every sled (except the instance's sled) now has a V2P // mapping for this instance - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); let opctx = diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index cb5eade735..38cfd25844 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -791,7 +791,7 @@ async fn test_ip_pool_utilization_total(cptestctx: &ControlPlaneTestContext) { // allowed. It's worth doing because we want this code to correctly handle // IPv6 ranges when they are allowed again. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let log = cptestctx.logctx.log.new(o!()); let opctx = OpContext::for_tests(log, datastore.clone()); @@ -1147,7 +1147,7 @@ async fn test_ip_range_delete_with_allocated_external_ip_fails( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let ip_pools_url = "/v1/system/ip-pools"; let pool_name = "mypool"; diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs index ec44c3747a..71d18f95ee 100644 --- a/nexus/tests/integration_tests/metrics.rs +++ b/nexus/tests/integration_tests/metrics.rs @@ -179,7 +179,7 @@ async fn test_metrics( // Wait until Nexus registers as a producer with Oximeter. wait_for_producer( &cptestctx.oximeter, - cptestctx.server.apictx().nexus.id(), + cptestctx.server.server_context().nexus.id(), ) .await; @@ -259,7 +259,7 @@ async fn test_timeseries_schema_list( // Nexus registers itself as a metric producer on startup, with its own UUID // as the producer ID. 
Wait for this to show up in the registered lists of // producers. - let nexus_id = cptestctx.server.apictx().nexus.id(); + let nexus_id = cptestctx.server.server_context().nexus.id(); wait_for_producer(&cptestctx.oximeter, nexus_id).await; // We should be able to fetch the list of timeseries, and it should include @@ -328,7 +328,7 @@ async fn test_instance_watcher_metrics( let client = &cptestctx.external_client; let internal_client = &cptestctx.internal_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; // TODO(eliza): consider factoring this out to a generic // `activate_background_task` function in `nexus-test-utils` eventually? @@ -445,7 +445,7 @@ async fn test_instance_watcher_metrics( // Wait until Nexus registers as a producer with Oximeter. wait_for_producer( &cptestctx.oximeter, - cptestctx.server.apictx().nexus.id(), + cptestctx.server.server_context().nexus.id(), ) .await; diff --git a/nexus/tests/integration_tests/pantry.rs b/nexus/tests/integration_tests/pantry.rs index 1a3908affa..c5d98709ac 100644 --- a/nexus/tests/integration_tests/pantry.rs +++ b/nexus/tests/integration_tests/pantry.rs @@ -393,7 +393,7 @@ async fn test_cannot_mount_import_ready_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -424,7 +424,7 @@ async fn test_cannot_mount_import_from_bulk_writes_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -448,7 +448,7 @@ async fn test_import_blocks_with_bulk_write( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -489,7 +489,7 @@ async fn test_import_blocks_with_bulk_write_with_snapshot( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -732,7 +732,7 @@ async fn test_cannot_bulk_write_start_attached_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -762,7 +762,7 @@ async fn test_cannot_bulk_write_attached_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -792,7 +792,7 @@ async fn test_cannot_bulk_write_stop_attached_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -821,7 +821,7 @@ async fn test_cannot_finalize_attached_disk( 
cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; diff --git a/nexus/tests/integration_tests/rack.rs b/nexus/tests/integration_tests/rack.rs index a6c218cea8..a7ebf0f8b6 100644 --- a/nexus/tests/integration_tests/rack.rs +++ b/nexus/tests/integration_tests/rack.rs @@ -35,14 +35,17 @@ async fn test_list_own_rack(cptestctx: &ControlPlaneTestContext) { .all_items; assert_eq!(1, racks.len()); - assert_eq!(cptestctx.server.apictx().nexus.rack_id(), racks[0].identity.id); + assert_eq!( + cptestctx.server.server_context().nexus.rack_id(), + racks[0].identity.id + ); } #[nexus_test] async fn test_get_own_rack(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let expected_id = cptestctx.server.apictx().nexus.rack_id(); + let expected_id = cptestctx.server.server_context().nexus.rack_id(); let rack_url = format!("/v1/system/hardware/racks/{}", expected_id); let rack = NexusRequest::object_get(client, &rack_url) .authn_as(AuthnMode::PrivilegedUser) diff --git a/nexus/tests/integration_tests/saml.rs b/nexus/tests/integration_tests/saml.rs index b1b0429c2e..80816f2ea2 100644 --- a/nexus/tests/integration_tests/saml.rs +++ b/nexus/tests/integration_tests/saml.rs @@ -91,7 +91,7 @@ async fn test_create_a_saml_idp(cptestctx: &ControlPlaneTestContext) { .await; // Assert external authenticator opctx can read it - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let (.., _retrieved_silo_nexus) = nexus .silo_lookup( &nexus.opctx_external_authn(), @@ -1167,7 +1167,7 @@ async fn test_post_saml_response(cptestctx: &ControlPlaneTestContext) { ) .await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; nexus.set_samael_max_issue_delay( chrono::Utc::now() - "2022-05-04T15:36:12.631Z" @@ -1298,7 +1298,7 @@ async fn test_post_saml_response_with_relay_state( ) .await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; nexus.set_samael_max_issue_delay( chrono::Utc::now() - "2022-05-04T15:36:12.631Z" diff --git a/nexus/tests/integration_tests/silo_users.rs b/nexus/tests/integration_tests/silo_users.rs index 099a186a2c..598d2a28a4 100644 --- a/nexus/tests/integration_tests/silo_users.rs +++ b/nexus/tests/integration_tests/silo_users.rs @@ -26,10 +26,10 @@ type ControlPlaneTestContext = #[nexus_test] async fn test_silo_group_users(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ); // we start out with the two default users diff --git a/nexus/tests/integration_tests/silos.rs b/nexus/tests/integration_tests/silos.rs index 6dfddb12e1..e95b2870ca 100644 --- a/nexus/tests/integration_tests/silos.rs +++ b/nexus/tests/integration_tests/silos.rs @@ -55,7 +55,7 @@ type ControlPlaneTestContext = #[nexus_test] async fn test_silos(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; // Verify that 
we cannot create a name with the same name as the recovery // Silo that was created during rack initialization. @@ -277,7 +277,7 @@ async fn test_silos(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_silo_admin_group(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let silo: Silo = object_create( client, @@ -523,7 +523,7 @@ async fn test_deleting_a_silo_deletes_the_idp( .expect("failed to make request"); // Expect that the silo is gone - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let response = IdentityProviderType::lookup( &nexus.datastore(), @@ -747,7 +747,7 @@ struct TestSiloUserProvisionTypes { #[nexus_test] async fn test_silo_user_provision_types(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let test_cases: Vec = vec![ @@ -844,7 +844,7 @@ async fn test_silo_user_fetch_by_external_id( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let silo = create_silo( &client, @@ -1026,7 +1026,7 @@ async fn test_silo_users_list(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_silo_groups_jit(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let silo = create_silo( @@ -1095,7 +1095,7 @@ async fn test_silo_groups_jit(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_silo_groups_fixed(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let silo = create_silo( &client, @@ -1156,7 +1156,7 @@ async fn test_silo_groups_remove_from_one_group( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let silo = create_silo( @@ -1269,7 +1269,7 @@ async fn test_silo_groups_remove_from_both_groups( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let silo = create_silo( @@ -1381,7 +1381,7 @@ async fn test_silo_groups_remove_from_both_groups( #[nexus_test] async fn test_silo_delete_clean_up_groups(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; // Create a silo let silo = create_silo( @@ -1463,7 +1463,7 @@ async fn test_silo_delete_clean_up_groups(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_ensure_same_silo_group(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; // Create a silo let silo = create_silo( @@ -1525,7 +1525,7 @@ async fn 
test_ensure_same_silo_group(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_silo_user_views(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); // Create the two Silos. let silo1 = @@ -1741,7 +1741,7 @@ async fn create_jit_user( #[nexus_test] async fn test_jit_silo_constraints(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let silo = create_silo(&client, "jit", true, shared::SiloIdentityMode::SamlJit) diff --git a/nexus/tests/integration_tests/sleds.rs b/nexus/tests/integration_tests/sleds.rs index bf1e2e4b99..97dbb39bc6 100644 --- a/nexus/tests/integration_tests/sleds.rs +++ b/nexus/tests/integration_tests/sleds.rs @@ -106,7 +106,7 @@ async fn test_physical_disk_create_list_delete( let disks_initial = physical_disks_list(&external_client, &disks_url).await; // Inject a disk into the database, observe it in the external API - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let sled_id = Uuid::from_str(&SLED_AGENT_UUID).unwrap(); let physical_disk = DbPhysicalDisk::new( diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index 058c59a501..3fb6f8f6ec 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -136,7 +136,7 @@ async fn test_snapshot_basic(cptestctx: &ControlPlaneTestContext) { .await; // cannot snapshot attached disk for instance in state starting - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; instance_simulate(nexus, &instance.identity.id).await; // Issue snapshot request @@ -362,7 +362,7 @@ async fn test_snapshot_stopped_instance(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); DiskTest::new(&cptestctx).await; let project_id = create_project_and_pool(client).await; @@ -521,7 +521,7 @@ async fn test_reject_creating_disk_from_snapshot( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let project_id = create_project_and_pool(&client).await; @@ -674,7 +674,7 @@ async fn test_reject_creating_disk_from_illegal_snapshot( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let project_id = create_project_and_pool(&client).await; @@ -770,7 +770,7 @@ async fn test_reject_creating_disk_from_other_project_snapshot( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let project_id = create_project_and_pool(&client).await; @@ -1002,7 +1002,7 @@ async fn 
test_create_snapshot_record_idempotent( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let project_id = create_project_and_pool(&client).await; @@ -1194,7 +1194,7 @@ async fn test_create_snapshot_record_idempotent( async fn test_region_snapshot_create_idempotent( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let region_snapshot = db::model::RegionSnapshot { @@ -1218,7 +1218,7 @@ async fn test_region_snapshot_create_idempotent( #[nexus_test] async fn test_multiple_deletes_not_sent(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); DiskTest::new(&cptestctx).await; let _project_id = create_project_and_pool(client).await; diff --git a/nexus/tests/integration_tests/subnet_allocation.rs b/nexus/tests/integration_tests/subnet_allocation.rs index d9d015bf26..0efc659890 100644 --- a/nexus/tests/integration_tests/subnet_allocation.rs +++ b/nexus/tests/integration_tests/subnet_allocation.rs @@ -91,8 +91,12 @@ async fn test_subnet_allocation(cptestctx: &ControlPlaneTestContext) { // Create a new, small VPC Subnet, so we don't need to issue many requests // to test address exhaustion. - let subnet_size = - cptestctx.server.apictx().nexus.tunables().max_vpc_ipv4_subnet_prefix; + let subnet_size = cptestctx + .server + .server_context() + .nexus + .tunables() + .max_vpc_ipv4_subnet_prefix; let vpc_selector = format!("project={}&vpc=default", project_name); let subnets_url = format!("/v1/vpc-subnets?{}", vpc_selector); let subnet_name = "small"; diff --git a/nexus/tests/integration_tests/volume_management.rs b/nexus/tests/integration_tests/volume_management.rs index ecfa7cf0f1..ae348e775d 100644 --- a/nexus/tests/integration_tests/volume_management.rs +++ b/nexus/tests/integration_tests/volume_management.rs @@ -1351,7 +1351,7 @@ async fn test_volume_remove_read_only_parent_base( ) { // Test the removal of a volume with a read only parent. // The ROP should end up on the t_vid volume. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1465,7 +1465,7 @@ async fn test_volume_remove_read_only_parent_no_parent( ) { // Test the removal of a read only parent from a volume // without a read only parent. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1483,7 +1483,7 @@ async fn test_volume_remove_read_only_parent_volume_not_volume( ) { // test removal of a read only volume for a volume that is not // of a type to have a read only parent. 
- let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1512,7 +1512,7 @@ async fn test_volume_remove_read_only_parent_bad_volume( ) { // Test the removal of a read only parent from a volume // that does not exist - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1528,7 +1528,7 @@ async fn test_volume_remove_read_only_parent_volume_deleted( cptestctx: &ControlPlaneTestContext, ) { // Test the removal of a read_only_parent from a deleted volume. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1558,7 +1558,7 @@ async fn test_volume_remove_read_only_parent_volume_deleted( async fn test_volume_remove_rop_saga(cptestctx: &ControlPlaneTestContext) { // Test the saga for removal of a volume with a read only parent. // We create a volume with a read only parent, then call the saga on it. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1621,7 +1621,7 @@ async fn test_volume_remove_rop_saga_twice( // Test calling the saga for removal of a volume with a read only parent // two times, the first will remove the read_only_parent, the second will // do nothing. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1720,7 +1720,7 @@ async fn test_volume_remove_rop_saga_volume_not_volume( ) { // Test saga removal of a read only volume for a volume that is not // of a type to have a read only parent. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let volume_id = Uuid::new_v4(); let datastore = nexus.datastore(); @@ -1759,7 +1759,7 @@ async fn test_volume_remove_rop_saga_deleted_volume( ) { // Test that a saga removal of a read_only_parent from a deleted volume // takes no action on that deleted volume. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1823,7 +1823,7 @@ async fn test_volume_remove_rop_saga_deleted_volume( async fn test_volume_checkout(cptestctx: &ControlPlaneTestContext) { // Verify that a volume_checkout will update the generation number in the // database when the volume type is Volume with sub_volume Region. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1874,7 +1874,7 @@ async fn test_volume_checkout_updates_nothing( ) { // Verify that a volume_checkout will do nothing for a volume that does // not contain a sub_volume with a generation field. 
- let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1927,7 +1927,7 @@ async fn test_volume_checkout_updates_multiple_gen( // Verify that a volume_checkout will update the generation number in the // database when the volume type is Volume with multiple sub_volumes of // type Region. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1993,7 +1993,7 @@ async fn test_volume_checkout_updates_sparse_multiple_gen( // database when the volume type is Volume with multiple sub_volumes of // type Region and also verify that a non generation sub_volume won't be a // problem - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -2054,7 +2054,7 @@ async fn test_volume_checkout_updates_sparse_mid_multiple_gen( // database when the volume type is Volume with multiple sub_volumes of // type Region and also verify that a non generation sub_volume in the // middle of the sub_volumes won't be a problem - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -2113,7 +2113,7 @@ async fn test_volume_checkout_randomize_ids_only_read_only( ) { // Verify that a volume_checkout_randomize_ids will not work for // non-read-only Regions - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -2155,7 +2155,7 @@ async fn test_volume_checkout_randomize_ids_only_read_only( /// `[ipv6]:port` targets being reused. #[nexus_test] async fn test_keep_your_targets_straight(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); // Four zpools, one dataset each @@ -2646,7 +2646,7 @@ fn volume_match_gen( async fn test_volume_hard_delete_idempotent( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); diff --git a/nexus/tests/integration_tests/vpc_subnets.rs b/nexus/tests/integration_tests/vpc_subnets.rs index 76cff9ac79..0814512cf2 100644 --- a/nexus/tests/integration_tests/vpc_subnets.rs +++ b/nexus/tests/integration_tests/vpc_subnets.rs @@ -31,7 +31,7 @@ async fn test_delete_vpc_subnet_with_interfaces_fails( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; // Create a project that we'll use for testing. 
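Every test hunk in this patch is the same mechanical substitution: `cptestctx.server.apictx()` becomes `cptestctx.server.server_context()`. The accessor behind it is the one added to `Server` in `lib.rs` earlier in the patch; in sketch form, with the generic parameters spelled out on the assumption they match the surrounding code:

```
impl Server {
    /// Hands tests the shared state inside the wrapped context; the
    /// old `apictx()` returned the `Arc<ServerContext>` directly.
    pub fn server_context(&self) -> &Arc<ServerContext> {
        &self.apictx.context
    }
}
```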
From 9d985e72717965e3f3388864556cc25550a01e38 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 14 May 2024 12:22:55 -0700 Subject: [PATCH 4/9] Update Rust crate prettyplease to 0.2.20 (#5760) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 38cb54142e..f0ff8138e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6926,9 +6926,9 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ac2cf0f2e4f42b49f5ffd07dae8d746508ef7526c13940e5f524012ae6c6550" +checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", "syn 2.0.60", diff --git a/Cargo.toml b/Cargo.toml index f04fffff7d..19d2924a7e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -376,7 +376,7 @@ postgres-protocol = "0.6.6" predicates = "3.1.0" pretty_assertions = "1.4.0" pretty-hex = "0.4.1" -prettyplease = { version = "0.2.19", features = ["verbatim"] } +prettyplease = { version = "0.2.20", features = ["verbatim"] } proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } From a5e29992fc79970324974fcee7451fdc79c2510d Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Tue, 14 May 2024 15:59:08 -0400 Subject: [PATCH 5/9] Allow recommissioning of previously-decommissioned sleds (#5733) This is a pretty small delta on top of #5698 that I've been sitting on for a few days before getting a chance to test it on a4x2 this afternoon. I set up a4x2 with three sleds (g0, g1, g3), and went through 3.5 add/remove cycles on sled g2 (3 add/remove pairs and a 4th add). 
g2 is present in `omdb db sleds`: ``` root@oxz_switch:~# omdb db sleds SERIAL IP ROLE POLICY STATE ID g3 [fd00:1122:3344:103::1]:12345 scrimlet in service active 13b8d30d-b66d-4333-b156-9aa2527e130b g2 [fd00:1122:3344:124::1]:12345 - in service active 218e4e9c-0a27-4460-a65c-7e93bef531c4 g1 [fd00:1122:3344:102::1]:12345 - in service active 661e9e3e-0beb-43fe-9606-109b7145b258 g0 [fd00:1122:3344:101::1]:12345 scrimlet in service active 9f19235f-bfa4-4032-9cad-608448b4f1a0 ``` I added `SledFilter::Decommissioned` specifically for use with omdb, where we can see the 3 previous times `g2` was present (each with a different sled ID): ``` root@oxz_switch:~# omdb db sleds -F decommissioned SERIAL IP ROLE POLICY STATE ID g2 [fd00:1122:3344:121::1]:12345 - expunged decommissioned a7b09236-c8ba-4025-934d-b5f0539d9379 g2 [fd00:1122:3344:123::1]:12345 - expunged decommissioned ec0e81bf-6366-4273-97bc-47223334dc90 g2 [fd00:1122:3344:122::1]:12345 - expunged decommissioned f5f8ba44-9681-4e6f-84f2-fd654c83dd23 ``` The current blueprint shows history of all four times the sled has been present, because we don't currently prune expunged zones: ``` root@oxz_switch:~# omdb nexus blueprints show current blueprint 5bfb182e-df8a-41e5-a8a3-862bb42f8feb parent: 5b2823c8-ca41-46b5-9efd-ffaae0e6e028 ----------------------------------------------------------------------------------------------- zone type zone ID disposition underlay IP ----------------------------------------------------------------------------------------------- sled 13b8d30d-b66d-4333-b156-9aa2527e130b: blueprint zones at generation 5 clickhouse 9ee21438-4a2f-499a-b867-58993a36f2f0 in service fd00:1122:3344:103::6 cockroach_db b0f22c64-a8e7-4c83-af26-f30a0dc413d5 in service fd00:1122:3344:103::3 crucible 29f3db85-722f-41ed-a66c-66aa31a31591 in service fd00:1122:3344:103::b crucible 40317028-eedf-487f-88ae-821632f05f39 in service fd00:1122:3344:103::8 crucible 50153da9-da61-4cd9-829c-18129e5a6c52 in service fd00:1122:3344:103::9 crucible 765dc0e2-1850-43b1-ae08-531d51772f14 in service fd00:1122:3344:103::a crucible bcc91ace-f816-400d-9198-22ed20e00ca3 in service fd00:1122:3344:103::c crucible_pantry bf1c3443-fbf5-4e16-affb-ee4e2598cbcb in service fd00:1122:3344:103::7 external_dns ed00e862-094c-449b-bfdb-ddba26f36bb2 in service fd00:1122:3344:103::4 internal_dns 6b4f3315-ce80-4d89-a495-ff96c5f573cd in service fd00:1122:3344:3::1 internal_ntp 6b9d837b-9bb9-46f1-8f48-c0c8a7d89882 in service fd00:1122:3344:103::d nexus 327c035e-628e-4880-b434-8d77ca362f21 in service fd00:1122:3344:103::5 sled 218e4e9c-0a27-4460-a65c-7e93bef531c4: blueprint zones at generation 3 crucible 5a7167ff-04ef-47fd-93bc-2adc7c8a7087 in service fd00:1122:3344:124::26 crucible 9ca15af1-79dc-4248-86d1-5d7d25987609 in service fd00:1122:3344:124::23 crucible a80accc8-3fdd-42b2-8425-eb246a7f0ba0 in service fd00:1122:3344:124::22 crucible d80a6050-f2e3-4827-8fd0-37c5c88ac87d in service fd00:1122:3344:124::25 crucible e92e8930-6f81-4f09-a520-415ef896a05f in service fd00:1122:3344:124::24 internal_ntp a37275e8-eb02-4cb3-b216-a47a892313e7 in service fd00:1122:3344:124::21 sled 661e9e3e-0beb-43fe-9606-109b7145b258: blueprint zones at generation 5 boundary_ntp 8eb79dcb-b6a0-4a24-835e-cf9e55b12495 in service fd00:1122:3344:102::d cockroach_db 294868f1-d76b-4ef6-a87b-399ec06ba9a3 in service fd00:1122:3344:102::3 cockroach_db 373fa1d2-597c-4245-a87b-31a7f734c806 in service fd00:1122:3344:102::4 crucible 00611d19-0752-4e6f-a57a-43df54089987 in service fd00:1122:3344:102::c crucible 
05697e11-b936-4319-b1a6-ba8e46c77988 in service fd00:1122:3344:102::b crucible 612ad32d-321d-4159-9265-4c0eca972e2c in service fd00:1122:3344:102::9 crucible c03f67ee-7f38-445f-b9a5-dcfd1feb976c in service fd00:1122:3344:102::a crucible e2e46038-8ecb-4fe0-be1a-94530842cd65 in service fd00:1122:3344:102::8 crucible_pantry 4fea76f9-d88e-43e9-ade7-2345dd77d2a7 in service fd00:1122:3344:102::7 internal_dns 504f14cf-42ea-4457-b922-fb400eea1495 in service fd00:1122:3344:2::1 nexus 8d3ba915-17f6-4c2f-a94d-31120e89f17b in service fd00:1122:3344:102::5 oximeter 4d98514a-a352-4ddc-996f-912958d9a80d in service fd00:1122:3344:102::6 sled 9f19235f-bfa4-4032-9cad-608448b4f1a0: blueprint zones at generation 5 boundary_ntp 603a1fe0-1fd1-4fc1-968c-af95e1400f2d in service fd00:1122:3344:101::d cockroach_db d0f66442-cc46-48b3-9770-d473517a81f5 in service fd00:1122:3344:101::3 cockroach_db ed701854-eb03-43eb-8a7c-5c1aea73cc51 in service fd00:1122:3344:101::4 crucible 56151b6f-fa7d-4459-9540-6b04460d2c64 in service fd00:1122:3344:101::8 crucible 8b37469e-2660-498e-bad7-9c9158b4fbae in service fd00:1122:3344:101::9 crucible b602ce7d-2b92-4e12-8b95-dd2c4cad3397 in service fd00:1122:3344:101::a crucible d87cae8c-d85d-4a25-b7d6-f7bbce50f6e9 in service fd00:1122:3344:101::b crucible ea549438-9531-491e-b85b-b92668184c9f in service fd00:1122:3344:101::c crucible_pantry 51e18a3d-a4ce-4ee1-a900-3f23444c9bfd in service fd00:1122:3344:101::7 external_dns 60f77e70-af63-4b4d-b3b2-62610f707d47 in service fd00:1122:3344:101::5 internal_dns d47c3ade-ae61-49b0-8bb7-217a825517de in service fd00:1122:3344:1::1 nexus a1df997e-3595-4f24-979c-76232ae16164 in service fd00:1122:3344:101::6 sled a7b09236-c8ba-4025-934d-b5f0539d9379: blueprint zones at generation 4 crucible 8aac1836-3d74-4d88-af41-b9e2fda8d3f7 expunged fd00:1122:3344:121::23 crucible a0468efe-3f76-4244-9d92-03a70608c777 expunged fd00:1122:3344:121::22 crucible afc45a35-0860-41d8-bac1-c06ac442fd43 expunged fd00:1122:3344:121::24 crucible b596d696-6521-4ebb-b0b4-22a772f5d1d0 expunged fd00:1122:3344:121::25 crucible b5f1851a-695f-4223-bc67-d30df30e77d0 expunged fd00:1122:3344:121::26 internal_ntp 219fdc9b-fe80-436a-9d81-c8f7bb3f0364 expunged fd00:1122:3344:121::21 sled ec0e81bf-6366-4273-97bc-47223334dc90: blueprint zones at generation 4 crucible 5fe4544e-d44b-45fe-bf99-505007654e97 expunged fd00:1122:3344:123::22 crucible 76f11abf-cfbf-41ee-972b-1c38c6babd22 expunged fd00:1122:3344:123::26 crucible 87821b5d-ed10-4787-ab24-6b7411bcc5d8 expunged fd00:1122:3344:123::25 crucible 8ceb3b5b-eb61-4195-b873-6dac8108fd73 expunged fd00:1122:3344:123::23 crucible c1d0cfb9-9489-4fce-a213-40fb5c1b7b0f expunged fd00:1122:3344:123::24 internal_ntp 86a61155-e468-4906-b0b6-d12fa2f3f4dd expunged fd00:1122:3344:123::21 sled f5f8ba44-9681-4e6f-84f2-fd654c83dd23: blueprint zones at generation 4 crucible 4c82a05d-bbc0-40b2-8b85-f688b56b2f7b expunged fd00:1122:3344:122::22 crucible 6b6a6770-f552-438d-bca0-b1d8bdd8c2ed expunged fd00:1122:3344:122::24 crucible 83eccc0d-afea-4e27-a906-3a629c2094d1 expunged fd00:1122:3344:122::23 crucible 8f6cea3e-5489-4665-a37b-c6336c0e54c8 expunged fd00:1122:3344:122::26 crucible bdd61a6c-50b8-450c-b963-443d77a1c7f4 expunged fd00:1122:3344:122::25 internal_ntp ab8b9602-a861-4985-809d-30e4724a63ab expunged fd00:1122:3344:122::21 METADATA: created by: a1df997e-3595-4f24-979c-76232ae16164 created at: 2024-05-10T19:09:09.547Z comment: sled 218e4e9c-0a27-4460-a65c-7e93bef531c4: add zones internal DNS version: 11 external DNS version: 2 ``` Also visible in that 
blueprint is evidence that we went through the "add NTP" -> "add crucible" reconfigurator steps. I did not go so far as to add a Nexus to g2 each time (it's considerably more manually intensive to do so, which we might need to address if that's a thing we want to test more thoroughly). This also fixes a bug in `allocate_sled_underlay_subnet_octets` that I accidentally introduced in #5675, restoring idempotence up until the point where the sled has upserted itself. (The comments added should clarify this.) We can and should still fail on an attempt to add a sled where we (a) have an allocation and (b) have an entry in the `sled` table, but no longer fail on attempt to add a sled where we (a) have an allocation but (b) do NOT have an entry in the `sled` table. --- dev-tools/omdb/tests/usage_errors.out | 2 + nexus/db-model/src/schema_versions.rs | 3 +- .../src/sled_underlay_subnet_allocation.rs | 2 +- nexus/db-queries/src/db/datastore/rack.rs | 199 ++++++++++++++++-- nexus/src/app/mod.rs | 5 + nexus/src/app/rack.rs | 4 +- nexus/tests/integration_tests/rack.rs | 56 ++++- nexus/types/src/deployment/planning_input.rs | 13 ++ .../up1.sql | 2 + .../up2.sql | 1 + .../up3.sql | 3 + .../up4.sql | 1 + schema/crdb/dbinit.sql | 25 ++- 13 files changed, 275 insertions(+), 41 deletions(-) create mode 100644 schema/crdb/allocate-subnet-decommissioned-sleds/up1.sql create mode 100644 schema/crdb/allocate-subnet-decommissioned-sleds/up2.sql create mode 100644 schema/crdb/allocate-subnet-decommissioned-sleds/up3.sql create mode 100644 schema/crdb/allocate-subnet-decommissioned-sleds/up4.sql diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index 3ffe579a23..15fc9d322e 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -279,6 +279,8 @@ Options: Possible values: - commissioned: All sleds that are currently part of the control plane cluster + - decommissioned: All sleds that were previously part of the control plane cluster + but have been decommissioned - discretionary: Sleds that are eligible for discretionary services - in-service: Sleds that are in service (even if they might not be eligible for discretionary services) diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index 5a263ea536..afdf91074e 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -17,7 +17,7 @@ use std::collections::BTreeMap; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(61, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(62, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -29,6 +29,7 @@ static KNOWN_VERSIONS: Lazy> = Lazy::new(|| { // | leaving the first copy as an example for the next person. 
// v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(62, "allocate-subnet-decommissioned-sleds"), KnownVersion::new(61, "blueprint-add-sled-state"), KnownVersion::new(60, "add-lookup-vmm-by-sled-id-index"), KnownVersion::new(59, "enforce-first-as-default"), diff --git a/nexus/db-model/src/sled_underlay_subnet_allocation.rs b/nexus/db-model/src/sled_underlay_subnet_allocation.rs index 8dae9da4b8..3cb9579f1b 100644 --- a/nexus/db-model/src/sled_underlay_subnet_allocation.rs +++ b/nexus/db-model/src/sled_underlay_subnet_allocation.rs @@ -8,7 +8,7 @@ use omicron_uuid_kinds::SledKind; use uuid::Uuid; /// Underlay allocation for a sled added to an initialized rack -#[derive(Queryable, Insertable, Debug, Clone, Selectable)] +#[derive(Queryable, Insertable, Debug, Clone, PartialEq, Eq, Selectable)] #[diesel(table_name = sled_underlay_subnet_allocation)] pub struct SledUnderlaySubnetAllocation { pub rack_id: Uuid, diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 8e8913f7bd..04901c7785 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -21,6 +21,7 @@ use crate::db::fixed_data::vpc_subnet::DNS_VPC_SUBNET; use crate::db::fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; use crate::db::fixed_data::vpc_subnet::NTP_VPC_SUBNET; use crate::db::identity::Asset; +use crate::db::lookup::LookupPath; use crate::db::model::Dataset; use crate::db::model::IncompleteExternalIp; use crate::db::model::PhysicalDisk; @@ -41,6 +42,7 @@ use nexus_db_model::InitialDnsGroup; use nexus_db_model::PasswordHashString; use nexus_db_model::SiloUser; use nexus_db_model::SiloUserPasswordHash; +use nexus_db_model::SledState; use nexus_db_model::SledUnderlaySubnetAllocation; use nexus_types::deployment::blueprint_zone_type; use nexus_types::deployment::Blueprint; @@ -183,8 +185,8 @@ impl From for Error { pub enum SledUnderlayAllocationResult { /// A new allocation was created New(SledUnderlaySubnetAllocation), - /// A prior allocation was found - Existing(SledUnderlaySubnetAllocation), + /// A prior allocation associated with a commissioned sled was found + CommissionedSled(SledUnderlaySubnetAllocation), } impl DataStore { @@ -327,8 +329,44 @@ impl DataStore { }; for allocation in allocations { if allocation.hw_baseboard_id == new_allocation.hw_baseboard_id { - // We already have an allocation for this sled. - return Ok(SledUnderlayAllocationResult::Existing(allocation)); + // We already have an allocation for this sled, but we need to + // check whether this allocation matches a sled that has been + // decommissioned. (The same physical sled, tracked by + // `hw_baseboard_id`, can be logically removed from the control + // plane via decommissioning, then added back again later, which + // requires allocating a new subnet.) + match LookupPath::new(opctx, self) + .sled_id(allocation.sled_id.into_untyped_uuid()) + .optional_fetch_for(authz::Action::Read) + .await? + .map(|(_, sled)| sled.state()) + { + Some(SledState::Active) => { + // This allocation is for an active sled; return the + // existing allocation. + return Ok( + SledUnderlayAllocationResult::CommissionedSled( + allocation, + ), + ); + } + Some(SledState::Decommissioned) => { + // This allocation was for a now-decommissioned sled; + // ignore it and keep searching. + } + None => { + // This allocation is still "new" in the sense that it + // is assigned to a sled that has not yet upserted + // itself to join the control plane. 
We must return + // `::New(_)` here to ensure idempotence of allocation + // (e.g., if we allocate a sled, but its sled-agent + // crashes before it can upsert itself, we need to be + // able to get the same allocation back again). + return Ok(SledUnderlayAllocationResult::New( + allocation, + )); + } + } } if allocation.subnet_octet == new_allocation.subnet_octet { bail_unless!( @@ -962,7 +1000,6 @@ mod test { }; use crate::db::datastore::test_utils::datastore_test; use crate::db::datastore::Discoverability; - use crate::db::lookup::LookupPath; use crate::db::model::ExternalIp; use crate::db::model::IpKind; use crate::db::model::IpPoolRange; @@ -1190,8 +1227,7 @@ mod test { logctx.cleanup_successful(); } - async fn create_test_sled(db: &DataStore) -> Sled { - let sled_id = Uuid::new_v4(); + async fn create_test_sled(db: &DataStore, sled_id: Uuid) -> Sled { let addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0); let sled_update = SledUpdate::new( sled_id, @@ -1270,9 +1306,9 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let sled1 = create_test_sled(&datastore).await; - let sled2 = create_test_sled(&datastore).await; - let sled3 = create_test_sled(&datastore).await; + let sled1 = create_test_sled(&datastore, Uuid::new_v4()).await; + let sled2 = create_test_sled(&datastore, Uuid::new_v4()).await; + let sled3 = create_test_sled(&datastore, Uuid::new_v4()).await; let service_ip_pool_ranges = vec![IpRange::try_from(( Ipv4Addr::new(1, 2, 3, 4), @@ -1621,7 +1657,7 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let sled = create_test_sled(&datastore).await; + let sled = create_test_sled(&datastore, Uuid::new_v4()).await; // Ask for two Nexus services, with different external IPs. let nexus_ip_start = Ipv4Addr::new(1, 2, 3, 4); @@ -1904,7 +1940,7 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let sled = create_test_sled(&datastore).await; + let sled = create_test_sled(&datastore, Uuid::new_v4()).await; let mut system = SystemDescription::new(); system @@ -2000,7 +2036,7 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let sled = create_test_sled(&datastore).await; + let sled = create_test_sled(&datastore, Uuid::new_v4()).await; let ip = IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)); let service_ip_pool_ranges = vec![IpRange::from(ip)]; @@ -2256,7 +2292,9 @@ mod test { SledUnderlayAllocationResult::New(allocation) => { allocation.subnet_octet } - SledUnderlayAllocationResult::Existing(allocation) => { + SledUnderlayAllocationResult::CommissionedSled( + allocation, + ) => { panic!("unexpected allocation {allocation:?}"); } }, @@ -2276,9 +2314,9 @@ mod test { ); // If we attempt to insert the same baseboards again, we should get the - // existing allocations back. - for (hw_baseboard_id, expected_octet) in - hw_baseboard_ids.into_iter().zip(expected) + // same new allocations back. 
+ for (&hw_baseboard_id, prev_allocation) in + hw_baseboard_ids.iter().zip(&allocations) { match datastore .allocate_sled_underlay_subnet_octets( @@ -2288,17 +2326,134 @@ mod test { ) .await .unwrap() + { + SledUnderlayAllocationResult::New(allocation) => { + assert_eq!(allocation, *prev_allocation); + } + SledUnderlayAllocationResult::CommissionedSled(allocation) => { + panic!("unexpected allocation {allocation:?}"); + } + } + } + + // Pick one of the hw_baseboard_ids and insert a sled record. We should + // get back the `CommissionedSled` allocation result if we retry + // allocation of that baseboard. + create_test_sled( + &datastore, + allocations[0].sled_id.into_untyped_uuid(), + ) + .await; + match datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + hw_baseboard_ids[0], + ) + .await + .unwrap() + { + SledUnderlayAllocationResult::New(allocation) => { + panic!("unexpected allocation {allocation:?}"); + } + SledUnderlayAllocationResult::CommissionedSled(allocation) => { + assert_eq!(allocation, allocations[0]); + } + } + + // If we attempt to insert the same baseboard again and that baseboard + // is only assigned to decommissioned sleds, we should get a new + // allocation. We'll pick one hw baseboard ID, create a `Sled` for it, + // decommission that sled, and confirm we get a new octet, five times in + // a loop (to emulate the same sled being added and decommissioned + // multiple times). + let mut next_expected_octet = *expected.last().unwrap() + 1; + let mut prior_allocation = allocations.last().unwrap().clone(); + let target_hw_baseboard_id = *hw_baseboard_ids.last().unwrap(); + for _ in 0..5 { + // Commission the sled. + let sled = create_test_sled( + &datastore, + prior_allocation.sled_id.into_untyped_uuid(), + ) + .await; + + // If we attempt this same baseboard again, we get the existing + // allocation back. + match datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + target_hw_baseboard_id, + ) + .await + .unwrap() { SledUnderlayAllocationResult::New(allocation) => { panic!("unexpected allocation {allocation:?}"); } - SledUnderlayAllocationResult::Existing(allocation) => { - assert_eq!( - allocation.subnet_octet, expected_octet, - "unexpected octet for {allocation:?}" - ); + SledUnderlayAllocationResult::CommissionedSled(existing) => { + assert_eq!(existing, prior_allocation); } } + + // Decommission the sled. + let (authz_sled,) = LookupPath::new(&opctx, &datastore) + .sled_id(sled.id()) + .lookup_for(authz::Action::Modify) + .await + .expect("found target sled ID"); + datastore + .sled_set_policy_to_expunged(&opctx, &authz_sled) + .await + .expect("expunged sled"); + datastore + .sled_set_state_to_decommissioned(&opctx, &authz_sled) + .await + .expect("decommissioned sled"); + + // Attempt a new allocation for the same hw_baseboard_id. + let allocation = match datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + target_hw_baseboard_id, + ) + .await + .unwrap() + { + SledUnderlayAllocationResult::New(allocation) => allocation, + SledUnderlayAllocationResult::CommissionedSled(allocation) => { + panic!("unexpected existing allocation {allocation:?}"); + } + }; + + // We should get the next octet with a new sled ID. + assert_eq!(allocation.subnet_octet, next_expected_octet); + assert_ne!(allocation.sled_id.into_untyped_uuid(), sled.id()); + prior_allocation = allocation; + + // Ensure if we attempt this same baseboard again, we get the + // same allocation back (the sled hasn't been commissioned yet). 
+ match datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + target_hw_baseboard_id, + ) + .await + .unwrap() + { + SledUnderlayAllocationResult::New(allocation) => { + assert_eq!(prior_allocation, allocation); + } + SledUnderlayAllocationResult::CommissionedSled(existing) => { + panic!("unexpected allocation {existing:?}"); + } + } + + // Bump our expectations for the next iteration. + next_expected_octet += 1; } db.cleanup().await.unwrap(); diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index a7f12d30cd..4b77788c96 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -534,6 +534,11 @@ impl Nexus { &self.id } + /// Return the rack ID for this Nexus instance. + pub fn rack_id(&self) -> Uuid { + self.rack_id + } + /// Return the tunable configuration parameters, e.g. for use in tests. pub fn tunables(&self) -> &Tunables { &self.tunables diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 25c0824ce6..c766446f38 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -790,11 +790,11 @@ impl super::Nexus { .await? { SledUnderlayAllocationResult::New(allocation) => allocation, - SledUnderlayAllocationResult::Existing(allocation) => { + SledUnderlayAllocationResult::CommissionedSled(allocation) => { return Err(Error::ObjectAlreadyExists { type_name: ResourceType::Sled, object_name: format!( - "{} / {} ({})", + "{} ({}): {}", sled.serial, sled.part, allocation.sled_id ), }); diff --git a/nexus/tests/integration_tests/rack.rs b/nexus/tests/integration_tests/rack.rs index a7ebf0f8b6..3e10ebcca4 100644 --- a/nexus/tests/integration_tests/rack.rs +++ b/nexus/tests/integration_tests/rack.rs @@ -5,6 +5,10 @@ use dropshot::ResultsPage; use http::Method; use http::StatusCode; +use nexus_client::types::SledId; +use nexus_db_model::SledBaseboard; +use nexus_db_model::SledSystemHardware; +use nexus_db_model::SledUpdate; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; @@ -17,7 +21,7 @@ use nexus_types::internal_api::params::SledAgentInfo; use nexus_types::internal_api::params::SledRole; use omicron_common::api::external::ByteCount; use omicron_common::api::external::Generation; -use omicron_nexus::TestInterfaces; +use omicron_uuid_kinds::GenericUuid; use uuid::Uuid; type ControlPlaneTestContext = @@ -170,7 +174,7 @@ async fn test_sled_add(cptestctx: &ControlPlaneTestContext) { // Add one of these sleds. let add_url = "/v1/system/hardware/sleds/"; let baseboard = uninitialized_sleds.pop().unwrap().baseboard; - NexusRequest::objects_post( + let sled_id = NexusRequest::objects_post( external_client, add_url, ¶ms::UninitializedSledId { @@ -179,11 +183,53 @@ async fn test_sled_add(cptestctx: &ControlPlaneTestContext) { }, ) .authn_as(AuthnMode::PrivilegedUser) - .execute() + .execute_and_parse_unwrap::() .await - .expect("failed to add sled"); + .id; - // Attempting to add the same sled again should fail. + // Attempting to add the same sled again should succeed with the same sled + // ID: this operation should be idempotent up until the point at which the + // sled is inserted in the db. + let repeat_sled_id = NexusRequest::objects_post( + external_client, + add_url, + ¶ms::UninitializedSledId { + serial: baseboard.serial.clone(), + part: baseboard.part.clone(), + }, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await + .id; + assert_eq!(sled_id, repeat_sled_id); + + // Now upsert the sled. 
+ let nexus = &cptestctx.server.apictx().nexus; + nexus + .datastore() + .sled_upsert(SledUpdate::new( + sled_id.into_untyped_uuid(), + "[::1]:0".parse().unwrap(), + SledBaseboard { + serial_number: baseboard.serial.clone(), + part_number: baseboard.part.clone(), + revision: 0, + }, + SledSystemHardware { + is_scrimlet: false, + usable_hardware_threads: 8, + usable_physical_ram: (1 << 30).try_into().unwrap(), + reservoir_size: (1 << 20).try_into().unwrap(), + }, + nexus.rack_id(), + Generation::new().into(), + )) + .await + .expect("inserted sled"); + + // The sled has been commissioned as part of the rack, so adding it should + // fail. let error: dropshot::HttpErrorResponseBody = NexusRequest::expect_failure_with_body( external_client, diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs index 1975cfaae0..89d8bae660 100644 --- a/nexus/types/src/deployment/planning_input.rs +++ b/nexus/types/src/deployment/planning_input.rs @@ -250,6 +250,14 @@ pub enum SledFilter { /// fetch "all sleds regardless of current policy or state". Commissioned, + /// All sleds that were previously part of the control plane cluster but + /// have been decommissioned. + /// + /// Any sleds matching this filter are expected to no longer be present. + /// This filter is only useful for historical or debugging purposes, such as + /// listing decommissioned sleds via `omdb`. + Decommissioned, + /// Sleds that are eligible for discretionary services. Discretionary, @@ -312,6 +320,7 @@ impl SledPolicy { provision_policy: SledProvisionPolicy::Provisionable, } => match filter { SledFilter::Commissioned => true, + SledFilter::Decommissioned => false, SledFilter::Discretionary => true, SledFilter::InService => true, SledFilter::QueryDuringInventory => true, @@ -322,6 +331,7 @@ impl SledPolicy { provision_policy: SledProvisionPolicy::NonProvisionable, } => match filter { SledFilter::Commissioned => true, + SledFilter::Decommissioned => false, SledFilter::Discretionary => false, SledFilter::InService => true, SledFilter::QueryDuringInventory => true, @@ -330,6 +340,7 @@ impl SledPolicy { }, SledPolicy::Expunged => match filter { SledFilter::Commissioned => true, + SledFilter::Decommissioned => true, SledFilter::Discretionary => false, SledFilter::InService => false, SledFilter::QueryDuringInventory => false, @@ -360,6 +371,7 @@ impl SledState { match self { SledState::Active => match filter { SledFilter::Commissioned => true, + SledFilter::Decommissioned => false, SledFilter::Discretionary => true, SledFilter::InService => true, SledFilter::QueryDuringInventory => true, @@ -368,6 +380,7 @@ impl SledState { }, SledState::Decommissioned => match filter { SledFilter::Commissioned => false, + SledFilter::Decommissioned => true, SledFilter::Discretionary => false, SledFilter::InService => false, SledFilter::QueryDuringInventory => false, diff --git a/schema/crdb/allocate-subnet-decommissioned-sleds/up1.sql b/schema/crdb/allocate-subnet-decommissioned-sleds/up1.sql new file mode 100644 index 0000000000..adffd4a2cf --- /dev/null +++ b/schema/crdb/allocate-subnet-decommissioned-sleds/up1.sql @@ -0,0 +1,2 @@ +ALTER TABLE omicron.public.sled_underlay_subnet_allocation + ALTER PRIMARY KEY USING COLUMNS (hw_baseboard_id, sled_id); diff --git a/schema/crdb/allocate-subnet-decommissioned-sleds/up2.sql b/schema/crdb/allocate-subnet-decommissioned-sleds/up2.sql new file mode 100644 index 0000000000..ba67d093f4 --- /dev/null +++ b/schema/crdb/allocate-subnet-decommissioned-sleds/up2.sql @@ 
-0,0 +1 @@ +DROP INDEX IF EXISTS sled_underlay_subnet_allocation_hw_baseboard_id_key CASCADE; diff --git a/schema/crdb/allocate-subnet-decommissioned-sleds/up3.sql b/schema/crdb/allocate-subnet-decommissioned-sleds/up3.sql new file mode 100644 index 0000000000..f96b3312c9 --- /dev/null +++ b/schema/crdb/allocate-subnet-decommissioned-sleds/up3.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX IF NOT EXISTS commissioned_sled_uniqueness + ON omicron.public.sled (serial_number, part_number) + WHERE sled_state != 'decommissioned'; diff --git a/schema/crdb/allocate-subnet-decommissioned-sleds/up4.sql b/schema/crdb/allocate-subnet-decommissioned-sleds/up4.sql new file mode 100644 index 0000000000..9489a61c2a --- /dev/null +++ b/schema/crdb/allocate-subnet-decommissioned-sleds/up4.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS serial_part_revision_unique CASCADE; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index e66f28d74f..fa0c74aac2 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -148,15 +148,18 @@ CREATE TABLE IF NOT EXISTS omicron.public.sled ( sled_state omicron.public.sled_state NOT NULL, /* Generation number owned and incremented by the sled-agent */ - sled_agent_gen INT8 NOT NULL DEFAULT 1, - - -- This constraint should be upheld, even for deleted disks - -- in the fleet. - CONSTRAINT serial_part_revision_unique UNIQUE ( - serial_number, part_number, revision - ) + sled_agent_gen INT8 NOT NULL DEFAULT 1 ); +-- Add an index that ensures a given physical sled (identified by serial and +-- part number) can only be a commissioned member of the control plane once. +-- +-- TODO Should `sled` reference `hw_baseboard_id` instead of having its own +-- serial/part columns? +CREATE UNIQUE INDEX IF NOT EXISTS commissioned_sled_uniqueness + ON omicron.public.sled (serial_number, part_number) + WHERE sled_state != 'decommissioned'; + /* Add an index which lets us look up sleds on a rack */ CREATE UNIQUE INDEX IF NOT EXISTS lookup_sled_by_rack ON omicron.public.sled ( rack_id, @@ -222,7 +225,7 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_resource_by_sled ON omicron.public.sled CREATE TABLE IF NOT EXISTS omicron.public.sled_underlay_subnet_allocation ( -- The physical identity of the sled -- (foreign key into `hw_baseboard_id` table) - hw_baseboard_id UUID PRIMARY KEY, + hw_baseboard_id UUID, -- The rack to which a sled is being added -- (foreign key into `rack` table) @@ -240,7 +243,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.sled_underlay_subnet_allocation ( -- The octet that extends a /56 rack subnet to a /64 sled subnet -- -- Always between 33 and 255 inclusive - subnet_octet INT2 NOT NULL UNIQUE CHECK (subnet_octet BETWEEN 33 AND 255) + subnet_octet INT2 NOT NULL UNIQUE CHECK (subnet_octet BETWEEN 33 AND 255), + + PRIMARY KEY (hw_baseboard_id, sled_id) ); -- Add an index which allows pagination by {rack_id, sled_id} pairs. @@ -3856,7 +3861,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '61.0.0', NULL) + (TRUE, NOW(), NOW(), '62.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; From 372d980791cdd67071c82692557fd89b6e2951ec Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Tue, 14 May 2024 14:41:43 -0700 Subject: [PATCH 6/9] fix #5743 vs. 
#5751 semantic merge conflict (#5765) --- nexus/reconfigurator/execution/src/sled_state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/reconfigurator/execution/src/sled_state.rs b/nexus/reconfigurator/execution/src/sled_state.rs index aaa5b6bc26..fafc1c2e44 100644 --- a/nexus/reconfigurator/execution/src/sled_state.rs +++ b/nexus/reconfigurator/execution/src/sled_state.rs @@ -90,7 +90,7 @@ mod tests { async fn test_decommission_is_idempotent( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), From 949c6abc31b004dba6c6276831a2f2bd8061dab2 Mon Sep 17 00:00:00 2001 From: Rain Date: Tue, 14 May 2024 15:26:24 -0700 Subject: [PATCH 7/9] [nexus-types] more validation for PlanningInput (#5644) While reconciling the PlanningInput with the blueprint, I was a bit concerned about getting nonsensical inputs (e.g. multiple zones get the same IPs) and the system going haywire. In particular, I was concerned about the Serialize and Deserialize impls which were seemingly added for reconfigurator-cli. However, they were liabilities that didn't check for internal invariants. To address this, this PR introduces the notion of a `TriMap`. A `TriMap` is a 1:1:1 map which can be looked up by any one of three keys. Instead of storing just a regular map, the `PlanningInput` now stores a couple of `TriMap` instances via an intermediate `OmicronZoneNetworkResources` struct. A `TriMap` is implemented as a vector with three indexes. It's a pretty straightforward implementation, and I've also added property-based tests to ensure that a `TriMap` is always valid (including always deserializing to a valid structure). At the moment, a `TriMap` does not allow removing entries. If necessary, removals can be implemented by just marking the entry as dead and removing the keys from the indexes. This is fine for our use case, since `TriMap`s aren't long-lived. I've also factored out the omicron zone IP and NIC code into an `OmicronZoneNetworkResources` struct. In the future, we'll make the `BlueprintBuilder` use this struct, so that blueprints are validated whenever they're being mutated. We can also add this to the general code that validates blueprints. 
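To make the data layout concrete, here is a minimal sketch of the idea
(illustrative only: the names and key types are simplified stand-ins, and the
sketch is hard-coded rather than generic over a `TriMapEntry` trait -- see
`nexus/types/src/deployment/tri_map.rs` in the diff below for the real
implementation):

```
use std::collections::HashMap;

#[derive(Clone, Debug)]
struct Entry {
    zone_id: u64, // stands in for OmicronZoneUuid
    ip_id: u64,   // stands in for ExternalIpUuid
    ip: String,   // stands in for the IP-based key
}

#[derive(Default)]
struct TriMapSketch {
    entries: Vec<Entry>,
    // Each index map stores offsets into `entries`.
    by_zone_id: HashMap<u64, usize>,
    by_ip_id: HashMap<u64, usize>,
    by_ip: HashMap<String, usize>,
}

impl TriMapSketch {
    // Append-only insert: reject the entry if *any* of its three keys is
    // already present, before touching any of the indexes.
    fn insert_no_dups(&mut self, entry: Entry) -> Result<(), String> {
        if self.by_zone_id.contains_key(&entry.zone_id)
            || self.by_ip_id.contains_key(&entry.ip_id)
            || self.by_ip.contains_key(&entry.ip)
        {
            return Err(format!("duplicate key in {entry:?}"));
        }
        let ix = self.entries.len();
        self.by_zone_id.insert(entry.zone_id, ix);
        self.by_ip_id.insert(entry.ip_id, ix);
        self.by_ip.insert(entry.ip.clone(), ix);
        self.entries.push(entry);
        Ok(())
    }

    // Lookup by any one of the three keys is a hashmap hit plus an index.
    fn get_by_zone_id(&self, zone_id: u64) -> Option<&Entry> {
        self.by_zone_id.get(&zone_id).map(|ix| &self.entries[*ix])
    }
}
```

Deserialization goes through the same duplicate-checking insert path as
regular inserts, so a map that deserializes successfully is known to satisfy
the 1:1:1 invariants.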
--- Cargo.lock | 3 + dev-tools/reconfigurator-cli/src/main.rs | 5 +- nexus/db-model/src/network_interface.rs | 4 +- .../db-queries/src/db/datastore/deployment.rs | 5 +- nexus/reconfigurator/planning/src/example.rs | 5 +- nexus/types/Cargo.toml | 5 + .../deployment/tri_map.txt | 7 + nexus/types/src/deployment.rs | 17 +- .../types/src/deployment/network_resources.rs | 307 +++++++++++ nexus/types/src/deployment/planning_input.rs | 345 +++++------- nexus/types/src/deployment/tri_map.rs | 511 ++++++++++++++++++ nexus/types/src/deployment/zone_type.rs | 2 +- uuid-kinds/src/lib.rs | 1 + 13 files changed, 982 insertions(+), 235 deletions(-) create mode 100644 nexus/types/proptest-regressions/deployment/tri_map.txt create mode 100644 nexus/types/src/deployment/network_resources.rs create mode 100644 nexus/types/src/deployment/tri_map.rs diff --git a/Cargo.lock b/Cargo.lock index f0ff8138e8..e1e445cc3c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4899,6 +4899,7 @@ dependencies = [ "base64 0.22.1", "chrono", "clap", + "derive-where", "dns-service-client", "futures", "gateway-client", @@ -4911,6 +4912,7 @@ dependencies = [ "omicron-workspace-hack", "openssl", "parse-display", + "proptest", "schemars", "serde", "serde_json", @@ -4921,6 +4923,7 @@ dependencies = [ "steno", "strum", "tabled", + "test-strategy", "thiserror", "uuid", ] diff --git a/dev-tools/reconfigurator-cli/src/main.rs b/dev-tools/reconfigurator-cli/src/main.rs index 72add6ce8c..f088c9d97d 100644 --- a/dev-tools/reconfigurator-cli/src/main.rs +++ b/dev-tools/reconfigurator-cli/src/main.rs @@ -32,7 +32,9 @@ use nexus_types::inventory::SledRole; use omicron_common::api::external::Generation; use omicron_common::api::external::Name; use omicron_uuid_kinds::CollectionUuid; +use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::VnicUuid; use reedline::{Reedline, Signal}; use std::collections::BTreeMap; use std::io::BufRead; @@ -146,7 +148,8 @@ impl ReconfiguratorSim { .add_omicron_zone_external_ip(zone.id, external_ip) .context("adding omicron zone external IP")?; let nic = OmicronZoneNic { - id: nic.id, + // TODO-cleanup use `TypedUuid` everywhere + id: VnicUuid::from_untyped_uuid(nic.id), mac: nic.mac, ip: nic.ip, slot: nic.slot, diff --git a/nexus/db-model/src/network_interface.rs b/nexus/db-model/src/network_interface.rs index ff774699d6..8520afdb76 100644 --- a/nexus/db-model/src/network_interface.rs +++ b/nexus/db-model/src/network_interface.rs @@ -17,7 +17,9 @@ use ipnetwork::NetworkSize; use nexus_types::external_api::params; use nexus_types::identity::Resource; use omicron_common::api::{external, internal}; +use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::VnicUuid; use sled_agent_client::ZoneKind; use uuid::Uuid; @@ -207,7 +209,7 @@ impl TryFrom<&'_ ServiceNetworkInterface> }); } Ok(Self { - id: nic.id(), + id: VnicUuid::from_untyped_uuid(nic.id()), mac: *nic.mac, ip: nic.ip.ip(), slot: *nic.slot, diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 7359f1725b..09bc2eef0f 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -1346,6 +1346,7 @@ mod tests { use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; + use once_cell::sync::Lazy; use pretty_assertions::assert_eq; use rand::thread_rng; use rand::Rng; @@ -1353,8 +1354,8 @@ mod tests { use std::mem; 
use std::net::Ipv6Addr; - static EMPTY_PLANNING_INPUT: PlanningInput = - PlanningInputBuilder::empty_input(); + static EMPTY_PLANNING_INPUT: Lazy = + Lazy::new(|| PlanningInputBuilder::empty_input()); // This is a not-super-future-maintainer-friendly helper to check that all // the subtables related to blueprints have been pruned of a specific diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs index 24dbbd15ac..f8748be758 100644 --- a/nexus/reconfigurator/planning/src/example.rs +++ b/nexus/reconfigurator/planning/src/example.rs @@ -13,7 +13,9 @@ use nexus_types::deployment::OmicronZoneNic; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledFilter; use nexus_types::inventory::Collection; +use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SledKind; +use omicron_uuid_kinds::VnicUuid; use typed_rng::TypedUuidRng; pub struct ExampleSystem { @@ -105,7 +107,8 @@ impl ExampleSystem { .add_omicron_zone_nic( service_id, OmicronZoneNic { - id: nic.id, + // TODO-cleanup use `TypedUuid` everywhere + id: VnicUuid::from_untyped_uuid(nic.id), mac: nic.mac, ip: nic.ip, slot: nic.slot, diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index 372cee858a..8f76633416 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -12,6 +12,7 @@ anyhow.workspace = true chrono.workspace = true clap.workspace = true base64.workspace = true +derive-where.workspace = true futures.workspace = true humantime.workspace = true ipnetwork.workspace = true @@ -38,3 +39,7 @@ omicron-common.workspace = true omicron-passwords.workspace = true omicron-workspace-hack.workspace = true sled-agent-client.workspace = true + +[dev-dependencies] +proptest.workspace = true +test-strategy.workspace = true diff --git a/nexus/types/proptest-regressions/deployment/tri_map.txt b/nexus/types/proptest-regressions/deployment/tri_map.txt new file mode 100644 index 0000000000..c3f4260f52 --- /dev/null +++ b/nexus/types/proptest-regressions/deployment/tri_map.txt @@ -0,0 +1,7 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. 
+cc bafcbc817cff65814a6f3233f1ef3d6c36f75c37ad35175d17d1c8484a734034 # shrinks to input = _ProptestOpsArgs { initial: {(0, '$', ""): "", (0, ' ', ""): ""}, ops: [] } diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index b7b5bf6aac..a577c4978c 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -42,15 +42,22 @@ use strum::IntoEnumIterator; use thiserror::Error; use uuid::Uuid; +mod network_resources; mod planning_input; +mod tri_map; mod zone_type; +pub use network_resources::AddNetworkResourceError; +pub use network_resources::OmicronZoneExternalFloatingAddr; +pub use network_resources::OmicronZoneExternalFloatingIp; +pub use network_resources::OmicronZoneExternalIp; +pub use network_resources::OmicronZoneExternalIpEntry; +pub use network_resources::OmicronZoneExternalIpKey; +pub use network_resources::OmicronZoneExternalSnatIp; +pub use network_resources::OmicronZoneNetworkResources; +pub use network_resources::OmicronZoneNic; +pub use network_resources::OmicronZoneNicEntry; pub use planning_input::DiskFilter; -pub use planning_input::OmicronZoneExternalFloatingAddr; -pub use planning_input::OmicronZoneExternalFloatingIp; -pub use planning_input::OmicronZoneExternalIp; -pub use planning_input::OmicronZoneExternalSnatIp; -pub use planning_input::OmicronZoneNic; pub use planning_input::PlanningInput; pub use planning_input::PlanningInputBuildError; pub use planning_input::PlanningInputBuilder; diff --git a/nexus/types/src/deployment/network_resources.rs b/nexus/types/src/deployment/network_resources.rs new file mode 100644 index 0000000000..15f495d87a --- /dev/null +++ b/nexus/types/src/deployment/network_resources.rs @@ -0,0 +1,307 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use super::tri_map::TriMap; +use super::tri_map::TriMapEntry; +use anyhow::anyhow; +use omicron_common::api::external::MacAddr; +use omicron_common::api::internal::shared::SourceNatConfig; +use omicron_uuid_kinds::ExternalIpUuid; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::VnicUuid; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use std::net::IpAddr; +use std::net::SocketAddr; +use thiserror::Error; + +/// Tracker and validator for network resources allocated to Omicron-managed +/// zones. +/// +/// ## Implementation notes +/// +/// `OmicronZoneNetworkResources` consists of two 1:1:1 "trijective" maps: +/// +/// 1. Providing a unique map for Omicron zone IDs, external IP IDs, and +/// external IPs. +/// 2. Providing a unique map for Omicron zone IDs, vNIC IDs, and vNICs. +/// +/// One question that arises: should there instead be a single 1:1:1:1:1 map? +/// In other words, is there a 1:1 mapping between external IPs and vNICs as +/// well? The answer is "generally yes", but: +/// +/// - They're not stored in the database that way, and it's possible that +/// there's some divergence. +/// - We currently don't plan to get any utility out of asserting the 1:1:1:1:1 +/// map. The main planned use of this is for expunged zone garbage collection +/// -- while that benefits from trijective maps tremendously, there's no +/// additional value in asserting a unique mapping between external IPs and +/// vNICs. +/// +/// So we use two separate maps for now. But a single map is always a +/// possibility in the future, if required. 
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct OmicronZoneNetworkResources {
+    /// external IPs allocated to Omicron zones
+    omicron_zone_external_ips: TriMap<OmicronZoneExternalIpEntry>,
+
+    /// vNICs allocated to Omicron zones
+    omicron_zone_nics: TriMap<OmicronZoneNicEntry>,
+}
+
+impl OmicronZoneNetworkResources {
+    pub fn new() -> Self {
+        Self {
+            omicron_zone_external_ips: TriMap::new(),
+            omicron_zone_nics: TriMap::new(),
+        }
+    }
+
+    pub fn add_external_ip(
+        &mut self,
+        zone_id: OmicronZoneUuid,
+        ip: OmicronZoneExternalIp,
+    ) -> Result<(), AddNetworkResourceError> {
+        let entry = OmicronZoneExternalIpEntry { zone_id, ip };
+        self.omicron_zone_external_ips.insert_no_dups(entry).map_err(|err| {
+            AddNetworkResourceError::DuplicateOmicronZoneExternalIp {
+                zone_id,
+                ip,
+                err: anyhow!(err),
+            }
+        })
+    }
+
+    pub fn add_nic(
+        &mut self,
+        zone_id: OmicronZoneUuid,
+        nic: OmicronZoneNic,
+    ) -> Result<(), AddNetworkResourceError> {
+        let entry = OmicronZoneNicEntry { zone_id, nic: nic.clone() };
+        self.omicron_zone_nics.insert_no_dups(entry).map_err(|err| {
+            AddNetworkResourceError::DuplicateOmicronZoneNic {
+                zone_id,
+                nic,
+                err: anyhow!(err),
+            }
+        })
+    }
+
+    pub fn get_external_ip_by_zone_id(
+        &self,
+        zone_id: OmicronZoneUuid,
+    ) -> Option<&OmicronZoneExternalIpEntry> {
+        self.omicron_zone_external_ips.get1(&zone_id)
+    }
+
+    pub fn get_external_ip_by_external_ip_id(
+        &self,
+        ip: ExternalIpUuid,
+    ) -> Option<&OmicronZoneExternalIpEntry> {
+        self.omicron_zone_external_ips.get2(&ip)
+    }
+
+    pub fn get_external_ip_by_ip(
+        &self,
+        ip: OmicronZoneExternalIpKey,
+    ) -> Option<&OmicronZoneExternalIpEntry> {
+        self.omicron_zone_external_ips.get3(&ip)
+    }
+
+    pub fn get_nic_by_zone_id(
+        &self,
+        zone_id: OmicronZoneUuid,
+    ) -> Option<&OmicronZoneNicEntry> {
+        self.omicron_zone_nics.get1(&zone_id)
+    }
+
+    pub fn get_nic_by_vnic_id(
+        &self,
+        vnic_id: VnicUuid,
+    ) -> Option<&OmicronZoneNicEntry> {
+        self.omicron_zone_nics.get2(&vnic_id)
+    }
+
+    pub fn get_nic_by_mac(&self, mac: MacAddr) -> Option<&OmicronZoneNicEntry> {
+        self.omicron_zone_nics.get3(&mac)
+    }
+}
+
+/// External IP variants possible for Omicron-managed zones.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+pub enum OmicronZoneExternalIp {
+    Floating(OmicronZoneExternalFloatingIp),
+    Snat(OmicronZoneExternalSnatIp),
+    // We may eventually want `Ephemeral(_)` too (arguably Nexus could be
+    // ephemeral?), but for now we only have Floating and Snat uses.
+}
+
+impl OmicronZoneExternalIp {
+    pub fn id(&self) -> ExternalIpUuid {
+        match self {
+            OmicronZoneExternalIp::Floating(ext) => ext.id,
+            OmicronZoneExternalIp::Snat(ext) => ext.id,
+        }
+    }
+
+    pub fn ip(&self) -> IpAddr {
+        match self {
+            OmicronZoneExternalIp::Floating(ext) => ext.ip,
+            OmicronZoneExternalIp::Snat(ext) => ext.snat_cfg.ip,
+        }
+    }
+
+    pub fn ip_key(&self) -> OmicronZoneExternalIpKey {
+        match self {
+            OmicronZoneExternalIp::Floating(ip) => {
+                OmicronZoneExternalIpKey::Floating(ip.ip)
+            }
+            OmicronZoneExternalIp::Snat(snat) => {
+                OmicronZoneExternalIpKey::Snat(snat.snat_cfg)
+            }
+        }
+    }
+}
+
+/// An IP-based key suitable for uniquely identifying an
+/// [`OmicronZoneExternalIp`].
+///
+/// We can't use the IP itself to uniquely identify an external IP because SNAT
+/// IPs can have overlapping addresses.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub enum OmicronZoneExternalIpKey {
+    Floating(IpAddr),
+    Snat(SourceNatConfig),
+}
+
+/// Floating external IP allocated to an Omicron-managed zone.
+/// +/// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields +/// necessary for blueprint planning, and requires that the zone have a single +/// IP. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, +)] +pub struct OmicronZoneExternalFloatingIp { + pub id: ExternalIpUuid, + pub ip: IpAddr, +} + +/// Floating external address with port allocated to an Omicron-managed zone. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, +)] +pub struct OmicronZoneExternalFloatingAddr { + pub id: ExternalIpUuid, + pub addr: SocketAddr, +} + +impl OmicronZoneExternalFloatingAddr { + pub fn into_ip(self) -> OmicronZoneExternalFloatingIp { + OmicronZoneExternalFloatingIp { id: self.id, ip: self.addr.ip() } + } +} + +/// SNAT (outbound) external IP allocated to an Omicron-managed zone. +/// +/// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields +/// necessary for blueprint planning, and requires that the zone have a single +/// IP. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, +)] +pub struct OmicronZoneExternalSnatIp { + pub id: ExternalIpUuid, + pub snat_cfg: SourceNatConfig, +} + +/// Network interface allocated to an Omicron-managed zone. +/// +/// This is a slimmer `nexus_db_model::ServiceNetworkInterface` that only stores +/// the fields necessary for blueprint planning. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct OmicronZoneNic { + pub id: VnicUuid, + pub mac: MacAddr, + pub ip: IpAddr, + pub slot: u8, + pub primary: bool, +} + +/// A pair of an Omicron zone ID and an external IP. +/// +/// Part of [`OmicronZoneNetworkResources`]. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct OmicronZoneExternalIpEntry { + pub zone_id: OmicronZoneUuid, + pub ip: OmicronZoneExternalIp, +} + +/// Specification for the tri-map of Omicron zone external IPs. +impl TriMapEntry for OmicronZoneExternalIpEntry { + type K1 = OmicronZoneUuid; + type K2 = ExternalIpUuid; + + // Note: cannot use IpAddr here, because SNAT IPs can overlap as long as + // their port blocks are disjoint. + type K3 = OmicronZoneExternalIpKey; + + fn key1(&self) -> Self::K1 { + self.zone_id + } + + fn key2(&self) -> Self::K2 { + self.ip.id() + } + + fn key3(&self) -> Self::K3 { + self.ip.ip_key() + } +} + +/// A pair of an Omicron zone ID and a network interface. +/// +/// Part of [`OmicronZoneNetworkResources`]. 
+#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct OmicronZoneNicEntry { + zone_id: OmicronZoneUuid, + nic: OmicronZoneNic, +} + +impl TriMapEntry for OmicronZoneNicEntry { + type K1 = OmicronZoneUuid; + type K2 = VnicUuid; + type K3 = MacAddr; + + fn key1(&self) -> Self::K1 { + self.zone_id + } + + fn key2(&self) -> Self::K2 { + self.nic.id + } + + fn key3(&self) -> Self::K3 { + self.nic.mac + } +} + +#[derive(Debug, Error)] +pub enum AddNetworkResourceError { + #[error("associating Omicron zone {zone_id} with {ip:?} failed due to duplicates")] + DuplicateOmicronZoneExternalIp { + zone_id: OmicronZoneUuid, + ip: OmicronZoneExternalIp, + #[source] + err: anyhow::Error, + }, + #[error("associating Omicron zone {zone_id} with {nic:?} failed due to duplicates")] + DuplicateOmicronZoneNic { + zone_id: OmicronZoneUuid, + nic: OmicronZoneNic, + #[source] + err: anyhow::Error, + }, +} diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs index 89d8bae660..ccb15b858a 100644 --- a/nexus/types/src/deployment/planning_input.rs +++ b/nexus/types/src/deployment/planning_input.rs @@ -5,34 +5,136 @@ //! Types describing inputs the Reconfigurator needs to plan and produce new //! blueprints. +use super::AddNetworkResourceError; +use super::OmicronZoneExternalIp; +use super::OmicronZoneNetworkResources; +use super::OmicronZoneNic; use crate::external_api::views::PhysicalDiskPolicy; use crate::external_api::views::PhysicalDiskState; use crate::external_api::views::SledPolicy; use crate::external_api::views::SledProvisionPolicy; use crate::external_api::views::SledState; use clap::ValueEnum; +use ipnetwork::IpNetwork; use omicron_common::address::IpRange; use omicron_common::address::Ipv6Subnet; use omicron_common::address::SLED_PREFIX; use omicron_common::api::external::Generation; -use omicron_common::api::external::MacAddr; -use omicron_common::api::internal::shared::SourceNatConfig; use omicron_common::api::internal::shared::SourceNatConfigError; use omicron_common::disk::DiskIdentity; -use omicron_uuid_kinds::ExternalIpUuid; use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; -use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; use std::collections::btree_map::Entry; use std::collections::BTreeMap; -use std::net::IpAddr; -use std::net::SocketAddr; use strum::IntoEnumIterator; -use uuid::Uuid; + +/// Policy and database inputs to the Reconfigurator planner +/// +/// The primary inputs to the planner are the parent (either a parent blueprint +/// or an inventory collection) and this structure. This type holds the +/// fleet-wide policy as well as any additional information fetched from CRDB +/// that the planner needs to make decisions. +/// +/// The current policy is pretty limited. It's aimed primarily at supporting +/// the add/remove sled use case. +/// +/// The planning input has some internal invariants that code outside of this +/// module can rely on. They include: +/// +/// - Each Omicron zone has at most one external IP and at most one vNIC. +/// - A given external IP or vNIC is only associated with a single Omicron +/// zone. 
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct PlanningInput {
+    /// fleet-wide policy
+    policy: Policy,
+
+    /// current internal DNS version
+    internal_dns_version: Generation,
+
+    /// current external DNS version
+    external_dns_version: Generation,
+
+    /// per-sled policy and resources
+    sleds: BTreeMap<SledUuid, SledDetails>,
+
+    /// per-zone network resources
+    network_resources: OmicronZoneNetworkResources,
+}
+
+impl PlanningInput {
+    pub fn internal_dns_version(&self) -> Generation {
+        self.internal_dns_version
+    }
+
+    pub fn external_dns_version(&self) -> Generation {
+        self.external_dns_version
+    }
+
+    pub fn target_nexus_zone_count(&self) -> usize {
+        self.policy.target_nexus_zone_count
+    }
+
+    pub fn service_ip_pool_ranges(&self) -> &[IpRange] {
+        &self.policy.service_ip_pool_ranges
+    }
+
+    pub fn all_sleds(
+        &self,
+        filter: SledFilter,
+    ) -> impl Iterator<Item = (SledUuid, &SledDetails)> + '_ {
+        self.sleds.iter().filter_map(move |(&sled_id, details)| {
+            filter
+                .matches_policy_and_state(details.policy, details.state)
+                .then_some((sled_id, details))
+        })
+    }
+
+    pub fn all_sled_ids(
+        &self,
+        filter: SledFilter,
+    ) -> impl Iterator<Item = SledUuid> + '_ {
+        self.all_sleds(filter).map(|(sled_id, _)| sled_id)
+    }
+
+    pub fn all_sled_resources(
+        &self,
+        filter: SledFilter,
+    ) -> impl Iterator<Item = (SledUuid, &SledResources)> + '_ {
+        self.all_sleds(filter)
+            .map(|(sled_id, details)| (sled_id, &details.resources))
+    }
+
+    pub fn sled_policy(&self, sled_id: &SledUuid) -> Option<SledPolicy> {
+        self.sleds.get(sled_id).map(|details| details.policy)
+    }
+
+    pub fn sled_resources(&self, sled_id: &SledUuid) -> Option<&SledResources> {
+        self.sleds.get(sled_id).map(|details| &details.resources)
+    }
+
+    pub fn network_resources(&self) -> &OmicronZoneNetworkResources {
+        &self.network_resources
+    }
+
+    /// Convert this `PlanningInput` back into a [`PlanningInputBuilder`]
+    ///
+    /// This is primarily useful for tests that want to mutate an existing
+    /// [`PlanningInput`].
+    pub fn into_builder(self) -> PlanningInputBuilder {
+        PlanningInputBuilder {
+            policy: self.policy,
+            internal_dns_version: self.internal_dns_version,
+            external_dns_version: self.external_dns_version,
+            sleds: self.sleds,
+            network_resources: self.network_resources,
+        }
+    }
+}
 
 /// Describes a single disk already managed by the sled.
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -152,85 +254,6 @@ impl SledResources {
     }
 }
 
-/// External IP variants possible for Omicron-managed zones.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
-pub enum OmicronZoneExternalIp {
-    Floating(OmicronZoneExternalFloatingIp),
-    Snat(OmicronZoneExternalSnatIp),
-    // We may eventually want `Ephemeral(_)` too (arguably Nexus could be
-    // ephemeral?), but for now we only have Floating and Snat uses.
-}
-
-impl OmicronZoneExternalIp {
-    pub fn id(&self) -> ExternalIpUuid {
-        match self {
-            OmicronZoneExternalIp::Floating(ext) => ext.id,
-            OmicronZoneExternalIp::Snat(ext) => ext.id,
-        }
-    }
-
-    pub fn ip(&self) -> IpAddr {
-        match self {
-            OmicronZoneExternalIp::Floating(ext) => ext.ip,
-            OmicronZoneExternalIp::Snat(ext) => ext.snat_cfg.ip,
-        }
-    }
-}
-
-/// Floating external IP allocated to an Omicron-managed zone.
-///
-/// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields
-/// necessary for blueprint planning, and requires that the zone have a single
-/// IP.
-#[derive( - Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, -)] -pub struct OmicronZoneExternalFloatingIp { - pub id: ExternalIpUuid, - pub ip: IpAddr, -} - -/// Floating external address with port allocated to an Omicron-managed zone. -#[derive( - Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, -)] -pub struct OmicronZoneExternalFloatingAddr { - pub id: ExternalIpUuid, - pub addr: SocketAddr, -} - -impl OmicronZoneExternalFloatingAddr { - pub fn into_ip(self) -> OmicronZoneExternalFloatingIp { - OmicronZoneExternalFloatingIp { id: self.id, ip: self.addr.ip() } - } -} - -/// SNAT (outbound) external IP allocated to an Omicron-managed zone. -/// -/// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields -/// necessary for blueprint planning, and requires that the zone have a single -/// IP. -#[derive( - Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, -)] -pub struct OmicronZoneExternalSnatIp { - pub id: ExternalIpUuid, - pub snat_cfg: SourceNatConfig, -} - -/// Network interface allocated to an Omicron-managed zone. -/// -/// This is a slimmer `nexus_db_model::ServiceNetworkInterface` that only stores -/// the fields necessary for blueprint planning. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct OmicronZoneNic { - pub id: Uuid, - pub mac: MacAddr, - pub ip: IpAddr, - pub slot: u8, - pub primary: bool, -} - /// Filters that apply to sleds. /// /// This logic lives here rather than within the individual components making @@ -426,37 +449,6 @@ pub struct Policy { pub target_nexus_zone_count: usize, } -/// Policy and database inputs to the Reconfigurator planner -/// -/// The primary inputs to the planner are the parent (either a parent blueprint -/// or an inventory collection) and this structure. This type holds the -/// fleet-wide policy as well as any additional information fetched from CRDB -/// that the planner needs to make decisions. -/// -/// -/// The current policy is pretty limited. It's aimed primarily at supporting -/// the add/remove sled use case. 
-#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct PlanningInput { - /// fleet-wide policy - policy: Policy, - - /// current internal DNS version - internal_dns_version: Generation, - - /// current external DNS version - external_dns_version: Generation, - - /// per-sled policy and resources - sleds: BTreeMap, - - /// external IPs allocated to Omicron zones - omicron_zone_external_ips: BTreeMap, - - /// vNICs allocated to Omicron zones - omicron_zone_nics: BTreeMap, -} - #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SledDetails { /// current sled policy @@ -467,82 +459,14 @@ pub struct SledDetails { pub resources: SledResources, } -impl PlanningInput { - pub fn internal_dns_version(&self) -> Generation { - self.internal_dns_version - } - - pub fn external_dns_version(&self) -> Generation { - self.external_dns_version - } - - pub fn target_nexus_zone_count(&self) -> usize { - self.policy.target_nexus_zone_count - } - - pub fn service_ip_pool_ranges(&self) -> &[IpRange] { - &self.policy.service_ip_pool_ranges - } - - pub fn all_sleds( - &self, - filter: SledFilter, - ) -> impl Iterator + '_ { - self.sleds.iter().filter_map(move |(&sled_id, details)| { - filter - .matches_policy_and_state(details.policy, details.state) - .then_some((sled_id, details)) - }) - } - - pub fn all_sled_ids( - &self, - filter: SledFilter, - ) -> impl Iterator + '_ { - self.all_sleds(filter).map(|(sled_id, _)| sled_id) - } - - pub fn all_sled_resources( - &self, - filter: SledFilter, - ) -> impl Iterator + '_ { - self.all_sleds(filter) - .map(|(sled_id, details)| (sled_id, &details.resources)) - } - - pub fn sled_policy(&self, sled_id: &SledUuid) -> Option { - self.sleds.get(sled_id).map(|details| details.policy) - } - - pub fn sled_resources(&self, sled_id: &SledUuid) -> Option<&SledResources> { - self.sleds.get(sled_id).map(|details| &details.resources) - } - - // Convert this `PlanningInput` back into a [`PlanningInputBuilder`] - // - // This is primarily useful for tests that want to mutate an existing - // `PlanningInput`. - pub fn into_builder(self) -> PlanningInputBuilder { - PlanningInputBuilder { - policy: self.policy, - internal_dns_version: self.internal_dns_version, - external_dns_version: self.external_dns_version, - sleds: self.sleds, - omicron_zone_external_ips: self.omicron_zone_external_ips, - omicron_zone_nics: self.omicron_zone_nics, - } - } -} - #[derive(Debug, thiserror::Error)] pub enum PlanningInputBuildError { #[error("duplicate sled ID: {0}")] DuplicateSledId(SledUuid), - #[error("Omicron zone {zone_id} already has an external IP ({ip:?})")] - DuplicateOmicronZoneExternalIp { - zone_id: OmicronZoneUuid, - ip: OmicronZoneExternalIp, - }, + #[error("Omicron zone {zone_id} has a range of IPs ({ip:?}), only a single IP is supported")] + NotSingleIp { zone_id: OmicronZoneUuid, ip: IpNetwork }, + #[error(transparent)] + AddNetworkResource(#[from] AddNetworkResourceError), #[error("Omicron zone {0} has an ephemeral IP (unsupported)")] EphemeralIpUnsupported(OmicronZoneUuid), #[error("Omicron zone {zone_id} has a bad SNAT config")] @@ -551,8 +475,6 @@ pub enum PlanningInputBuildError { #[source] err: SourceNatConfigError, }, - #[error("Omicron zone {zone_id} already has a NIC ({nic:?})")] - DuplicateOmicronZoneNic { zone_id: OmicronZoneUuid, nic: OmicronZoneNic }, } /// Constructor for [`PlanningInput`]. 
@@ -562,12 +484,12 @@ pub struct PlanningInputBuilder {
     internal_dns_version: Generation,
     external_dns_version: Generation,
     sleds: BTreeMap<SledUuid, SledDetails>,
-    omicron_zone_external_ips: BTreeMap<OmicronZoneUuid, OmicronZoneExternalIp>,
-    omicron_zone_nics: BTreeMap<OmicronZoneUuid, OmicronZoneNic>,
+    network_resources: OmicronZoneNetworkResources,
 }
 
 impl PlanningInputBuilder {
-    pub const fn empty_input() -> PlanningInput {
+    pub fn empty_input() -> PlanningInput {
+        // This empty input is known to be valid.
         PlanningInput {
             policy: Policy {
                 service_ip_pool_ranges: Vec::new(),
@@ -576,8 +498,7 @@ impl PlanningInputBuilder {
             internal_dns_version: Generation::new(),
             external_dns_version: Generation::new(),
             sleds: BTreeMap::new(),
-            omicron_zone_external_ips: BTreeMap::new(),
-            omicron_zone_nics: BTreeMap::new(),
+            network_resources: OmicronZoneNetworkResources::new(),
         }
     }
 
@@ -591,8 +512,7 @@ impl PlanningInputBuilder {
             internal_dns_version,
             external_dns_version,
             sleds: BTreeMap::new(),
-            omicron_zone_external_ips: BTreeMap::new(),
-            omicron_zone_nics: BTreeMap::new(),
+            network_resources: OmicronZoneNetworkResources::new(),
         }
     }
 
@@ -617,18 +537,7 @@ impl PlanningInputBuilder {
         zone_id: OmicronZoneUuid,
         ip: OmicronZoneExternalIp,
     ) -> Result<(), PlanningInputBuildError> {
-        match self.omicron_zone_external_ips.entry(zone_id) {
-            Entry::Vacant(slot) => {
-                slot.insert(ip);
-                Ok(())
-            }
-            Entry::Occupied(prev) => {
-                Err(PlanningInputBuildError::DuplicateOmicronZoneExternalIp {
-                    zone_id,
-                    ip: *prev.get(),
-                })
-            }
-        }
+        Ok(self.network_resources.add_external_ip(zone_id, ip)?)
     }
 
     pub fn add_omicron_zone_nic(
@@ -636,18 +545,7 @@ impl PlanningInputBuilder {
         zone_id: OmicronZoneUuid,
         nic: OmicronZoneNic,
     ) -> Result<(), PlanningInputBuildError> {
-        match self.omicron_zone_nics.entry(zone_id) {
-            Entry::Vacant(slot) => {
-                slot.insert(nic);
-                Ok(())
-            }
-            Entry::Occupied(prev) => {
-                Err(PlanningInputBuildError::DuplicateOmicronZoneNic {
-                    zone_id,
-                    nic: prev.get().clone(),
-                })
-            }
-        }
+        Ok(self.network_resources.add_nic(zone_id, nic)?)
     }
 
     pub fn policy_mut(&mut self) -> &mut Policy {
@@ -676,8 +574,7 @@ impl PlanningInputBuilder {
             internal_dns_version: self.internal_dns_version,
             external_dns_version: self.external_dns_version,
             sleds: self.sleds,
-            omicron_zone_external_ips: self.omicron_zone_external_ips,
-            omicron_zone_nics: self.omicron_zone_nics,
+            network_resources: self.network_resources,
         }
     }
 }
diff --git a/nexus/types/src/deployment/tri_map.rs b/nexus/types/src/deployment/tri_map.rs
new file mode 100644
index 0000000000..52b64aec43
--- /dev/null
+++ b/nexus/types/src/deployment/tri_map.rs
@@ -0,0 +1,511 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use std::{
+    borrow::Borrow,
+    collections::{hash_map, BTreeSet, HashMap},
+    fmt,
+    hash::Hash,
+};
+
+use derive_where::derive_where;
+use serde::{Deserialize, Serialize, Serializer};
+
+/// An append-only 1:1:1 (trijective) map for three keys and a value.
+///
+/// The storage mechanism is a vector of entries, with indexes into that vector
+/// stored in three hashmaps. This allows for efficient lookups by any of the
+/// three keys, while preventing duplicates.
+///
+/// Not totally generic yet, just meant for the deployment use case.
+#[derive_where(Clone, Debug, Default)]
+pub(crate) struct TriMap<T: TriMapEntry> {
+    entries: Vec<T>,
+    // Invariant: the values (usize) in these maps are valid indexes into
+    // `entries`, and are a 1:1 mapping.
+    k1_to_entry: HashMap<T::K1, usize>,
+    k2_to_entry: HashMap<T::K2, usize>,
+    k3_to_entry: HashMap<T::K3, usize>,
+}
+
+// Note: Eq and PartialEq are not implemented for TriMap. Implementing them
+// would need to be done with care, because TriMap is not semantically like an
+// IndexMap: two maps are equivalent even if their entries are in a different
+// order.
+
+/// The `Serialize` impl for `TriMap` serializes just the list of entries.
+impl<T: TriMapEntry> Serialize for TriMap<T>
+where
+    T: Serialize,
+{
+    fn serialize<S: Serializer>(
+        &self,
+        serializer: S,
+    ) -> Result<S::Ok, S::Error> {
+        // Serialize just the entries -- don't serialize the indexes. We'll
+        // rebuild the indexes on deserialization.
+        self.entries.serialize(serializer)
+    }
+}
+
+/// The `Deserialize` impl for `TriMap` deserializes the list of entries and
+/// then rebuilds the indexes, producing an error if there are any duplicates.
+impl<'de, T: TriMapEntry> Deserialize<'de> for TriMap<T>
+where
+    T: Deserialize<'de>,
+{
+    fn deserialize<D: serde::Deserializer<'de>>(
+        deserializer: D,
+    ) -> Result<Self, D::Error> {
+        // First, deserialize the entries.
+        let entries = Vec::<T>::deserialize(deserializer)?;
+
+        // Now build a map from scratch, inserting the entries sequentially.
+        // This will catch issues with duplicates.
+        let mut map = TriMap::new();
+        for entry in entries {
+            map.insert_no_dups(entry).map_err(serde::de::Error::custom)?;
+        }
+
+        Ok(map)
+    }
+}
+
+pub(crate) trait TriMapEntry: Clone + fmt::Debug {
+    type K1: Eq + Hash + Clone + fmt::Debug;
+    type K2: Eq + Hash + Clone + fmt::Debug;
+    type K3: Eq + Hash + Clone + fmt::Debug;
+
+    fn key1(&self) -> Self::K1;
+    fn key2(&self) -> Self::K2;
+    fn key3(&self) -> Self::K3;
+}
+
+impl<T: TriMapEntry> TriMap<T> {
+    pub(crate) fn new() -> Self {
+        Self {
+            entries: Vec::new(),
+            k1_to_entry: HashMap::new(),
+            k2_to_entry: HashMap::new(),
+            k3_to_entry: HashMap::new(),
+        }
+    }
+
+    /// Checks general invariants of the map.
+    ///
+    /// The code below always upholds these invariants, but it's useful to have
+    /// an explicit check for tests.
+    #[cfg(test)]
+    fn validate(&self) -> anyhow::Result<()> {
+        use anyhow::{ensure, Context};
+
+        // Check that all the maps are of the right size.
+        ensure!(
+            self.entries.len() == self.k1_to_entry.len(),
+            "key1 index has {} entries, but there are {} entries",
+            self.k1_to_entry.len(),
+            self.entries.len()
+        );
+        ensure!(
+            self.entries.len() == self.k2_to_entry.len(),
+            "key2 index has {} entries, but there are {} entries",
+            self.k2_to_entry.len(),
+            self.entries.len()
+        );
+        ensure!(
+            self.entries.len() == self.k3_to_entry.len(),
+            "key3 index has {} entries, but there are {} entries",
+            self.k3_to_entry.len(),
+            self.entries.len()
+        );
+
+        // Check that the indexes are all correct.
+        for (ix, entry) in self.entries.iter().enumerate() {
+            let key1 = entry.key1();
+            let key2 = entry.key2();
+            let key3 = entry.key3();
+
+            let ix1 = self.k1_to_entry.get(&key1).context(format!(
+                "entry at index {ix} ({entry:?}) has no key1 index"
+            ))?;
+            let ix2 = self.k2_to_entry.get(&key2).context(format!(
+                "entry at index {ix} ({entry:?}) has no key2 index"
+            ))?;
+            let ix3 = self.k3_to_entry.get(&key3).context(format!(
+                "entry at index {ix} ({entry:?}) has no key3 index"
+            ))?;
+
+            if *ix1 != ix || *ix2 != ix || *ix3 != ix {
+                return Err(anyhow::anyhow!(
+                    "entry at index {} has mismatched indexes: key1: {}, key2: {}, key3: {}",
+                    ix,
+                    ix1,
+                    ix2,
+                    ix3
+                ));
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Inserts a value into the set, returning an error if any duplicates were
+    /// added.
diff --git a/nexus/types/src/deployment/zone_type.rs b/nexus/types/src/deployment/zone_type.rs
index 035e0667bc..9f663015cd 100644
--- a/nexus/types/src/deployment/zone_type.rs
+++ b/nexus/types/src/deployment/zone_type.rs
@@ -196,7 +196,7 @@ impl BlueprintZoneType {
 }
 
 pub mod blueprint_zone_type {
-    use crate::deployment::planning_input::OmicronZoneExternalFloatingAddr;
+    use crate::deployment::OmicronZoneExternalFloatingAddr;
     use crate::deployment::OmicronZoneExternalFloatingIp;
     use crate::deployment::OmicronZoneExternalSnatIp;
     use crate::inventory::OmicronZoneDataset;
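The one-line import change above relies on `OmicronZoneExternalFloatingAddr` also being re-exported at the `deployment` module level, as the neighboring imports already are. A tiny self-contained sketch of that re-export pattern (the module contents here are placeholders):

    // Sketch only: a child module's type re-exported by its parent, so both
    // the long and the short path name the same type.
    mod deployment {
        pub mod planning_input {
            #[derive(Debug, Clone, Copy)]
            pub struct OmicronZoneExternalFloatingAddr;
        }
        pub use self::planning_input::OmicronZoneExternalFloatingAddr;
    }

    fn main() {
        let _long: deployment::planning_input::OmicronZoneExternalFloatingAddr =
            deployment::OmicronZoneExternalFloatingAddr;
        let _short = deployment::OmicronZoneExternalFloatingAddr;
    }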
diff --git a/uuid-kinds/src/lib.rs b/uuid-kinds/src/lib.rs
index 489e0da365..2fc08972a6 100644
--- a/uuid-kinds/src/lib.rs
+++ b/uuid-kinds/src/lib.rs
@@ -62,5 +62,6 @@ impl_typed_uuid_kind! {
     Upstairs => "upstairs",
     UpstairsRepair => "upstairs_repair",
     UpstairsSession => "upstairs_session",
+    Vnic => "vnic",
     Zpool => "zpool",
 }
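For readers unfamiliar with omicron's typed UUIDs: each kind registered in `impl_typed_uuid_kind!` becomes its own ID type, so mixing up, say, a VNIC ID and a zpool ID is a compile-time error. The sketch below is a simplified rendition of that phantom-type pattern, not the actual macro expansion; the real crate also wires the string tags (such as "vnic") into serialization and uses real UUIDs rather than the `u128` stand-in here:

    // Sketch only: phantom-typed IDs make different kinds of UUIDs distinct
    // types while sharing one implementation.
    use std::marker::PhantomData;

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct TypedUuid<K> {
        uuid: u128, // stand-in for uuid::Uuid, to keep the sketch dependency-free
        _kind: PhantomData<K>,
    }

    impl<K> TypedUuid<K> {
        fn new(uuid: u128) -> Self {
            Self { uuid, _kind: PhantomData }
        }
    }

    // One empty "kind" type per ID namespace, echoing the macro's list.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    enum VnicKind {}
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    enum ZpoolKind {}

    type VnicUuid = TypedUuid<VnicKind>;
    type ZpoolUuid = TypedUuid<ZpoolKind>;

    fn delete_vnic(id: VnicUuid) {
        println!("deleting vnic {}", id.uuid);
    }

    fn main() {
        let vnic = VnicUuid::new(1);
        let zpool = ZpoolUuid::new(1);
        delete_vnic(vnic);
        // delete_vnic(zpool); // compile error: expected VnicUuid, found ZpoolUuid
        let _ = zpool;
    }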
From c8c7b41b4f3ba7111b60f65675b98c7a380f6421 Mon Sep 17 00:00:00 2001
From: John Gallagher
Date: Tue, 14 May 2024 19:20:45 -0400
Subject: [PATCH 8/9] fix #5733 vs #5751 semantic merge conflict (#5767)

Same problem and fix as #5765.
---
 nexus/tests/integration_tests/rack.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nexus/tests/integration_tests/rack.rs b/nexus/tests/integration_tests/rack.rs
index 3e10ebcca4..c72c59b6f7 100644
--- a/nexus/tests/integration_tests/rack.rs
+++ b/nexus/tests/integration_tests/rack.rs
@@ -205,7 +205,7 @@ async fn test_sled_add(cptestctx: &ControlPlaneTestContext) {
     assert_eq!(sled_id, repeat_sled_id);
 
     // Now upsert the sled.
-    let nexus = &cptestctx.server.apictx().nexus;
+    let nexus = &cptestctx.server.server_context().nexus;
     nexus
         .datastore()
         .sled_upsert(SledUpdate::new(
From 75661286020288b84c4cb138b21b1ff132c53590 Mon Sep 17 00:00:00 2001
From: Eliza Weisman
Date: Tue, 14 May 2024 18:05:19 -0700
Subject: [PATCH 9/9] [nexus] deflake `test_instance_watcher_metrics` (#5768)

Presently, `test_instance_watcher_metrics` will wait for the
`instance_watcher` background task to have run before making assertions
about metrics, but it does *not* ensure that oximeter has actually
collected those metrics. This can result in flaky failures --- see
#5752.

This commit adds explicit calls to `oximeter.force_collect()` prior to
making assertions, to ensure that the latest metrics have been
collected.

Fixes #5752
---
 nexus/tests/integration_tests/metrics.rs | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs
index 71d18f95ee..abcc7f1c75 100644
--- a/nexus/tests/integration_tests/metrics.rs
+++ b/nexus/tests/integration_tests/metrics.rs
@@ -329,6 +329,7 @@ async fn test_instance_watcher_metrics(
     let client = &cptestctx.external_client;
     let internal_client = &cptestctx.internal_client;
     let nexus = &cptestctx.server.server_context().nexus;
+    let oximeter = &cptestctx.oximeter;
 
     // TODO(eliza): consider factoring this out to a generic
     // `activate_background_task` function in `nexus-test-utils` eventually?
@@ -399,6 +400,8 @@ async fn test_instance_watcher_metrics(
         )
         .await
         .unwrap();
+        // Make sure that the latest metrics have been collected.
+        oximeter.force_collect().await;
     };
 
     #[track_caller]
@@ -443,11 +446,8 @@ async fn test_instance_watcher_metrics(
     let project = create_project_and_pool(&client).await;
     let project_name = project.identity.name.as_str();
     // Wait until Nexus registers as a producer with Oximeter.
-    wait_for_producer(
-        &cptestctx.oximeter,
-        cptestctx.server.server_context().nexus.id(),
-    )
-    .await;
+    wait_for_producer(&oximeter, cptestctx.server.server_context().nexus.id())
+        .await;
 
     eprintln!("--- creating instance 1 ---");
     let instance1 = create_instance(&client, project_name, "i-1").await;
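A closing observation on the deflake pattern in this last patch: rather than assuming the collector has run on its own schedule, the test forces a collection synchronously before asserting. A minimal self-contained sketch of that idea follows; `Producer` and `Collector` are stand-ins for the test's metric producer and `cptestctx.oximeter`, not oximeter's real API:

    // Sketch only: force a collection before asserting instead of racing the
    // collector's timer.
    struct Producer {
        latest: u64,
    }

    struct Collector {
        collected: Vec<u64>,
    }

    impl Collector {
        // Analogous in spirit to `oximeter.force_collect().await`: pull from
        // the producer right now rather than waiting for the next interval.
        fn force_collect(&mut self, producer: &Producer) {
            self.collected.push(producer.latest);
        }
    }

    fn main() {
        let producer = Producer { latest: 42 };
        let mut collector = Collector { collected: Vec::new() };

        // Without the forced collection, an assertion here would race the
        // collection interval; with it, the data point is guaranteed present.
        collector.force_collect(&producer);
        assert_eq!(collector.collected.last(), Some(&42));
    }

The same shape applies to any poll-based pipeline in tests: flush explicitly, then assert.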