From fdf4f799f1552aecfc52d27ae1e74906a484afcc Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Wed, 15 Nov 2023 22:17:10 +0000 Subject: [PATCH] wip --- .../db-queries/src/db/datastore/inventory.rs | 530 +++++++++--------- nexus/tests/output/nexus_tags.txt | 1 + .../output/uncovered-authz-endpoints.txt | 1 + openapi/nexus.json | 61 ++ 4 files changed, 317 insertions(+), 276 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs index 0bf39fb4854..d94a2aec29e 100644 --- a/nexus/db-queries/src/db/datastore/inventory.rs +++ b/nexus/db-queries/src/db/datastore/inventory.rs @@ -10,8 +10,6 @@ use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::queries::ALLOW_FULL_TABLE_SCAN_SQL; use crate::db::TransactionError; -use anyhow::anyhow; -use anyhow::bail; use anyhow::Context; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; @@ -44,6 +42,7 @@ use nexus_db_model::SwCaboose; use nexus_types::inventory::Collection; use omicron_common::api::external::Error; use omicron_common::api::external::InternalContext; +use omicron_common::bail_unless; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::num::NonZeroU32; @@ -823,47 +822,273 @@ impl DataStore { .await .unwrap()) } -} - -/// Extra interfaces that are not intended (and potentially unsafe) for use in -/// Nexus, but useful for testing and `omdb` -pub trait DataStoreInventoryTest: Send + Sync { - /// List all collections - /// - /// This does not paginate. - fn inventory_collections(&self) -> BoxFuture>>; - /// Make a best effort to read the given collection while limiting queries - /// to `limit` results. Returns as much as it was able to get. The - /// returned bool indicates whether the returned collection might be - /// incomplete because the limit was reached. 
- fn inventory_collection_read_best_effort( + /// Attempt to read the given collection while limiting queries to `limit` + /// records and returning nothing if `limit` is not large enough. + async fn inventory_collection_read_all_or_nothing( &self, id: Uuid, limit: NonZeroU32, - ) -> BoxFuture>; + ) -> Result { + let (collection, limit_reached) = + self.inventory_collection_read_best_effort(id, limit).await?; + bail_unless!( + !limit_reached, + "hit limit of {} records while loading collection", + limit + ); + Ok(collection) + } /// Attempt to read the given collection while limiting queries to `limit` - /// records - fn inventory_collection_read_all_or_nothing( + /// records and returning up to `limit` records. + async fn inventory_collection_read_best_effort( &self, id: Uuid, limit: NonZeroU32, - ) -> BoxFuture> { - async move { - let (collection, limit_reached) = - self.inventory_collection_read_best_effort(id, limit).await?; - anyhow::ensure!( - !limit_reached, - "hit limit of {} records while loading collection", - limit + ) -> Result<(Collection, bool), Error> { + let conn = &self.pool_connection_for_tests().await?; + let sql_limit = i64::from(u32::from(limit)); + let usize_limit = usize::try_from(u32::from(limit)).unwrap(); + let mut limit_reached = false; + let (time_started, time_done, collector) = { + use db::schema::inv_collection::dsl; + + let collections = dsl::inv_collection + .filter(dsl::id.eq(id)) + .limit(2) + .select(InvCollection::as_select()) + .load_async(&**conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })?; + bail_unless!(collections.len() == 1); + let collection = collections.into_iter().next().unwrap(); + ( + collection.time_started, + collection.time_done, + collection.collector, + ) + }; + + let errors: Vec = { + use db::schema::inv_collection_error::dsl; + dsl::inv_collection_error + .filter(dsl::inv_collection_id.eq(id)) + .order_by(dsl::idx) + .limit(sql_limit) + 
.select(InvCollectionError::as_select()) + .load_async(&**conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? + .into_iter() + .map(|e| e.message) + .collect() + }; + limit_reached = limit_reached || errors.len() == usize_limit; + + let sps: BTreeMap<_, _> = { + use db::schema::inv_service_processor::dsl; + dsl::inv_service_processor + .filter(dsl::inv_collection_id.eq(id)) + .limit(sql_limit) + .select(InvServiceProcessor::as_select()) + .load_async(&**conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? + .into_iter() + .map(|sp_row| { + let baseboard_id = sp_row.hw_baseboard_id; + ( + baseboard_id, + nexus_types::inventory::ServiceProcessor::from(sp_row), + ) + }) + .collect() + }; + limit_reached = limit_reached || sps.len() == usize_limit; + + let rots: BTreeMap<_, _> = { + use db::schema::inv_root_of_trust::dsl; + dsl::inv_root_of_trust + .filter(dsl::inv_collection_id.eq(id)) + .limit(sql_limit) + .select(InvRootOfTrust::as_select()) + .load_async(&**conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? + .into_iter() + .map(|rot_row| { + let baseboard_id = rot_row.hw_baseboard_id; + ( + baseboard_id, + nexus_types::inventory::RotState::from(rot_row), + ) + }) + .collect() + }; + limit_reached = limit_reached || rots.len() == usize_limit; + + // Collect the unique baseboard ids referenced by SPs and RoTs. + let baseboard_id_ids: BTreeSet<_> = + sps.keys().chain(rots.keys()).cloned().collect(); + // Fetch the corresponding baseboard records. + let baseboards_by_id: BTreeMap<_, _> = { + use db::schema::hw_baseboard_id::dsl; + dsl::hw_baseboard_id + .filter(dsl::id.eq_any(baseboard_id_ids)) + .limit(sql_limit) + .select(HwBaseboardId::as_select()) + .load_async(&**conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? 
+            .into_iter()
+            .map(|bb| {
+                (
+                    bb.id,
+                    Arc::new(nexus_types::inventory::BaseboardId::from(bb)),
+                )
+            })
+            .collect()
+        };
+        limit_reached = limit_reached || baseboards_by_id.len() == usize_limit;
+
+        // Having those, we can replace the keys in the maps above with
+        // references to the actual baseboard rather than the uuid.
+        let sps = sps
+            .into_iter()
+            .map(|(id, sp)| {
+                baseboards_by_id.get(&id).map(|bb| (bb.clone(), sp)).ok_or_else(
+                    || {
+                        Error::internal_error(
+                            "missing baseboard that we should have fetched",
+                        )
+                    },
+                )
+            })
+            .collect::<Result<BTreeMap<_, _>, _>>()?;
+        let rots = rots
+            .into_iter()
+            .map(|(id, rot)| {
+                baseboards_by_id
+                    .get(&id)
+                    .map(|bb| (bb.clone(), rot))
+                    .ok_or_else(|| {
+                        Error::internal_error(
+                            "missing baseboard that we should have fetched",
+                        )
+                    })
+            })
+            .collect::<Result<BTreeMap<_, _>, _>>()?;
+
+        // Fetch records of cabooses found.
+        let inv_caboose_rows = {
+            use db::schema::inv_caboose::dsl;
+            dsl::inv_caboose
+                .filter(dsl::inv_collection_id.eq(id))
+                .limit(sql_limit)
+                .select(InvCaboose::as_select())
+                .load_async(&**conn)
+                .await
+                .map_err(|e| {
+                    public_error_from_diesel(e, ErrorHandler::Server)
+                })?
+        };
+        limit_reached = limit_reached || inv_caboose_rows.len() == usize_limit;
+
+        // Collect the unique sw_caboose_ids for those cabooses.
+        let sw_caboose_ids: BTreeSet<_> = inv_caboose_rows
+            .iter()
+            .map(|inv_caboose| inv_caboose.sw_caboose_id)
+            .collect();
+        // Fetch the corresponding records.
+        let cabooses_by_id: BTreeMap<_, _> = {
+            use db::schema::sw_caboose::dsl;
+            dsl::sw_caboose
+                .filter(dsl::id.eq_any(sw_caboose_ids))
+                .limit(sql_limit)
+                .select(SwCaboose::as_select())
+                .load_async(&**conn)
+                .await
+                .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?
+ .into_iter() + .map(|sw_caboose_row| { + ( + sw_caboose_row.id, + Arc::new(nexus_types::inventory::Caboose::from( + sw_caboose_row, + )), + ) + }) + .collect() + }; + limit_reached = limit_reached || cabooses_by_id.len() == usize_limit; + + // Assemble the lists of cabooses found. + let mut cabooses_found = BTreeMap::new(); + for c in inv_caboose_rows { + let by_baseboard = cabooses_found + .entry(nexus_types::inventory::CabooseWhich::from(c.which)) + .or_insert_with(BTreeMap::new); + let Some(bb) = baseboards_by_id.get(&c.hw_baseboard_id) else { + let msg = format!( + "unknown baseboard found in inv_caboose: {}", + c.hw_baseboard_id + ); + return Err(Error::internal_error(&msg)); + }; + let Some(sw_caboose) = cabooses_by_id.get(&c.sw_caboose_id) else { + let msg = format!( + "unknown caboose found in inv_caboose: {}", + c.sw_caboose_id + ); + return Err(Error::internal_error(&msg)); + }; + + let previous = by_baseboard.insert( + bb.clone(), + nexus_types::inventory::CabooseFound { + time_collected: c.time_collected, + source: c.source, + caboose: sw_caboose.clone(), + }, + ); + bail_unless!( + previous.is_none(), + "duplicate caboose found: {:?} baseboard {:?}", + c.which, + c.hw_baseboard_id ); - Ok(collection) } - .boxed() + + Ok(( + Collection { + id, + errors, + time_started, + time_done, + collector, + baseboards: baseboards_by_id.values().cloned().collect(), + cabooses: cabooses_by_id.values().cloned().collect(), + sps, + rots, + cabooses_found, + }, + limit_reached, + )) } } +/// Extra interfaces that are not intended (and potentially unsafe) for use in +/// Nexus, but useful for testing and `omdb` +pub trait DataStoreInventoryTest: Send + Sync { + /// List all collections + /// + /// This does not paginate. 
+ fn inventory_collections(&self) -> BoxFuture>>; +} + impl DataStoreInventoryTest for DataStore { fn inventory_collections(&self) -> BoxFuture>> { async { @@ -888,253 +1113,6 @@ impl DataStoreInventoryTest for DataStore { } .boxed() } - - // This function could move into the datastore if it proves helpful. We'd - // need to work out how to report the usual type of Error. For now we don't - // need it so we limit its scope to the test suite. - fn inventory_collection_read_best_effort( - &self, - id: Uuid, - limit: NonZeroU32, - ) -> BoxFuture> { - async move { - let conn = &self - .pool_connection_for_tests() - .await - .context("getting connection")?; - let sql_limit = i64::from(u32::from(limit)); - let usize_limit = usize::try_from(u32::from(limit)).unwrap(); - let mut limit_reached = false; - let (time_started, time_done, collector) = { - use db::schema::inv_collection::dsl; - - let collections = dsl::inv_collection - .filter(dsl::id.eq(id)) - .limit(2) - .select(InvCollection::as_select()) - .load_async(&**conn) - .await - .context("loading collection")?; - anyhow::ensure!(collections.len() == 1); - let collection = collections.into_iter().next().unwrap(); - ( - collection.time_started, - collection.time_done, - collection.collector, - ) - }; - - let errors: Vec = { - use db::schema::inv_collection_error::dsl; - dsl::inv_collection_error - .filter(dsl::inv_collection_id.eq(id)) - .order_by(dsl::idx) - .limit(sql_limit) - .select(InvCollectionError::as_select()) - .load_async(&**conn) - .await - .context("loading collection errors")? - .into_iter() - .map(|e| e.message) - .collect() - }; - limit_reached = limit_reached || errors.len() == usize_limit; - - let sps: BTreeMap<_, _> = { - use db::schema::inv_service_processor::dsl; - dsl::inv_service_processor - .filter(dsl::inv_collection_id.eq(id)) - .limit(sql_limit) - .select(InvServiceProcessor::as_select()) - .load_async(&**conn) - .await - .context("loading service processors")? 
- .into_iter() - .map(|sp_row| { - let baseboard_id = sp_row.hw_baseboard_id; - ( - baseboard_id, - nexus_types::inventory::ServiceProcessor::from( - sp_row, - ), - ) - }) - .collect() - }; - limit_reached = limit_reached || sps.len() == usize_limit; - - let rots: BTreeMap<_, _> = { - use db::schema::inv_root_of_trust::dsl; - dsl::inv_root_of_trust - .filter(dsl::inv_collection_id.eq(id)) - .limit(sql_limit) - .select(InvRootOfTrust::as_select()) - .load_async(&**conn) - .await - .context("loading roots of trust")? - .into_iter() - .map(|rot_row| { - let baseboard_id = rot_row.hw_baseboard_id; - ( - baseboard_id, - nexus_types::inventory::RotState::from(rot_row), - ) - }) - .collect() - }; - limit_reached = limit_reached || rots.len() == usize_limit; - - // Collect the unique baseboard ids referenced by SPs and RoTs. - let baseboard_id_ids: BTreeSet<_> = - sps.keys().chain(rots.keys()).cloned().collect(); - // Fetch the corresponding baseboard records. - let baseboards_by_id: BTreeMap<_, _> = { - use db::schema::hw_baseboard_id::dsl; - dsl::hw_baseboard_id - .filter(dsl::id.eq_any(baseboard_id_ids)) - .limit(sql_limit) - .select(HwBaseboardId::as_select()) - .load_async(&**conn) - .await - .context("loading baseboards")? - .into_iter() - .map(|bb| { - ( - bb.id, - Arc::new( - nexus_types::inventory::BaseboardId::from(bb), - ), - ) - }) - .collect() - }; - limit_reached = - limit_reached || baseboards_by_id.len() == usize_limit; - - // Having those, we can replace the keys in the maps above with - // references to the actual baseboard rather than the uuid. 
- let sps = sps - .into_iter() - .map(|(id, sp)| { - baseboards_by_id - .get(&id) - .map(|bb| (bb.clone(), sp)) - .ok_or_else(|| { - anyhow!( - "missing baseboard that we should have fetched" - ) - }) - }) - .collect::, _>>()?; - let rots = - rots.into_iter() - .map(|(id, rot)| { - baseboards_by_id - .get(&id) - .map(|bb| (bb.clone(), rot)) - .ok_or_else(|| { - anyhow!("missing baseboard that we should have fetched") - }) - }) - .collect::, _>>()?; - - // Fetch records of cabooses found. - let inv_caboose_rows = { - use db::schema::inv_caboose::dsl; - dsl::inv_caboose - .filter(dsl::inv_collection_id.eq(id)) - .limit(sql_limit) - .select(InvCaboose::as_select()) - .load_async(&**conn) - .await - .context("loading inv_cabooses")? - }; - limit_reached = - limit_reached || inv_caboose_rows.len() == usize_limit; - - // Collect the unique sw_caboose_ids for those cabooses. - let sw_caboose_ids: BTreeSet<_> = inv_caboose_rows - .iter() - .map(|inv_caboose| inv_caboose.sw_caboose_id) - .collect(); - // Fetch the corresponing records. - let cabooses_by_id: BTreeMap<_, _> = { - use db::schema::sw_caboose::dsl; - dsl::sw_caboose - .filter(dsl::id.eq_any(sw_caboose_ids)) - .limit(sql_limit) - .select(SwCaboose::as_select()) - .load_async(&**conn) - .await - .context("loading sw_cabooses")? - .into_iter() - .map(|sw_caboose_row| { - ( - sw_caboose_row.id, - Arc::new(nexus_types::inventory::Caboose::from( - sw_caboose_row, - )), - ) - }) - .collect() - }; - limit_reached = - limit_reached || cabooses_by_id.len() == usize_limit; - - // Assemble the lists of cabooses found. 
- let mut cabooses_found = BTreeMap::new(); - for c in inv_caboose_rows { - let by_baseboard = cabooses_found - .entry(nexus_types::inventory::CabooseWhich::from(c.which)) - .or_insert_with(BTreeMap::new); - let Some(bb) = baseboards_by_id.get(&c.hw_baseboard_id) else { - bail!( - "unknown baseboard found in inv_caboose: {}", - c.hw_baseboard_id - ); - }; - let Some(sw_caboose) = cabooses_by_id.get(&c.sw_caboose_id) - else { - bail!( - "unknown caboose found in inv_caboose: {}", - c.sw_caboose_id - ); - }; - - let previous = by_baseboard.insert( - bb.clone(), - nexus_types::inventory::CabooseFound { - time_collected: c.time_collected, - source: c.source, - caboose: sw_caboose.clone(), - }, - ); - anyhow::ensure!( - previous.is_none(), - "duplicate caboose found: {:?} baseboard {:?}", - c.which, - c.hw_baseboard_id - ); - } - - Ok(( - Collection { - id, - errors, - time_started, - time_done, - collector, - baseboards: baseboards_by_id.values().cloned().collect(), - cabooses: cabooses_by_id.values().cloned().collect(), - sps, - rots, - cabooses_found, - }, - limit_reached, - )) - } - .boxed() - } } #[cfg(test)] diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 8c5fe953e32..a876e6080fe 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -122,6 +122,7 @@ sled_physical_disk_list GET /v1/system/hardware/sleds/{sle sled_view GET /v1/system/hardware/sleds/{sled_id} switch_list GET /v1/system/hardware/switches switch_view GET /v1/system/hardware/switches/{switch_id} +uninitialized_sled_list GET /v1/system/hardware/racks/{rack_id}/uninitialized-sleds API operations found with tag "system/metrics" OPERATION ID METHOD URL PATH diff --git a/nexus/tests/output/uncovered-authz-endpoints.txt b/nexus/tests/output/uncovered-authz-endpoints.txt index d76d9c54954..6a6b2a74444 100644 --- a/nexus/tests/output/uncovered-authz-endpoints.txt +++ b/nexus/tests/output/uncovered-authz-endpoints.txt @@ -1,5 +1,6 @@ 
API endpoints with no coverage in authz tests: ping (get "/v1/ping") +uninitialized_sled_list (get "/v1/system/hardware/racks/{rack_id}/uninitialized-sleds") device_auth_request (post "/device/auth") device_auth_confirm (post "/device/confirm") device_access_token (post "/device/token") diff --git a/openapi/nexus.json b/openapi/nexus.json index 74162a9b2bc..6fba0f3824d 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -3553,6 +3553,49 @@ } } }, + "/v1/system/hardware/racks/{rack_id}/uninitialized-sleds": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "List uninitialized sleds in a given rack", + "operationId": "uninitialized_sled_list", + "parameters": [ + { + "in": "path", + "name": "rack_id", + "description": "The rack's unique ID.", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_UninitializedSled", + "type": "array", + "items": { + "$ref": "#/components/schemas/UninitializedSled" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/v1/system/hardware/sleds": { "get": { "tags": [ @@ -13939,6 +13982,24 @@ "vlan_id" ] }, + "UninitializedSled": { + "description": "View of a sled that has not been added to an initialized rack yet", + "type": "object", + "properties": { + "baseboard": { + "$ref": "#/components/schemas/Baseboard" + }, + "cubby": { + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "baseboard", + "cubby" + ] + }, "User": { "description": "View of a User", "type": "object",