From 3f702ef442a2cb6522684c8b4028bc8a8b11ed6d Mon Sep 17 00:00:00 2001 From: Rain Date: Wed, 22 Nov 2023 17:26:19 -0800 Subject: [PATCH 1/7] [omicron-dev] increase test timeout to 30 seconds (#4557) On my machine (Ryzen 7950X) I saw that under load (32 tests running at the same time), the timeout would quite reliably be hit likely because cockroach was starved. Increasing it seems pretty harmless. --- dev-tools/omicron-dev/tests/test_omicron_dev.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/omicron-dev/tests/test_omicron_dev.rs b/dev-tools/omicron-dev/tests/test_omicron_dev.rs index f1e8177243..7e78e5dc5a 100644 --- a/dev-tools/omicron-dev/tests/test_omicron_dev.rs +++ b/dev-tools/omicron-dev/tests/test_omicron_dev.rs @@ -27,7 +27,7 @@ use subprocess::Redirection; const CMD_OMICRON_DEV: &str = env!("CARGO_BIN_EXE_omicron-dev"); /// timeout used for various things that should be pretty quick -const TIMEOUT: Duration = Duration::from_secs(15); +const TIMEOUT: Duration = Duration::from_secs(30); fn path_to_omicron_dev() -> PathBuf { path_to_executable(CMD_OMICRON_DEV) From 47968b8e17a1a16c1da605da0d418efd8fa6026e Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Sat, 25 Nov 2023 01:57:43 -0500 Subject: [PATCH 2/7] [Nexus] Add a sled to an initialized rack (#4545) This commit provides an external API for adding a sled to an already initialized rack. 
--- nexus/db-model/src/lib.rs | 2 + nexus/db-model/src/rack.rs | 19 +- nexus/db-model/src/schema.rs | 12 +- nexus/db-model/src/sled.rs | 4 +- .../src/sled_underlay_subnet_allocation.rs | 16 ++ nexus/db-model/src/switch.rs | 4 +- .../db-queries/src/db/datastore/inventory.rs | 73 ++++- nexus/db-queries/src/db/datastore/rack.rs | 254 ++++++++++++++++++ nexus/src/app/rack.rs | 139 ++++++++-- nexus/src/app/sled.rs | 3 + nexus/src/external_api/http_entrypoints.rs | 31 ++- nexus/tests/integration_tests/endpoints.rs | 17 +- nexus/tests/integration_tests/rack.rs | 66 +++++ nexus/tests/output/nexus_tags.txt | 1 + nexus/types/src/external_api/shared.rs | 36 +++ nexus/types/src/external_api/views.rs | 38 +-- nexus/types/src/internal_api/params.rs | 4 +- nexus/types/src/inventory.rs | 8 + openapi/nexus.json | 30 ++- schema/crdb/14.0.0/up1.sql | 37 +++ schema/crdb/14.0.0/up2.sql | 5 + schema/crdb/dbinit.sql | 47 +++- 22 files changed, 753 insertions(+), 93 deletions(-) create mode 100644 nexus/db-model/src/sled_underlay_subnet_allocation.rs create mode 100644 schema/crdb/14.0.0/up1.sql create mode 100644 schema/crdb/14.0.0/up2.sql diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index 6b65eb87ec..ac5bad26f8 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -72,6 +72,7 @@ mod sled; mod sled_instance; mod sled_resource; mod sled_resource_kind; +mod sled_underlay_subnet_allocation; mod snapshot; mod ssh_key; mod switch; @@ -153,6 +154,7 @@ pub use sled::*; pub use sled_instance::*; pub use sled_resource::*; pub use sled_resource_kind::*; +pub use sled_underlay_subnet_allocation::*; pub use snapshot::*; pub use ssh_key::*; pub use switch::*; diff --git a/nexus/db-model/src/rack.rs b/nexus/db-model/src/rack.rs index f2bc7528d2..580ec155b4 100644 --- a/nexus/db-model/src/rack.rs +++ b/nexus/db-model/src/rack.rs @@ -4,9 +4,8 @@ use crate::schema::rack; use db_macros::Asset; -use ipnetwork::{IpNetwork, Ipv6Network}; +use ipnetwork::IpNetwork; 
use nexus_types::{external_api::views, identity::Asset}; -use omicron_common::api; use uuid::Uuid; /// Information about a local rack. @@ -29,22 +28,6 @@ impl Rack { rack_subnet: None, } } - - pub fn subnet(&self) -> Result { - match self.rack_subnet { - Some(IpNetwork::V6(subnet)) => Ok(subnet), - Some(IpNetwork::V4(_)) => { - return Err(api::external::Error::InternalError { - internal_message: "rack subnet not IPv6".into(), - }) - } - None => { - return Err(api::external::Error::InternalError { - internal_message: "rack subnet not set".into(), - }) - } - } - } } impl From for views::Rack { diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 7f7dd57027..afeac5e6cd 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -755,6 +755,16 @@ table! { } } +table! { + sled_underlay_subnet_allocation (rack_id, sled_id) { + rack_id -> Uuid, + sled_id -> Uuid, + subnet_octet -> Int2, + hw_baseboard_id -> Uuid, + } +} +allow_tables_to_appear_in_same_query!(rack, sled_underlay_subnet_allocation); + table! { switch (id) { id -> Uuid, @@ -1289,7 +1299,7 @@ table! { /// /// This should be updated whenever the schema is changed. 
For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(13, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(14, 0, 0); allow_tables_to_appear_in_same_query!( system_update, diff --git a/nexus/db-model/src/sled.rs b/nexus/db-model/src/sled.rs index ba572901c6..4c82aa5d23 100644 --- a/nexus/db-model/src/sled.rs +++ b/nexus/db-model/src/sled.rs @@ -8,7 +8,7 @@ use crate::ipv6; use crate::schema::{physical_disk, service, sled, zpool}; use chrono::{DateTime, Utc}; use db_macros::Asset; -use nexus_types::{external_api::views, identity::Asset}; +use nexus_types::{external_api::shared, external_api::views, identity::Asset}; use std::net::Ipv6Addr; use std::net::SocketAddrV6; use uuid::Uuid; @@ -88,7 +88,7 @@ impl From for views::Sled { Self { identity: sled.identity(), rack_id: sled.rack_id, - baseboard: views::Baseboard { + baseboard: shared::Baseboard { serial: sled.serial_number, part: sled.part_number, revision: sled.revision, diff --git a/nexus/db-model/src/sled_underlay_subnet_allocation.rs b/nexus/db-model/src/sled_underlay_subnet_allocation.rs new file mode 100644 index 0000000000..4da0bea669 --- /dev/null +++ b/nexus/db-model/src/sled_underlay_subnet_allocation.rs @@ -0,0 +1,16 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +use crate::schema::sled_underlay_subnet_allocation; +use uuid::Uuid; + +/// Underlay allocation for a sled added to an initialized rack +#[derive(Queryable, Insertable, Debug, Clone, Selectable)] +#[diesel(table_name = sled_underlay_subnet_allocation)] +pub struct SledUnderlaySubnetAllocation { + pub rack_id: Uuid, + pub sled_id: Uuid, + pub subnet_octet: i16, + pub hw_baseboard_id: Uuid, +} diff --git a/nexus/db-model/src/switch.rs b/nexus/db-model/src/switch.rs index c9db100b0a..159888d91e 100644 --- a/nexus/db-model/src/switch.rs +++ b/nexus/db-model/src/switch.rs @@ -2,7 +2,7 @@ use super::Generation; use crate::schema::switch; use chrono::{DateTime, Utc}; use db_macros::Asset; -use nexus_types::{external_api::views, identity::Asset}; +use nexus_types::{external_api::shared, external_api::views, identity::Asset}; use uuid::Uuid; /// Baseboard information about a switch. @@ -57,7 +57,7 @@ impl From for views::Switch { Self { identity: switch.identity(), rack_id: switch.rack_id, - baseboard: views::Baseboard { + baseboard: shared::Baseboard { serial: switch.serial_number, part: switch.part_number, revision: switch.revision, diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs index 28a438629e..31b24a7e75 100644 --- a/nexus/db-queries/src/db/datastore/inventory.rs +++ b/nexus/db-queries/src/db/datastore/inventory.rs @@ -7,6 +7,7 @@ use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::error::public_error_from_diesel; +use crate::db::error::public_error_from_diesel_lookup; use crate::db::error::ErrorHandler; use crate::db::queries::ALLOW_FULL_TABLE_SCAN_SQL; use crate::db::TransactionError; @@ -21,6 +22,7 @@ use diesel::ExpressionMethods; use diesel::IntoSql; use diesel::JoinOnDsl; use diesel::NullableExpressionMethods; +use diesel::OptionalExtension; use diesel::QueryDsl; use diesel::Table; use futures::future::BoxFuture; @@ -42,9 +44,12 @@ use nexus_db_model::SpType; use 
nexus_db_model::SpTypeEnum; use nexus_db_model::SwCaboose; use nexus_db_model::SwRotPage; +use nexus_types::inventory::BaseboardId; use nexus_types::inventory::Collection; use omicron_common::api::external::Error; use omicron_common::api::external::InternalContext; +use omicron_common::api::external::LookupType; +use omicron_common::api::external::ResourceType; use omicron_common::bail_unless; use std::collections::BTreeMap; use std::collections::BTreeSet; @@ -910,30 +915,62 @@ impl DataStore { Ok(()) } + // Find the primary key for `hw_baseboard_id` given a `BaseboardId` + pub async fn find_hw_baseboard_id( + &self, + opctx: &OpContext, + baseboard_id: BaseboardId, + ) -> Result { + opctx.authorize(authz::Action::Read, &authz::INVENTORY).await?; + let conn = self.pool_connection_authorized(opctx).await?; + use db::schema::hw_baseboard_id::dsl; + dsl::hw_baseboard_id + .filter(dsl::serial_number.eq(baseboard_id.serial_number.clone())) + .filter(dsl::part_number.eq(baseboard_id.part_number.clone())) + .select(dsl::id) + .first_async::(&*conn) + .await + .map_err(|e| { + public_error_from_diesel_lookup( + e, + ResourceType::Sled, + &LookupType::ByCompositeId(format!("{baseboard_id:?}")), + ) + }) + } + /// Attempt to read the latest collection while limiting queries to `limit` /// records + /// + /// If there aren't any collections, return `Ok(None)`. 
pub async fn inventory_get_latest_collection( &self, opctx: &OpContext, limit: NonZeroU32, - ) -> Result { + ) -> Result, Error> { opctx.authorize(authz::Action::Read, &authz::INVENTORY).await?; let conn = self.pool_connection_authorized(opctx).await?; use db::schema::inv_collection::dsl; let collection_id = dsl::inv_collection .select(dsl::id) .order_by(dsl::time_started.desc()) - .limit(1) .first_async::(&*conn) .await + .optional() .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; - self.inventory_collection_read_all_or_nothing( - opctx, - collection_id, - limit, - ) - .await + let Some(collection_id) = collection_id else { + return Ok(None); + }; + + Ok(Some( + self.inventory_collection_read_all_or_nothing( + opctx, + collection_id, + limit, + ) + .await?, + )) } /// Attempt to read the given collection while limiting queries to `limit` @@ -1335,9 +1372,11 @@ mod test { use nexus_inventory::examples::Representative; use nexus_test_utils::db::test_setup_database; use nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL; + use nexus_types::inventory::BaseboardId; use nexus_types::inventory::CabooseWhich; use nexus_types::inventory::Collection; use nexus_types::inventory::RotPageWhich; + use omicron_common::api::external::Error; use omicron_test_utils::dev; use std::num::NonZeroU32; use uuid::Uuid; @@ -1393,6 +1432,24 @@ mod test { } } + #[tokio::test] + async fn test_find_hw_baseboard_id_missing_returns_not_found() { + let logctx = dev::test_setup_log("inventory_insert"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + let baseboard_id = BaseboardId { + serial_number: "some-serial".into(), + part_number: "some-part".into(), + }; + let err = datastore + .find_hw_baseboard_id(&opctx, baseboard_id) + .await + .unwrap_err(); + assert!(matches!(err, Error::ObjectNotFound { .. 
})); + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + /// Tests inserting several collections, reading them back, and making sure /// they look the same. #[tokio::test] diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 2cc5880470..e11377f11a 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -41,6 +41,7 @@ use nexus_db_model::InitialDnsGroup; use nexus_db_model::PasswordHashString; use nexus_db_model::SiloUser; use nexus_db_model::SiloUserPasswordHash; +use nexus_db_model::SledUnderlaySubnetAllocation; use nexus_types::external_api::params as external_params; use nexus_types::external_api::shared; use nexus_types::external_api::shared::IdentityType; @@ -55,6 +56,7 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; +use omicron_common::bail_unless; use std::net::IpAddr; use uuid::Uuid; @@ -214,6 +216,126 @@ impl DataStore { Ok(()) } + // Return the subnet for the rack + pub async fn rack_subnet( + &self, + opctx: &OpContext, + rack_id: Uuid, + ) -> Result { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + let conn = self.pool_connection_authorized(opctx).await?; + use db::schema::rack::dsl; + // It's safe to unwrap the returned `rack_subnet` because + // we filter on `rack_subnet.is_not_null()` + let subnet = dsl::rack + .filter(dsl::id.eq(rack_id)) + .filter(dsl::rack_subnet.is_not_null()) + .select(dsl::rack_subnet) + .first_async::>(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + match subnet { + Some(subnet) => Ok(subnet), + None => Err(Error::internal_error( + "DB Error(bug): returned a null subnet for {rack_id}", + )), + } + } + + /// Allocate a rack subnet octet to a given sled + /// + /// 1. Find the existing allocations + /// 2. 
Calculate the new allocation + /// 3. Save the new allocation, if there isn't one for the given + /// `hw_baseboard_id` + /// 4. Return the new allocation + /// + // TODO: This could all actually be done in SQL using a `next_item` query. + // See https://github.com/oxidecomputer/omicron/issues/4544 + pub async fn allocate_sled_underlay_subnet_octets( + &self, + opctx: &OpContext, + rack_id: Uuid, + hw_baseboard_id: Uuid, + ) -> Result { + // Fetch all the existing allocations via self.rack_id + let allocations = self.rack_subnet_allocations(opctx, rack_id).await?; + + // Calculate the allocation for the new sled by choosing the minimum + // octet. The returned allocations are ordered by octet, so we will know + // when we have a free one. However, if we already have an allocation + // for the given sled then reuse that one. + const MIN_SUBNET_OCTET: i16 = 33; + let mut new_allocation = SledUnderlaySubnetAllocation { + rack_id, + sled_id: Uuid::new_v4(), + subnet_octet: MIN_SUBNET_OCTET, + hw_baseboard_id, + }; + let mut allocation_already_exists = false; + for allocation in allocations { + if allocation.hw_baseboard_id == new_allocation.hw_baseboard_id { + // We already have an allocation for this sled. + new_allocation = allocation; + allocation_already_exists = true; + break; + } + if allocation.subnet_octet == new_allocation.subnet_octet { + bail_unless!( + new_allocation.subnet_octet < 255, + "Too many sled subnets allocated" + ); + new_allocation.subnet_octet += 1; + } + } + + // Write the new allocation row to CRDB. The UNIQUE constraint + // on `subnet_octet` will prevent dueling administrators reusing + // allocations when sleds are being added. We will need another + // mechanism ala generation numbers when we must interleave additions + // and removals of sleds. + if !allocation_already_exists { + self.sled_subnet_allocation_insert(opctx, &new_allocation).await?; + } + + Ok(new_allocation) + } + + /// Return all current underlay allocations for the rack. 
+ /// + /// Order allocations by `subnet_octet` + pub async fn rack_subnet_allocations( + &self, + opctx: &OpContext, + rack_id: Uuid, + ) -> Result, Error> { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + use db::schema::sled_underlay_subnet_allocation::dsl as subnet_dsl; + subnet_dsl::sled_underlay_subnet_allocation + .filter(subnet_dsl::rack_id.eq(rack_id)) + .select(SledUnderlaySubnetAllocation::as_select()) + .order_by(subnet_dsl::subnet_octet.asc()) + .load_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// Store a new sled subnet allocation in the database + pub async fn sled_subnet_allocation_insert( + &self, + opctx: &OpContext, + allocation: &SledUnderlaySubnetAllocation, + ) -> Result<(), Error> { + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + use db::schema::sled_underlay_subnet_allocation::dsl; + diesel::insert_into(dsl::sled_underlay_subnet_allocation) + .values(allocation.clone()) + .execute_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + Ok(()) + } + // The following methods which return a `TxnError` take a `conn` parameter // which comes from the transaction created in `rack_set_initialized`. 
@@ -1518,4 +1640,136 @@ mod test { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + #[tokio::test] + async fn rack_sled_subnet_allocations() { + let logctx = dev::test_setup_log("rack_sled_subnet_allocations"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let rack_id = Uuid::new_v4(); + + // Ensure we get an empty list when there are no allocations + let allocations = + datastore.rack_subnet_allocations(&opctx, rack_id).await.unwrap(); + assert!(allocations.is_empty()); + + // Add 5 allocations + for i in 0..5i16 { + let allocation = SledUnderlaySubnetAllocation { + rack_id, + sled_id: Uuid::new_v4(), + subnet_octet: 33 + i, + hw_baseboard_id: Uuid::new_v4(), + }; + datastore + .sled_subnet_allocation_insert(&opctx, &allocation) + .await + .unwrap(); + } + + // List all 5 allocations + let allocations = + datastore.rack_subnet_allocations(&opctx, rack_id).await.unwrap(); + + assert_eq!(5, allocations.len()); + + // Try to add another allocation for the same octet, but with a distinct + // sled_id. Ensure we get an error due to a unique constraint. 
+ let mut should_fail_allocation = SledUnderlaySubnetAllocation { + rack_id, + sled_id: Uuid::new_v4(), + subnet_octet: 37, + hw_baseboard_id: Uuid::new_v4(), + }; + let _err = datastore + .sled_subnet_allocation_insert(&opctx, &should_fail_allocation) + .await + .unwrap_err(); + + // Adding an allocation for the same {rack_id, sled_id} pair fails + // the second time, even with a distinct subnet_octet + let mut allocation = should_fail_allocation.clone(); + allocation.subnet_octet = 38; + datastore + .sled_subnet_allocation_insert(&opctx, &allocation) + .await + .unwrap(); + + should_fail_allocation.subnet_octet = 39; + should_fail_allocation.hw_baseboard_id = Uuid::new_v4(); + let _err = datastore + .sled_subnet_allocation_insert(&opctx, &should_fail_allocation) + .await + .unwrap_err(); + + // Allocations outside our expected range fail + let mut should_fail_allocation = SledUnderlaySubnetAllocation { + rack_id, + sled_id: Uuid::new_v4(), + subnet_octet: 32, + hw_baseboard_id: Uuid::new_v4(), + }; + let _err = datastore + .sled_subnet_allocation_insert(&opctx, &should_fail_allocation) + .await + .unwrap_err(); + should_fail_allocation.subnet_octet = 256; + let _err = datastore + .sled_subnet_allocation_insert(&opctx, &should_fail_allocation) + .await + .unwrap_err(); + + // We should have 6 allocations + let allocations = + datastore.rack_subnet_allocations(&opctx, rack_id).await.unwrap(); + + assert_eq!(6, allocations.len()); + assert_eq!( + vec![33, 34, 35, 36, 37, 38], + allocations.iter().map(|a| a.subnet_octet).collect::>() + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn allocate_sled_underlay_subnet_octets() { + let logctx = dev::test_setup_log("allocate_sled_underlay_subnet_octets"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let rack_id = Uuid::new_v4(); + + let mut allocated_octets = vec![]; + for _ in 0..5 { 
allocated_octets.push( + datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + Uuid::new_v4(), + ) + .await + .unwrap() + .subnet_octet, + ); + } + + let expected = vec![33, 34, 35, 36, 37]; + assert_eq!(expected, allocated_octets); + + // We should have 5 allocations in the DB, sorted appropriately + let allocations = + datastore.rack_subnet_allocations(&opctx, rack_id).await.unwrap(); + assert_eq!(5, allocations.len()); + assert_eq!( + expected, + allocations.iter().map(|a| a.subnet_octet).collect::>() + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } } diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 1c2e49e260..984ece2d0c 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -10,7 +10,7 @@ use crate::external_api::params::CertificateCreate; use crate::external_api::shared::ServiceUsingCertificate; use crate::internal_api::params::RackInitializationRequest; use gateway_client::types::SpType; -use ipnetwork::IpNetwork; +use ipnetwork::{IpNetwork, Ipv6Network}; use nexus_db_model::DnsGroup; use nexus_db_model::InitialDnsGroup; use nexus_db_model::{SwitchLinkFec, SwitchLinkSpeed}; @@ -29,13 +29,14 @@ use nexus_types::external_api::params::{ AddressLotCreate, LoopbackAddressCreate, Route, SiloCreate, SwitchPortSettingsCreate, }; +use nexus_types::external_api::shared::Baseboard; use nexus_types::external_api::shared::FleetRole; use nexus_types::external_api::shared::SiloIdentityMode; use nexus_types::external_api::shared::SiloRole; +use nexus_types::external_api::shared::UninitializedSled; use nexus_types::external_api::views; -use nexus_types::external_api::views::Baseboard; -use nexus_types::external_api::views::UninitializedSled; use nexus_types::internal_api::params::DnsRecord; +use omicron_common::address::{get_64_subnet, Ipv6Subnet, RACK_PREFIX}; use omicron_common::api::external::AddressLotKind; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; @@ 
-45,7 +46,10 @@ use omicron_common::api::external::LookupResult; use omicron_common::api::external::Name; use omicron_common::api::external::NameOrId; use omicron_common::api::internal::shared::ExternalPortDiscovery; +use sled_agent_client::types::AddSledRequest; use sled_agent_client::types::EarlyNetworkConfigBody; +use sled_agent_client::types::StartSledAgentRequest; +use sled_agent_client::types::StartSledAgentRequestBody; use sled_agent_client::types::{ BgpConfig, BgpPeerConfig, EarlyNetworkConfig, PortConfigV1, RackNetworkConfigV1, RouteConfig as SledRouteConfig, @@ -584,20 +588,7 @@ impl super::Nexus { if rack.rack_subnet.is_some() { return Ok(()); } - let addr = self - .sled_list(opctx, &DataPageParams::max_page()) - .await? - .get(0) - .ok_or(Error::InternalError { - internal_message: "no sleds at time of bootstore sync".into(), - })? - .address(); - - let sa = sled_agent_client::Client::new( - &format!("http://{}", addr), - self.log.clone(), - ); - + let sa = self.get_any_sled_agent(opctx).await?; let result = sa .read_network_bootstore_config_cache() .await @@ -619,7 +610,7 @@ impl super::Nexus { opctx: &OpContext, ) -> Result { let rack = self.rack_lookup(opctx, &self.rack_id).await?; - let subnet = rack.subnet()?; + let subnet = rack_subnet(rack.rack_subnet)?; let db_ports = self.active_port_settings(opctx).await?; let mut ports = Vec::new(); @@ -726,18 +717,28 @@ impl super::Nexus { &self, opctx: &OpContext, ) -> ListResultVec { + debug!(self.log, "Getting latest collection"); // Grab the SPs from the last collection let limit = NonZeroU32::new(50).unwrap(); let collection = self .db_datastore .inventory_get_latest_collection(opctx, limit) .await?; + + // There can't be any uninitialized sleds we know about + // if there is no inventory. 
+ let Some(collection) = collection else { + return Ok(vec![]); + }; + + let pagparams = DataPageParams { + marker: None, + direction: dropshot::PaginationOrder::Descending, + // TODO: This limit is only suitable for a single sled cluster + limit: NonZeroU32::new(32).unwrap(), + }; + + debug!(self.log, "Listing sleds"); + let sleds = self.db_datastore.sled_list(opctx, &pagparams).await?; + let mut uninitialized_sleds: Vec = collection @@ -767,4 +768,106 @@ impl super::Nexus { uninitialized_sleds.retain(|s| !sled_baseboards.contains(&s.baseboard)); Ok(uninitialized_sleds) } + + /// Add a sled to an initialized rack + pub(crate) async fn add_sled_to_initialized_rack( + &self, + opctx: &OpContext, + sled: UninitializedSled, + ) -> Result<(), Error> { + let baseboard_id = sled.baseboard.clone().into(); + let hw_baseboard_id = + self.db_datastore.find_hw_baseboard_id(opctx, baseboard_id).await?; + + let subnet = self.db_datastore.rack_subnet(opctx, sled.rack_id).await?; + let rack_subnet = + Ipv6Subnet::::from(rack_subnet(Some(subnet))?); + + let allocation = self + .db_datastore + .allocate_sled_underlay_subnet_octets( + opctx, + sled.rack_id, + hw_baseboard_id, + ) + .await?; + + // Convert the baseboard as necessary + let baseboard = sled_agent_client::types::Baseboard::Gimlet { + identifier: sled.baseboard.serial.clone(), + model: sled.baseboard.part.clone(), + revision: sled.baseboard.revision, + }; + + // Make the call to sled-agent + let req = AddSledRequest { + sled_id: baseboard, + start_request: StartSledAgentRequest { + generation: 0, + schema_version: 1, + body: StartSledAgentRequestBody { + id: allocation.sled_id, + rack_id: allocation.rack_id, + use_trust_quorum: true, + is_lrtq_learner: true, + subnet: sled_agent_client::types::Ipv6Subnet { + net: get_64_subnet( + rack_subnet, + allocation.subnet_octet.try_into().unwrap(), + ) + .net() + .into(), + }, + }, + }, + }; + let sa = self.get_any_sled_agent(opctx).await?; + sa.add_sled_to_initialized_rack(&req).await.map_err(|e| 
{ + Error::InternalError { + internal_message: format!( + "failed to add sled with baseboard {:?} to rack {}: {e}", + sled.baseboard, allocation.rack_id + ), + } + })?; + + Ok(()) + } + + async fn get_any_sled_agent( + &self, + opctx: &OpContext, + ) -> Result { + let addr = self + .sled_list(opctx, &DataPageParams::max_page()) + .await? + .get(0) + .ok_or(Error::InternalError { + internal_message: "no sled agents available".into(), + })? + .address(); + + Ok(sled_agent_client::Client::new( + &format!("http://{}", addr), + self.log.clone(), + )) + } +} + +pub fn rack_subnet( + rack_subnet: Option, +) -> Result { + match rack_subnet { + Some(IpNetwork::V6(subnet)) => Ok(subnet), + Some(IpNetwork::V4(_)) => { + return Err(Error::InternalError { + internal_message: "rack subnet not IPv6".into(), + }) + } + None => { + return Err(Error::InternalError { + internal_message: "rack subnet not set".into(), + }) + } + } } diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs index 8189c0a93d..c2931f1441 100644 --- a/nexus/src/app/sled.rs +++ b/nexus/src/app/sled.rs @@ -38,6 +38,9 @@ impl super::Nexus { // TODO-robustness we should have a limit on how many sled agents there can // be (for graceful degradation at large scale). + // + // TODO-multisled: This should not use the rack_id for the given nexus, + // unless the DNS lookups at sled-agent are only for rack-local nexuses. 
pub(crate) async fn upsert_sled( &self, opctx: &OpContext, diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 428632bcf5..78f675c28a 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -6,10 +6,11 @@ use super::{ console_api, device_auth, params, + shared::UninitializedSled, views::{ self, Certificate, Group, IdentityProvider, Image, IpPool, IpPoolRange, - PhysicalDisk, Project, Rack, Role, Silo, Sled, Snapshot, SshKey, - UninitializedSled, User, UserBuiltin, Vpc, VpcRouter, VpcSubnet, + PhysicalDisk, Project, Rack, Role, Silo, Sled, Snapshot, SshKey, User, + UserBuiltin, Vpc, VpcRouter, VpcSubnet, }, }; use crate::external_api::shared; @@ -223,6 +224,7 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(switch_list)?; api.register(switch_view)?; api.register(uninitialized_sled_list)?; + api.register(add_sled_to_initialized_rack)?; api.register(user_builtin_list)?; api.register(user_builtin_view)?; @@ -4402,6 +4404,31 @@ async fn uninitialized_sled_list( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } +/// Add a sled to an initialized rack +// +// TODO: In the future this should really be a PUT request, once we resolve +// https://github.com/oxidecomputer/omicron/issues/4494. It should also +// explicitly be tied to a rack via a `rack_id` path param. For now we assume +// we are only operating on single rack systems. 
+#[endpoint { + method = POST, + path = "/v1/system/hardware/sleds/", + tags = ["system/hardware"] +}] +async fn add_sled_to_initialized_rack( + rqctx: RequestContext>, + sled: TypedBody, +) -> Result { + let apictx = rqctx.context(); + let nexus = &apictx.nexus; + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + nexus.add_sled_to_initialized_rack(&opctx, sled.into_inner()).await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + // Sleds /// List sleds diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 64790c49c2..5dfdcc151d 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -21,8 +21,10 @@ use nexus_test_utils::SLED_AGENT_UUID; use nexus_test_utils::SWITCH_UUID; use nexus_types::external_api::params; use nexus_types::external_api::shared; +use nexus_types::external_api::shared::Baseboard; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::Ipv4Range; +use nexus_types::external_api::shared::UninitializedSled; use omicron_common::api::external::AddressLotKind; use omicron_common::api::external::ByteCount; use omicron_common::api::external::IdentityMetadataCreateParams; @@ -39,6 +41,7 @@ use omicron_test_utils::certificates::CertificateChain; use std::net::IpAddr; use std::net::Ipv4Addr; use std::str::FromStr; +use uuid::Uuid; lazy_static! { pub static ref HARDWARE_RACK_URL: String = @@ -57,6 +60,16 @@ lazy_static! 
{ pub static ref SLED_INSTANCES_URL: String = format!("/v1/system/hardware/sleds/{}/instances", SLED_AGENT_UUID); + pub static ref DEMO_UNINITIALIZED_SLED: UninitializedSled = UninitializedSled { + baseboard: Baseboard { + serial: "demo-serial".to_string(), + part: "demo-part".to_string(), + revision: 6 + }, + rack_id: Uuid::new_v4(), + cubby: 1 + }; + // Global policy pub static ref SYSTEM_POLICY_URL: &'static str = "/v1/system/policy"; @@ -1577,7 +1590,9 @@ lazy_static! { url: "/v1/system/hardware/sleds", visibility: Visibility::Public, unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], + allowed_methods: vec![AllowedMethod::Get, AllowedMethod::Post( + serde_json::to_value(&*DEMO_UNINITIALIZED_SLED).unwrap() + )], }, VerifyEndpoint { diff --git a/nexus/tests/integration_tests/rack.rs b/nexus/tests/integration_tests/rack.rs index 2c191f27ae..9f77223871 100644 --- a/nexus/tests/integration_tests/rack.rs +++ b/nexus/tests/integration_tests/rack.rs @@ -10,8 +10,14 @@ use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::TEST_SUITE_PASSWORD; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; +use nexus_types::external_api::shared::UninitializedSled; use nexus_types::external_api::views::Rack; +use nexus_types::internal_api::params::Baseboard; +use nexus_types::internal_api::params::SledAgentStartupInfo; +use nexus_types::internal_api::params::SledRole; +use omicron_common::api::external::ByteCount; use omicron_nexus::TestInterfaces; +use uuid::Uuid; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; @@ -77,3 +83,63 @@ async fn test_rack_initialization(cptestctx: &ControlPlaneTestContext) { ) .await; } + +#[nexus_test] +async fn test_uninitialized_sled_list(cptestctx: &ControlPlaneTestContext) { + let internal_client = &cptestctx.internal_client; + let external_client = &cptestctx.external_client; + let list_url = "/v1/system/hardware/uninitialized-sleds"; + 
let mut uninitialized_sleds =
+        NexusRequest::object_get(external_client, &list_url)
+            .authn_as(AuthnMode::PrivilegedUser)
+            .execute()
+            .await
+            .expect("failed to get uninitialized sleds")
+            .parsed_body::<Vec<UninitializedSled>>()
+            .unwrap();
+    debug!(cptestctx.logctx.log, "{:#?}", uninitialized_sleds);
+
+    // There are currently two fake sim gimlets created in the latest inventory
+    // collection as part of test setup.
+    assert_eq!(2, uninitialized_sleds.len());
+
+    // Insert one of these fake sleds into the `sled` table.
+    // Just pick some random fields other than `baseboard`
+    let baseboard = uninitialized_sleds.pop().unwrap().baseboard;
+    let sled_uuid = Uuid::new_v4();
+    let sa = SledAgentStartupInfo {
+        sa_address: "[fd00:1122:3344:01::1]:8080".parse().unwrap(),
+        role: SledRole::Gimlet,
+        baseboard: Baseboard {
+            serial_number: baseboard.serial,
+            part_number: baseboard.part,
+            revision: baseboard.revision,
+        },
+        usable_hardware_threads: 32,
+        usable_physical_ram: ByteCount::from_gibibytes_u32(100),
+        reservoir_size: ByteCount::from_mebibytes_u32(100),
+    };
+    internal_client
+        .make_request(
+            Method::POST,
+            format!("/sled-agents/{sled_uuid}").as_str(),
+            Some(&sa),
+            StatusCode::NO_CONTENT,
+        )
+        .await
+        .unwrap();
+
+    // Ensure there's only one uninitialized sled remaining, and it's not
+    // the one that was just added into the `sled` table
+    let uninitialized_sleds_2 =
+        NexusRequest::object_get(external_client, &list_url)
+            .authn_as(AuthnMode::PrivilegedUser)
+            .execute()
+            .await
+            .expect("failed to get uninitialized sleds")
+            .parsed_body::<Vec<UninitializedSled>>()
+            .unwrap();
+    debug!(cptestctx.logctx.log, "{:#?}", uninitialized_sleds);
+    assert_eq!(1, uninitialized_sleds_2.len());
+    assert_eq!(uninitialized_sleds, uninitialized_sleds_2);
+}
diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt
index 7f0c30c471..dd387ab979 100644
--- a/nexus/tests/output/nexus_tags.txt
+++ b/nexus/tests/output/nexus_tags.txt
@@ -110,6 +110,7 @@ snapshot_view                            GET      
/v1/snapshots/{snapshot} API operations found with tag "system/hardware" OPERATION ID METHOD URL PATH +add_sled_to_initialized_rack POST /v1/system/hardware/sleds networking_switch_port_apply_settings POST /v1/system/hardware/switch-port/{port}/settings networking_switch_port_clear_settings DELETE /v1/system/hardware/switch-port/{port}/settings networking_switch_port_list GET /v1/system/hardware/switch-port diff --git a/nexus/types/src/external_api/shared.rs b/nexus/types/src/external_api/shared.rs index 48fbb9c10d..a4c5ae1e62 100644 --- a/nexus/types/src/external_api/shared.rs +++ b/nexus/types/src/external_api/shared.rs @@ -245,6 +245,42 @@ pub enum UpdateableComponentType { HostOmicron, } +/// Properties that uniquely identify an Oxide hardware component +#[derive( + Clone, + Debug, + Serialize, + Deserialize, + JsonSchema, + PartialOrd, + Ord, + PartialEq, + Eq, +)] +pub struct Baseboard { + pub serial: String, + pub part: String, + pub revision: i64, +} + +/// A sled that has not been added to an initialized rack yet +#[derive( + Clone, + Debug, + Serialize, + Deserialize, + JsonSchema, + PartialOrd, + Ord, + PartialEq, + Eq, +)] +pub struct UninitializedSled { + pub baseboard: Baseboard, + pub rack_id: Uuid, + pub cubby: u16, +} + #[cfg(test)] mod test { use super::Policy; diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index b34fc7a542..9dfe36d63b 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -5,7 +5,7 @@ //! Views are response bodies, most of which are public lenses onto DB models. 
use crate::external_api::shared::{ - self, IpKind, IpRange, ServiceUsingCertificate, + self, Baseboard, IpKind, IpRange, ServiceUsingCertificate, }; use crate::identity::AssetIdentityMetadata; use api_identity::ObjectIdentity; @@ -274,44 +274,8 @@ pub struct Rack { pub identity: AssetIdentityMetadata, } -/// View of a sled that has not been added to an initialized rack yet -#[derive( - Clone, - Debug, - Serialize, - Deserialize, - JsonSchema, - PartialOrd, - Ord, - PartialEq, - Eq, -)] -pub struct UninitializedSled { - pub baseboard: Baseboard, - pub rack_id: Uuid, - pub cubby: u16, -} - // FRUs -/// Properties that uniquely identify an Oxide hardware component -#[derive( - Clone, - Debug, - Serialize, - Deserialize, - JsonSchema, - PartialOrd, - Ord, - PartialEq, - Eq, -)] -pub struct Baseboard { - pub serial: String, - pub part: String, - pub revision: i64, -} - // SLEDS /// An operator's view of a Sled. diff --git a/nexus/types/src/internal_api/params.rs b/nexus/types/src/internal_api/params.rs index c0991ebb17..bc25e8d4bd 100644 --- a/nexus/types/src/internal_api/params.rs +++ b/nexus/types/src/internal_api/params.rs @@ -25,7 +25,7 @@ use uuid::Uuid; /// /// Note that this may change if the sled is physically moved /// within the rack. -#[derive(Serialize, Deserialize, JsonSchema)] +#[derive(Serialize, Deserialize, JsonSchema, Debug)] #[serde(rename_all = "snake_case")] pub enum SledRole { /// The sled is a general compute sled. 
@@ -45,7 +45,7 @@ pub struct Baseboard
 }
 
 /// Sent by a sled agent on startup to Nexus to request further instruction
-#[derive(Serialize, Deserialize, JsonSchema)]
+#[derive(Serialize, Deserialize, Debug, JsonSchema)]
 pub struct SledAgentStartupInfo {
     /// The address of the sled agent's API endpoint
     pub sa_address: SocketAddrV6,
diff --git a/nexus/types/src/inventory.rs b/nexus/types/src/inventory.rs
index 19c323d894..9401727162 100644
--- a/nexus/types/src/inventory.rs
+++ b/nexus/types/src/inventory.rs
@@ -20,6 +20,8 @@ use std::sync::Arc;
 use strum::EnumIter;
 use uuid::Uuid;
 
+use crate::external_api::shared::Baseboard;
+
 /// Results of collecting hardware/software inventory from various Omicron
 /// components
 ///
@@ -131,6 +133,12 @@ pub struct BaseboardId {
     pub serial_number: String,
 }
 
+impl From<Baseboard> for BaseboardId {
+    fn from(value: Baseboard) -> Self {
+        BaseboardId { part_number: value.part, serial_number: value.serial }
+    }
+}
+
 /// Caboose contents found during a collection
 ///
 /// These are normalized in the database. 
Each distinct `Caboose` is assigned a diff --git a/openapi/nexus.json b/openapi/nexus.json index 0d19e81d9a..704aa393db 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -3610,6 +3610,34 @@ "x-dropshot-pagination": { "required": [] } + }, + "post": { + "tags": [ + "system/hardware" + ], + "summary": "Add a sled to an initialized rack", + "operationId": "add_sled_to_initialized_rack", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UninitializedSled" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } } }, "/v1/system/hardware/sleds/{sled_id}": { @@ -13971,7 +13999,7 @@ ] }, "UninitializedSled": { - "description": "View of a sled that has not been added to an initialized rack yet", + "description": "A sled that has not been added to an initialized rack yet", "type": "object", "properties": { "baseboard": { diff --git a/schema/crdb/14.0.0/up1.sql b/schema/crdb/14.0.0/up1.sql new file mode 100644 index 0000000000..3bff831ceb --- /dev/null +++ b/schema/crdb/14.0.0/up1.sql @@ -0,0 +1,37 @@ +-- Table of all sled subnets allocated for sleds added to an already initialized +-- rack. The sleds in this table and their allocated subnets are created before +-- a sled is added to the `sled` table. Addition to the `sled` table occurs +-- after the sled is initialized and notifies Nexus about itself. +-- +-- For simplicity and space savings, this table doesn't actually contain the +-- full subnets for a given sled, but only the octet that extends a /56 rack +-- subnet to a /64 sled subnet. The rack subnet is maintained in the `rack` +-- table. +-- +-- This table does not include subnet octets allocated during RSS and therefore +-- all of the octets start at 33. 
This makes the data in this table purely additive +-- post-RSS, which also implies that we cannot re-use subnet octets if an original +-- sled that was part of RSS was removed from the cluster. +CREATE TABLE IF NOT EXISTS omicron.public.sled_underlay_subnet_allocation ( + -- The physical identity of the sled + -- (foreign key into `hw_baseboard_id` table) + hw_baseboard_id UUID PRIMARY KEY, + + -- The rack to which a sled is being added + -- (foreign key into `rack` table) + -- + -- We require this because the sled is not yet part of the sled table when + -- we first allocate a subnet for it. + rack_id UUID NOT NULL, + + -- The sled to which a subnet is being allocated + -- + -- Eventually will be a foreign key into the `sled` table when the sled notifies nexus + -- about itself after initialization. + sled_id UUID NOT NULL, + + -- The octet that extends a /56 rack subnet to a /64 sled subnet + -- + -- Always between 33 and 255 inclusive + subnet_octet INT2 NOT NULL UNIQUE CHECK (subnet_octet BETWEEN 33 AND 255) +); diff --git a/schema/crdb/14.0.0/up2.sql b/schema/crdb/14.0.0/up2.sql new file mode 100644 index 0000000000..c3e18fa166 --- /dev/null +++ b/schema/crdb/14.0.0/up2.sql @@ -0,0 +1,5 @@ +-- Add an index which allows pagination by {rack_id, sled_id} pairs. +CREATE UNIQUE INDEX IF NOT EXISTS lookup_subnet_allocation_by_rack_and_sled ON omicron.public.sled_underlay_subnet_allocation ( + rack_id, + sled_id +); diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index fc3bc37fd7..728b084982 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -158,6 +158,51 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_resource_by_sled ON omicron.public.sled id ); + +-- Table of all sled subnets allocated for sleds added to an already initialized +-- rack. The sleds in this table and their allocated subnets are created before +-- a sled is added to the `sled` table. 
Addition to the `sled` table occurs +-- after the sled is initialized and notifies Nexus about itself. +-- +-- For simplicity and space savings, this table doesn't actually contain the +-- full subnets for a given sled, but only the octet that extends a /56 rack +-- subnet to a /64 sled subnet. The rack subnet is maintained in the `rack` +-- table. +-- +-- This table does not include subnet octets allocated during RSS and therefore +-- all of the octets start at 33. This makes the data in this table purely additive +-- post-RSS, which also implies that we cannot re-use subnet octets if an original +-- sled that was part of RSS was removed from the cluster. +CREATE TABLE IF NOT EXISTS omicron.public.sled_underlay_subnet_allocation ( + -- The physical identity of the sled + -- (foreign key into `hw_baseboard_id` table) + hw_baseboard_id UUID PRIMARY KEY, + + -- The rack to which a sled is being added + -- (foreign key into `rack` table) + -- + -- We require this because the sled is not yet part of the sled table when + -- we first allocate a subnet for it. + rack_id UUID NOT NULL, + + -- The sled to which a subnet is being allocated + -- + -- Eventually will be a foreign key into the `sled` table when the sled notifies nexus + -- about itself after initialization. + sled_id UUID NOT NULL, + + -- The octet that extends a /56 rack subnet to a /64 sled subnet + -- + -- Always between 33 and 255 inclusive + subnet_octet INT2 NOT NULL UNIQUE CHECK (subnet_octet BETWEEN 33 AND 255) +); + +-- Add an index which allows pagination by {rack_id, sled_id} pairs. 
+CREATE UNIQUE INDEX IF NOT EXISTS lookup_subnet_allocation_by_rack_and_sled ON omicron.public.sled_underlay_subnet_allocation ( + rack_id, + sled_id +); + /* * Switches */ @@ -2952,7 +2997,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '13.0.0', NULL) + ( TRUE, NOW(), NOW(), '14.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; From f03c7d5b460f149f626dd82bcf72cdc47d5a4552 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 01:39:26 +0000 Subject: [PATCH 3/7] Update Rust to v1.74.0 (#4543) Co-authored-by: Rain --- bootstore/src/schemes/v0/request_manager.rs | 2 +- common/src/api/external/mod.rs | 2 +- illumos-utils/src/running_zone.rs | 5 +++-- nexus/db-queries/src/db/queries/volume.rs | 7 ++++--- nexus/db-queries/src/db/saga_recovery.rs | 3 +-- oximeter/instruments/src/kstat/link.rs | 4 ++-- rust-toolchain.toml | 2 +- wicket/src/ui/widgets/popup.rs | 2 +- 8 files changed, 14 insertions(+), 13 deletions(-) diff --git a/bootstore/src/schemes/v0/request_manager.rs b/bootstore/src/schemes/v0/request_manager.rs index 780213430c..90466fdc07 100644 --- a/bootstore/src/schemes/v0/request_manager.rs +++ b/bootstore/src/schemes/v0/request_manager.rs @@ -109,7 +109,7 @@ impl RequestManager { let expiry = now + self.config.rack_init_timeout; let mut acks = InitAcks::default(); acks.expected = - packages.keys().cloned().filter(|id| id != &self.id).collect(); + packages.keys().filter(|&id| id != &self.id).cloned().collect(); let req = TrackableRequest::InitRack { rack_uuid, packages: packages.clone(), diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index adf661516a..3e58d1d4d4 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -409,7 +409,7 @@ impl SemverVersion { /// This is the official ECMAScript-compatible validation regex for /// semver: /// - const VALIDATION_REGEX: &str = 
r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"; + const VALIDATION_REGEX: &'static str = r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"; } impl JsonSchema for SemverVersion { diff --git a/illumos-utils/src/running_zone.rs b/illumos-utils/src/running_zone.rs index bdf7ed0cbf..ba8cd009e8 100644 --- a/illumos-utils/src/running_zone.rs +++ b/illumos-utils/src/running_zone.rs @@ -214,7 +214,7 @@ mod zenter { // the contracts used for this come from templates that define becoming // empty as a critical event. pub fn contract_reaper(log: Logger) { - const EVENT_PATH: &[u8] = b"/system/contract/process/pbundle"; + const EVENT_PATH: &'static [u8] = b"/system/contract/process/pbundle"; const CT_PR_EV_EMPTY: u64 = 1; let cpath = CString::new(EVENT_PATH).unwrap(); @@ -327,7 +327,8 @@ mod zenter { } impl Template { - const TEMPLATE_PATH: &[u8] = b"/system/contract/process/template\0"; + const TEMPLATE_PATH: &'static [u8] = + b"/system/contract/process/template\0"; // Constants related to how the contract below is managed. 
See
         // `usr/src/uts/common/sys/contract/process.h` in the illumos sources
diff --git a/nexus/db-queries/src/db/queries/volume.rs b/nexus/db-queries/src/db/queries/volume.rs
index 31882dca89..2c1a9af19b 100644
--- a/nexus/db-queries/src/db/queries/volume.rs
+++ b/nexus/db-queries/src/db/queries/volume.rs
@@ -412,10 +412,11 @@ pub struct DecreaseCrucibleResourceCountAndSoftDeleteVolume {
 }
 
 impl DecreaseCrucibleResourceCountAndSoftDeleteVolume {
-    const UPDATED_REGION_SNAPSHOTS_TABLE: &str = "updated_region_snapshots";
-    const REGION_SNAPSHOTS_TO_CLEAN_UP_TABLE: &str =
+    const UPDATED_REGION_SNAPSHOTS_TABLE: &'static str =
+        "updated_region_snapshots";
+    const REGION_SNAPSHOTS_TO_CLEAN_UP_TABLE: &'static str =
         "region_snapshots_to_clean_up";
-    const UPDATED_VOLUME_TABLE: &str = "updated_volume";
+    const UPDATED_VOLUME_TABLE: &'static str = "updated_volume";
 
     pub fn new(volume_id: Uuid, snapshot_addrs: Vec<String>) -> Self {
         Self {
diff --git a/nexus/db-queries/src/db/saga_recovery.rs b/nexus/db-queries/src/db/saga_recovery.rs
index f3eada1645..802093b889 100644
--- a/nexus/db-queries/src/db/saga_recovery.rs
+++ b/nexus/db-queries/src/db/saga_recovery.rs
@@ -143,8 +143,7 @@ where
         .await
     });
 
-    let mut completion_futures = vec![];
-    completion_futures.reserve(recovery_futures.len());
+    let mut completion_futures = Vec::with_capacity(recovery_futures.len());
 
     // Loads and resumes all sagas in serial.
     
for recovery_future in recovery_futures { let saga_complete_future = recovery_future.await?; diff --git a/oximeter/instruments/src/kstat/link.rs b/oximeter/instruments/src/kstat/link.rs index d22ac60378..03397c4108 100644 --- a/oximeter/instruments/src/kstat/link.rs +++ b/oximeter/instruments/src/kstat/link.rs @@ -268,8 +268,8 @@ mod tests { } impl TestEtherstub { - const PFEXEC: &str = "/usr/bin/pfexec"; - const DLADM: &str = "/usr/sbin/dladm"; + const PFEXEC: &'static str = "/usr/bin/pfexec"; + const DLADM: &'static str = "/usr/sbin/dladm"; fn new() -> Self { let name = format!( "kstest{}0", diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 804ff08cce..65ee8a9912 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -4,5 +4,5 @@ # # We choose a specific toolchain (rather than "stable") for repeatability. The # intent is to keep this up-to-date with recently-released stable Rust. -channel = "1.73.0" +channel = "1.74.0" profile = "default" diff --git a/wicket/src/ui/widgets/popup.rs b/wicket/src/ui/widgets/popup.rs index 19d7aa18b1..fb8c0f1f24 100644 --- a/wicket/src/ui/widgets/popup.rs +++ b/wicket/src/ui/widgets/popup.rs @@ -464,7 +464,7 @@ pub fn draw_buttons( let button_rects = Layout::default() .direction(Direction::Horizontal) .horizontal_margin(2) - .constraints(constraints.as_ref()) + .constraints(constraints) .split(rect); let block = Block::default() From b9d8b8f9c3e8f4b33cf11b546b96b5fe134906eb Mon Sep 17 00:00:00 2001 From: Rain Date: Mon, 27 Nov 2023 19:08:38 -0800 Subject: [PATCH 4/7] [update-engine] fix GroupDisplayStats to avoid integer underflow (#4561) This could happen if an empty `EventReport` is passed in -- in that case we'd transition to `Running` but return `NotStarted`. Fix this by not transitioning `self.kind` to `Running` if we're going to return `NotStarted`. This does bloat up the code a little but I think is clearer overall. Thanks to @jgallagher for all the help debugging this! 
Also clean up some related logic and add tests. Fixes #4507. --- .../examples/update-engine-basic/display.rs | 1 + update-engine/src/buffer.rs | 228 +-------- update-engine/src/display/group_display.rs | 454 +++++++++++++++--- update-engine/src/test_utils.rs | 284 ++++++++++- wicket/src/cli/rack_update.rs | 1 + 5 files changed, 683 insertions(+), 285 deletions(-) diff --git a/update-engine/examples/update-engine-basic/display.rs b/update-engine/examples/update-engine-basic/display.rs index 122777211b..891bdce6d3 100644 --- a/update-engine/examples/update-engine-basic/display.rs +++ b/update-engine/examples/update-engine-basic/display.rs @@ -88,6 +88,7 @@ async fn display_group( slog::info!(log, "setting up display"); let mut display = GroupDisplay::new( + log, [ (GroupDisplayKey::Example, "example"), (GroupDisplayKey::Other, "other"), diff --git a/update-engine/src/buffer.rs b/update-engine/src/buffer.rs index 6e0e66d6d0..36a0626963 100644 --- a/update-engine/src/buffer.rs +++ b/update-engine/src/buffer.rs @@ -1627,6 +1627,16 @@ pub enum TerminalKind { Aborted, } +impl fmt::Display for TerminalKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Completed => write!(f, "completed"), + Self::Failed => write!(f, "failed"), + Self::Aborted => write!(f, "aborted"), + } + } +} + impl ExecutionStatus { /// Returns the terminal status and the total amount of time elapsed, or /// None if the execution has not reached a terminal state. 
@@ -1671,17 +1681,13 @@ mod tests { use std::collections::HashSet; use anyhow::{bail, ensure, Context}; - use futures::StreamExt; use indexmap::IndexSet; use omicron_test_utils::dev::test_setup_log; use serde::{de::IntoDeserializer, Deserialize}; - use tokio::sync::mpsc; - use tokio_stream::wrappers::ReceiverStream; use crate::{ - events::{ProgressCounter, ProgressUnits, StepProgress}, - test_utils::TestSpec, - StepContext, StepSuccess, UpdateEngine, + events::ProgressCounter, + test_utils::{generate_test_events, GenerateTestEventsKind, TestSpec}, }; use super::*; @@ -1689,108 +1695,11 @@ mod tests { #[tokio::test] async fn test_buffer() { let logctx = test_setup_log("test_buffer"); - // The channel is big enough to contain all possible events. - let (sender, receiver) = mpsc::channel(512); - let engine: UpdateEngine = - UpdateEngine::new(&logctx.log, sender); - - engine - .new_step("foo".to_owned(), 1, "Step 1", move |_cx| async move { - StepSuccess::new(()).into() - }) - .register(); - - engine - .new_step("bar".to_owned(), 2, "Step 2", move |cx| async move { - for _ in 0..20 { - cx.send_progress(StepProgress::with_current_and_total( - 5, - 20, - ProgressUnits::BYTES, - Default::default(), - )) - .await; - - cx.send_progress(StepProgress::reset( - Default::default(), - "reset step 2", - )) - .await; - - cx.send_progress(StepProgress::retry("retry step 2")).await; - } - StepSuccess::new(()).into() - }) - .register(); - - engine - .new_step( - "nested".to_owned(), - 3, - "Step 3 (this is nested)", - move |parent_cx| async move { - parent_cx - .with_nested_engine(|engine| { - define_nested_engine(&parent_cx, engine); - Ok(()) - }) - .await - .expect_err("this is expected to fail"); - - StepSuccess::new(()).into() - }, - ) - .register(); - - let log = logctx.log.clone(); - engine - .new_step( - "remote-nested".to_owned(), - 20, - "Step 4 (remote nested)", - move |cx| async move { - let (sender, mut receiver) = mpsc::channel(16); - let mut engine = 
UpdateEngine::new(&log, sender); - define_remote_nested_engine(&mut engine, 20); - - let mut buffer = EventBuffer::default(); - - let mut execute_fut = std::pin::pin!(engine.execute()); - let mut execute_done = false; - loop { - tokio::select! { - res = &mut execute_fut, if !execute_done => { - res.expect("remote nested engine completed successfully"); - execute_done = true; - } - Some(event) = receiver.recv() => { - // Generate complete reports to ensure deduping - // happens within StepContexts. - buffer.add_event(event); - cx.send_nested_report(buffer.generate_report()).await?; - } - else => { - break; - } - } - } - - StepSuccess::new(()).into() - }, - ) - .register(); - - // The step index here (100) is large enough to be higher than all nested - // steps. - engine - .new_step("baz".to_owned(), 100, "Step 5", move |_cx| async move { - StepSuccess::new(()).into() - }) - .register(); - - engine.execute().await.expect("execution successful"); - let generated_events: Vec<_> = - ReceiverStream::new(receiver).collect().await; + let generated_events = generate_test_events( + &logctx.log, + GenerateTestEventsKind::Completed, + ) + .await; let test_cx = BufferTestContext::new(generated_events); @@ -2417,71 +2326,6 @@ mod tests { } } - fn define_nested_engine<'a>( - parent_cx: &'a StepContext, - engine: &mut UpdateEngine<'a, TestSpec>, - ) { - engine - .new_step( - "nested-foo".to_owned(), - 4, - "Nested step 1", - move |cx| async move { - parent_cx - .send_progress(StepProgress::with_current_and_total( - 1, - 3, - "steps", - Default::default(), - )) - .await; - cx.send_progress( - StepProgress::progress(Default::default()), - ) - .await; - StepSuccess::new(()).into() - }, - ) - .register(); - - engine - .new_step::<_, _, ()>( - "nested-bar".to_owned(), - 5, - "Nested step 2 (fails)", - move |cx| async move { - // This is used by NestedProgressCheck below. 
- parent_cx - .send_progress(StepProgress::with_current_and_total( - 2, - 3, - "steps", - Default::default(), - )) - .await; - - cx.send_progress(StepProgress::with_current( - 50, - "units", - Default::default(), - )) - .await; - - parent_cx - .send_progress(StepProgress::with_current_and_total( - 3, - 3, - "steps", - Default::default(), - )) - .await; - - bail!("failing step") - }, - ) - .register(); - } - #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum NestedProgressCheck { Initial, @@ -2530,42 +2374,4 @@ mod tests { ); } } - - fn define_remote_nested_engine( - engine: &mut UpdateEngine<'_, TestSpec>, - start_id: usize, - ) { - engine - .new_step( - "nested-foo".to_owned(), - start_id + 1, - "Nested step 1", - move |cx| async move { - cx.send_progress( - StepProgress::progress(Default::default()), - ) - .await; - StepSuccess::new(()).into() - }, - ) - .register(); - - engine - .new_step::<_, _, ()>( - "nested-bar".to_owned(), - start_id + 2, - "Nested step 2", - move |cx| async move { - cx.send_progress(StepProgress::with_current( - 20, - "units", - Default::default(), - )) - .await; - - StepSuccess::new(()).into() - }, - ) - .register(); - } } diff --git a/update-engine/src/display/group_display.rs b/update-engine/src/display/group_display.rs index 0d50489a9f..cfd37aac16 100644 --- a/update-engine/src/display/group_display.rs +++ b/update-engine/src/display/group_display.rs @@ -30,6 +30,7 @@ use super::{ pub struct GroupDisplay { // We don't need to add any buffering here because we already write data to // the writer in a line-buffered fashion (see Self::write_events). + log: slog::Logger, writer: W, max_width: usize, // This is set to the highest value of root_total_elapsed seen from any event reports. @@ -45,6 +46,7 @@ impl GroupDisplay { /// /// The function passed in is expected to create a writer. 
pub fn new( + log: &slog::Logger, keys_and_prefixes: impl IntoIterator, writer: W, ) -> Self @@ -70,6 +72,7 @@ impl GroupDisplay { let not_started = single_states.len(); Self { + log: log.new(slog::o!("component" => "GroupDisplay")), writer, max_width, // This creates the stopwatch in the stopped state with duration 0 -- i.e. a minimal @@ -84,6 +87,7 @@ impl GroupDisplay { /// Creates a new `GroupDisplay` with the provided report keys, using the /// `Display` impl to obtain the respective prefixes. pub fn new_with_display( + log: &slog::Logger, keys: impl IntoIterator, writer: W, ) -> Self @@ -91,6 +95,7 @@ impl GroupDisplay { K: fmt::Display, { Self::new( + log, keys.into_iter().map(|k| { let prefix = k.to_string(); (k, prefix) @@ -144,7 +149,30 @@ impl GroupDisplay { TokioSw::with_elapsed_started(root_total_elapsed); } } + self.stats.apply_result(result); + + if result.before != result.after { + slog::info!( + self.log, + "add_event_report caused state transition"; + "prefix" => &state.prefix, + "before" => %result.before, + "after" => %result.after, + "current_stats" => ?self.stats, + "root_total_elapsed" => ?result.root_total_elapsed, + ); + } else { + slog::trace!( + self.log, + "add_event_report called, state did not change"; + "prefix" => &state.prefix, + "state" => %result.before, + "current_stats" => ?self.stats, + "root_total_elapsed" => ?result.root_total_elapsed, + ); + } + Ok(()) } else { Err(UnknownReportKey {}) @@ -179,7 +207,7 @@ impl GroupDisplay { } } -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct GroupDisplayStats { /// The total number of reports. pub total: usize, @@ -236,18 +264,9 @@ impl GroupDisplayStats { } fn apply_result(&mut self, result: AddEventReportResult) { - // Process result.after first to avoid integer underflow. 
- match result.after { - SingleStateTag::NotStarted => self.not_started += 1, - SingleStateTag::Running => self.running += 1, - SingleStateTag::Terminal(TerminalKind::Completed) => { - self.completed += 1 - } - SingleStateTag::Terminal(TerminalKind::Failed) => self.failed += 1, - SingleStateTag::Terminal(TerminalKind::Aborted) => { - self.aborted += 1 - } - SingleStateTag::Overwritten => self.overwritten += 1, + if result.before == result.after { + // Nothing to do. + return; } match result.before { @@ -262,6 +281,19 @@ impl GroupDisplayStats { } SingleStateTag::Overwritten => self.overwritten -= 1, } + + match result.after { + SingleStateTag::NotStarted => self.not_started += 1, + SingleStateTag::Running => self.running += 1, + SingleStateTag::Terminal(TerminalKind::Completed) => { + self.completed += 1 + } + SingleStateTag::Terminal(TerminalKind::Failed) => self.failed += 1, + SingleStateTag::Terminal(TerminalKind::Aborted) => { + self.aborted += 1 + } + SingleStateTag::Overwritten => self.overwritten += 1, + } } fn format_line( @@ -336,92 +368,139 @@ impl SingleState { &mut self, event_report: EventReport, ) -> AddEventReportResult { - let before = match &self.kind { + match &mut self.kind { SingleStateKind::NotStarted { .. } => { - self.kind = SingleStateKind::Running { - event_buffer: EventBuffer::new(8), + // We're starting a new update. + let before = SingleStateTag::NotStarted; + let mut event_buffer = EventBuffer::default(); + let (after, root_total_elapsed) = + match Self::apply_report(&mut event_buffer, event_report) { + ApplyReportResult::NotStarted => { + // This means that the event report was empty. Don't + // update `self.kind`. 
+ (SingleStateTag::NotStarted, None) + } + ApplyReportResult::Running(root_total_elapsed) => { + self.kind = + SingleStateKind::Running { event_buffer }; + (SingleStateTag::Running, Some(root_total_elapsed)) + } + ApplyReportResult::Terminal(info) => { + let terminal_kind = info.kind; + let root_total_elapsed = info.root_total_elapsed; + + self.kind = SingleStateKind::Terminal { + info, + pending_event_buffer: Some(event_buffer), + }; + ( + SingleStateTag::Terminal(terminal_kind), + root_total_elapsed, + ) + } + ApplyReportResult::Overwritten => { + self.kind = SingleStateKind::Overwritten { + displayed: false, + }; + (SingleStateTag::Overwritten, None) + } + }; + + AddEventReportResult { before, after, root_total_elapsed } + } + SingleStateKind::Running { event_buffer } => { + // We're in the middle of an update. + let before = SingleStateTag::Running; + let (after, root_total_elapsed) = match Self::apply_report( + event_buffer, + event_report, + ) { + ApplyReportResult::NotStarted => { + // This is an illegal state transition: once a + // non-empty event report has been received, the + // event buffer never goes back to the NotStarted + // state. + unreachable!("illegal state transition from Running to NotStarted") + } + ApplyReportResult::Running(root_total_elapsed) => { + (SingleStateTag::Running, Some(root_total_elapsed)) + } + ApplyReportResult::Terminal(info) => { + let terminal_kind = info.kind; + let root_total_elapsed = info.root_total_elapsed; + + // Grab the event buffer so we can store it in the + // Terminal state below. 
+ let event_buffer = std::mem::replace( + event_buffer, + EventBuffer::new(0), + ); + + self.kind = SingleStateKind::Terminal { + info, + pending_event_buffer: Some(event_buffer), + }; + ( + SingleStateTag::Terminal(terminal_kind), + root_total_elapsed, + ) + } + ApplyReportResult::Overwritten => { + self.kind = + SingleStateKind::Overwritten { displayed: false }; + (SingleStateTag::Overwritten, None) + } }; - SingleStateTag::NotStarted + AddEventReportResult { before, after, root_total_elapsed } } - SingleStateKind::Running { .. } => SingleStateTag::Running, - SingleStateKind::Terminal { info, .. } => { // Once we've reached a terminal state, we don't record any more // events. - return AddEventReportResult::unchanged( + AddEventReportResult::unchanged( SingleStateTag::Terminal(info.kind), info.root_total_elapsed, - ); + ) } SingleStateKind::Overwritten { .. } => { // This update has already completed -- assume that the event // buffer is for a new update, which we don't show. - return AddEventReportResult::unchanged( + AddEventReportResult::unchanged( SingleStateTag::Overwritten, None, - ); + ) } - }; - - let SingleStateKind::Running { event_buffer } = &mut self.kind else { - unreachable!("other branches were handled above"); - }; + } + } + /// The internal logic used by [`Self::add_event_report`]. + fn apply_report( + event_buffer: &mut EventBuffer, + event_report: EventReport, + ) -> ApplyReportResult { if let Some(root_execution_id) = event_buffer.root_execution_id() { if event_report.root_execution_id != Some(root_execution_id) { // The report is for a different execution ID -- assume that // this event is completed and mark our current execution as // completed. 
- self.kind = SingleStateKind::Overwritten { displayed: false }; - return AddEventReportResult { - before, - after: SingleStateTag::Overwritten, - root_total_elapsed: None, - }; + return ApplyReportResult::Overwritten; } } event_buffer.add_event_report(event_report); - let (after, max_total_elapsed) = - match event_buffer.root_execution_summary() { - Some(summary) => { - match summary.execution_status { - ExecutionStatus::NotStarted => { - (SingleStateTag::NotStarted, None) - } - ExecutionStatus::Running { - root_total_elapsed: max_total_elapsed, - .. - } => (SingleStateTag::Running, Some(max_total_elapsed)), - ExecutionStatus::Terminal(info) => { - // Grab the event buffer to store it in the terminal state. - let event_buffer = std::mem::replace( - event_buffer, - EventBuffer::new(0), - ); - let terminal_kind = info.kind; - let root_total_elapsed = info.root_total_elapsed; - self.kind = SingleStateKind::Terminal { - info, - pending_event_buffer: Some(event_buffer), - }; - ( - SingleStateTag::Terminal(terminal_kind), - root_total_elapsed, - ) - } - } + match event_buffer.root_execution_summary() { + Some(summary) => match summary.execution_status { + ExecutionStatus::NotStarted => ApplyReportResult::NotStarted, + ExecutionStatus::Running { root_total_elapsed, .. } => { + ApplyReportResult::Running(root_total_elapsed) } - None => { - // We don't have a summary yet. - (SingleStateTag::NotStarted, None) + ExecutionStatus::Terminal(info) => { + ApplyReportResult::Terminal(info) } - }; - - AddEventReportResult { - before, - after, - root_total_elapsed: max_total_elapsed, + }, + None => { + // We don't have a summary yet. 
+                ApplyReportResult::NotStarted
+            }
+        }
+    }
 }
 
@@ -488,6 +567,7 @@ enum SingleStateKind {
     },
 }
 
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
 struct AddEventReportResult {
     before: SingleStateTag,
     after: SingleStateTag,
@@ -503,10 +583,238 @@ impl AddEventReportResult {
     }
 }
 
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
 enum SingleStateTag {
     NotStarted,
     Running,
     Terminal(TerminalKind),
     Overwritten,
 }
+
+impl fmt::Display for SingleStateTag {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::NotStarted => write!(f, "not started"),
+            Self::Running => write!(f, "running"),
+            Self::Terminal(kind) => write!(f, "{kind}"),
+            Self::Overwritten => write!(f, "overwritten"),
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+enum ApplyReportResult {
+    NotStarted,
+    Running(Duration),
+    Terminal(ExecutionTerminalInfo),
+    Overwritten,
+}
+
+#[cfg(test)]
+mod tests {
+    use omicron_test_utils::dev::test_setup_log;
+
+    use super::*;
+
+    use crate::test_utils::{generate_test_events, GenerateTestEventsKind};
+
+    #[tokio::test]
+    async fn test_stats() {
+        let logctx = test_setup_log("test_stats");
+        // Generate three sets of events, one for each kind.
+        let generated_completed = generate_test_events(
+            &logctx.log,
+            GenerateTestEventsKind::Completed,
+        )
+        .await;
+        let generated_failed =
+            generate_test_events(&logctx.log, GenerateTestEventsKind::Failed)
+                .await;
+        let generated_aborted =
+            generate_test_events(&logctx.log, GenerateTestEventsKind::Aborted)
+                .await;
+
+        // Set up a `GroupDisplay` with four keys.
+ let mut group_display = GroupDisplay::new_with_display( + &logctx.log, + vec![ + GroupDisplayKey::Completed, + GroupDisplayKey::Failed, + GroupDisplayKey::Aborted, + GroupDisplayKey::Overwritten, + ], + std::io::stdout(), + ); + + let mut expected_stats = GroupDisplayStats { + total: 4, + not_started: 4, + running: 0, + completed: 0, + failed: 0, + aborted: 0, + overwritten: 0, + }; + assert_eq!(group_display.stats(), &expected_stats); + assert!(!expected_stats.is_terminal()); + assert!(!expected_stats.has_failures()); + + // Pass in an empty EventReport -- ensure that this doesn't move it to + // a Running state. + + group_display + .add_event_report( + &GroupDisplayKey::Completed, + EventReport::default(), + ) + .unwrap(); + assert_eq!(group_display.stats(), &expected_stats); + + // Pass in events one by one -- ensure that we're always in the running + // state until we've completed. + { + expected_stats.not_started -= 1; + expected_stats.running += 1; + + let n = generated_completed.len(); + + let mut buffer = EventBuffer::default(); + let mut last_seen = None; + + for (i, event) in + generated_completed.clone().into_iter().enumerate() + { + buffer.add_event(event); + let report = buffer.generate_report_since(&mut last_seen); + group_display + .add_event_report(&GroupDisplayKey::Completed, report) + .unwrap(); + if i == n - 1 { + // The last event should have moved us to the completed + // state. + expected_stats.running -= 1; + expected_stats.completed += 1; + } else { + // We should still be in the running state. + } + assert_eq!(group_display.stats(), &expected_stats); + assert!(!expected_stats.is_terminal()); + assert!(!expected_stats.has_failures()); + } + } + + // Pass in failed events, this time using buffer.generate_report() + // rather than buffer.generate_report_since(). 
+ { + expected_stats.not_started -= 1; + expected_stats.running += 1; + + let n = generated_failed.len(); + + let mut buffer = EventBuffer::default(); + for (i, event) in generated_failed.clone().into_iter().enumerate() { + buffer.add_event(event); + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Failed, report) + .unwrap(); + if i == n - 1 { + // The last event should have moved us to the failed state. + expected_stats.running -= 1; + expected_stats.failed += 1; + assert!(expected_stats.has_failures()); + } else { + // We should still be in the running state. + assert!(!expected_stats.has_failures()); + } + assert_eq!(group_display.stats(), &expected_stats); + } + } + + // Pass in aborted events all at once. + { + expected_stats.not_started -= 1; + expected_stats.running += 1; + + let mut buffer = EventBuffer::default(); + for event in generated_aborted { + buffer.add_event(event); + } + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Aborted, report) + .unwrap(); + // The aborted events should have moved us to the aborted state. + expected_stats.running -= 1; + expected_stats.aborted += 1; + assert_eq!(group_display.stats(), &expected_stats); + + // Try passing in one of the events that, if we were running, would + // cause us to move to an overwritten state. Ensure that that does + // not happen (i.e. expected_stats stays the same) + let mut buffer = EventBuffer::default(); + buffer.add_event(generated_failed.first().unwrap().clone()); + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Aborted, report) + .unwrap(); + assert_eq!(group_display.stats(), &expected_stats); + } + + // For the overwritten state, pass in half of the completed events, and + // then pass in all of the failed events. 
+ + { + expected_stats.not_started -= 1; + expected_stats.running += 1; + + let mut buffer = EventBuffer::default(); + let n = generated_completed.len() / 2; + for event in generated_completed.into_iter().take(n) { + buffer.add_event(event); + } + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Overwritten, report) + .unwrap(); + assert_eq!(group_display.stats(), &expected_stats); + + // Now pass in a single failed event, which has a different + // execution ID. + let mut buffer = EventBuffer::default(); + buffer.add_event(generated_failed.first().unwrap().clone()); + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Overwritten, report) + .unwrap(); + // The overwritten event should have moved us to the overwritten + // state. + expected_stats.running -= 1; + expected_stats.overwritten += 1; + } + + assert!(expected_stats.has_failures()); + assert!(expected_stats.is_terminal()); + + logctx.cleanup_successful(); + } + + #[derive(Debug, Eq, PartialEq, Ord, PartialOrd)] + enum GroupDisplayKey { + Completed, + Failed, + Aborted, + Overwritten, + } + + impl fmt::Display for GroupDisplayKey { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Completed => write!(f, "completed"), + Self::Failed => write!(f, "failed"), + Self::Aborted => write!(f, "aborted"), + Self::Overwritten => write!(f, "overwritten"), + } + } + } +} diff --git a/update-engine/src/test_utils.rs b/update-engine/src/test_utils.rs index 0bacfbeb8d..b943d1ddfe 100644 --- a/update-engine/src/test_utils.rs +++ b/update-engine/src/test_utils.rs @@ -4,9 +4,16 @@ // Copyright 2023 Oxide Computer Company +use anyhow::bail; +use futures::StreamExt; use schemars::JsonSchema; +use tokio::sync::{mpsc, oneshot}; +use tokio_stream::wrappers::ReceiverStream; -use crate::{ExecutionId, StepSpec}; +use crate::{ + events::{Event, ProgressUnits, StepProgress}, + EventBuffer, ExecutionId, StepContext, 
StepSpec, StepSuccess, UpdateEngine,
+};
 
 #[derive(JsonSchema)]
 pub(crate) enum TestSpec {}
@@ -27,3 +34,278 @@ pub(crate) static TEST_EXECUTION_UUID: &str =
 pub fn test_execution_id() -> ExecutionId {
     ExecutionId(TEST_EXECUTION_UUID.parse().expect("valid UUID"))
 }
+
+#[derive(Copy, Clone, Debug)]
+pub(crate) enum GenerateTestEventsKind {
+    Completed,
+    Failed,
+    Aborted,
+}
+
+pub(crate) async fn generate_test_events(
+    log: &slog::Logger,
+    kind: GenerateTestEventsKind,
+) -> Vec<Event<TestSpec>> {
+    // The channel is big enough to contain all possible events.
+    let (sender, receiver) = mpsc::channel(512);
+    let engine = UpdateEngine::new(log, sender);
+
+    match kind {
+        GenerateTestEventsKind::Completed => {
+            define_test_steps(log, &engine, LastStepOutcome::Completed);
+            engine.execute().await.expect("execution successful");
+        }
+        GenerateTestEventsKind::Failed => {
+            define_test_steps(log, &engine, LastStepOutcome::Failed);
+            engine.execute().await.expect_err("execution failed");
+        }
+        GenerateTestEventsKind::Aborted => {
+            // In this case, the last step signals that it has been reached via
+            // sending a message over this channel, and then waits forever. We
+            // abort execution by calling into the AbortHandle.
+            let (sender, receiver) = oneshot::channel();
+            define_test_steps(log, &engine, LastStepOutcome::Aborted(sender));
+            let abort_handle = engine.abort_handle();
+            let mut execute_fut = std::pin::pin!(engine.execute());
+            let mut receiver = std::pin::pin!(receiver);
+            let mut receiver_done = false;
+            loop {
+                tokio::select!
{
+                    res = &mut execute_fut => {
+                        res.expect_err("execution should have been aborted, but completed successfully");
+                        break;
+                    }
+                    _ = &mut receiver, if !receiver_done => {
+                        receiver_done = true;
+                        abort_handle
+                            .abort("test engine deliberately aborted")
+                            .expect("engine should still be alive");
+                    }
+                }
+            }
+        }
+    }
+
+    ReceiverStream::new(receiver).collect().await
+}
+
+#[derive(Debug)]
+enum LastStepOutcome {
+    Completed,
+    Failed,
+    Aborted(oneshot::Sender<()>),
+}
+
+#[derive(Debug)]
+enum Never {}
+
+fn define_test_steps(
+    log: &slog::Logger,
+    engine: &UpdateEngine<TestSpec>,
+    last_step_outcome: LastStepOutcome,
+) {
+    engine
+        .new_step("foo".to_owned(), 1, "Step 1", move |_cx| async move {
+            StepSuccess::new(()).into()
+        })
+        .register();
+
+    engine
+        .new_step("bar".to_owned(), 2, "Step 2", move |cx| async move {
+            for _ in 0..20 {
+                cx.send_progress(StepProgress::with_current_and_total(
+                    5,
+                    20,
+                    ProgressUnits::BYTES,
+                    Default::default(),
+                ))
+                .await;
+
+                cx.send_progress(StepProgress::reset(
+                    Default::default(),
+                    "reset step 2",
+                ))
+                .await;
+
+                cx.send_progress(StepProgress::retry("retry step 2")).await;
+            }
+            StepSuccess::new(()).into()
+        })
+        .register();
+
+    engine
+        .new_step(
+            "nested".to_owned(),
+            3,
+            "Step 3 (this is nested)",
+            move |parent_cx| async move {
+                parent_cx
+                    .with_nested_engine(|engine| {
+                        define_nested_engine(&parent_cx, engine);
+                        Ok(())
+                    })
+                    .await
+                    .expect_err("this is expected to fail");
+
+                StepSuccess::new(()).into()
+            },
+        )
+        .register();
+
+    let log = log.clone();
+    engine
+        .new_step(
+            "remote-nested".to_owned(),
+            20,
+            "Step 4 (remote nested)",
+            move |cx| async move {
+                let (sender, mut receiver) = mpsc::channel(16);
+                let mut engine = UpdateEngine::new(&log, sender);
+                define_remote_nested_engine(&mut engine, 20);
+
+                let mut buffer = EventBuffer::default();
+
+                let mut execute_fut = std::pin::pin!(engine.execute());
+                let mut execute_done = false;
+                loop {
+                    tokio::select!
{
+                        res = &mut execute_fut, if !execute_done => {
+                            res.expect("remote nested engine completed successfully");
+                            execute_done = true;
+                        }
+                        Some(event) = receiver.recv() => {
+                            // Generate complete reports to ensure deduping
+                            // happens within StepContexts.
+                            buffer.add_event(event);
+                            cx.send_nested_report(buffer.generate_report()).await?;
+                        }
+                        else => {
+                            break;
+                        }
+                    }
+                }
+
+                StepSuccess::new(()).into()
+            },
+        )
+        .register();
+
+    // The step index here (100) is large enough to be higher than all nested
+    // steps.
+    engine
+        .new_step("baz".to_owned(), 100, "Step 5", move |_cx| async move {
+            match last_step_outcome {
+                LastStepOutcome::Completed => StepSuccess::new(()).into(),
+                LastStepOutcome::Failed => {
+                    bail!("last step failed")
+                }
+                LastStepOutcome::Aborted(sender) => {
+                    sender.send(()).expect("receiver should be alive");
+                    // The driver of the engine is responsible for aborting it
+                    // at this point.
+                    std::future::pending::<Never>().await;
+                    unreachable!("pending future can never resolve");
+                }
+            }
+        })
+        .register();
+}
+
+fn define_nested_engine<'a>(
+    parent_cx: &'a StepContext<TestSpec>,
+    engine: &mut UpdateEngine<'a, TestSpec>,
+) {
+    engine
+        .new_step(
+            "nested-foo".to_owned(),
+            4,
+            "Nested step 1",
+            move |cx| async move {
+                parent_cx
+                    .send_progress(StepProgress::with_current_and_total(
+                        1,
+                        3,
+                        "steps",
+                        Default::default(),
+                    ))
+                    .await;
+                cx.send_progress(StepProgress::progress(Default::default()))
+                    .await;
+                StepSuccess::new(()).into()
+            },
+        )
+        .register();
+
+    engine
+        .new_step::<_, _, ()>(
+            "nested-bar".to_owned(),
+            5,
+            "Nested step 2 (fails)",
+            move |cx| async move {
+                // This is used by NestedProgressCheck below.
+ parent_cx + .send_progress(StepProgress::with_current_and_total( + 2, + 3, + "steps", + Default::default(), + )) + .await; + + cx.send_progress(StepProgress::with_current( + 50, + "units", + Default::default(), + )) + .await; + + parent_cx + .send_progress(StepProgress::with_current_and_total( + 3, + 3, + "steps", + Default::default(), + )) + .await; + + bail!("failing step") + }, + ) + .register(); +} + +fn define_remote_nested_engine( + engine: &mut UpdateEngine<'_, TestSpec>, + start_id: usize, +) { + engine + .new_step( + "nested-foo".to_owned(), + start_id + 1, + "Nested step 1", + move |cx| async move { + cx.send_progress(StepProgress::progress(Default::default())) + .await; + StepSuccess::new(()).into() + }, + ) + .register(); + + engine + .new_step::<_, _, ()>( + "nested-bar".to_owned(), + start_id + 2, + "Nested step 2", + move |cx| async move { + cx.send_progress(StepProgress::with_current( + 20, + "units", + Default::default(), + )) + .await; + + StepSuccess::new(()).into() + }, + ) + .register(); +} diff --git a/wicket/src/cli/rack_update.rs b/wicket/src/cli/rack_update.rs index fa41fa7b8c..cac0f09ee5 100644 --- a/wicket/src/cli/rack_update.rs +++ b/wicket/src/cli/rack_update.rs @@ -174,6 +174,7 @@ async fn do_attach_to_updates( output: CommandOutput<'_>, ) -> Result<()> { let mut display = GroupDisplay::new_with_display( + &log, update_ids.iter().copied(), output.stderr, ); From 9dcc32d98ec9a9bc2c137c6b4ac77730ebe38c8f Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 05:24:00 +0000 Subject: [PATCH 5/7] Update taiki-e/install-action digest to c1dd9c9 (#4562) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`8f354f3` -> 
`c1dd9c9`](https://togithub.com/taiki-e/install-action/compare/8f354f3...c1dd9c9) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index d79c836fba..c006a41f35 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@8f354f35e51028c902e8ab954045e37739acf562 # v2 + uses: taiki-e/install-action@c1dd9c9e59427252db32b9ece987f4eebc3a021a # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From 19a01c20253044b73e1cb8846fd8b6d77543fdf4 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 06:43:07 +0000 Subject: [PATCH 6/7] Update Rust crate percent-encoding to 2.3.1 (#4563) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index e4588efbde..04d7a1374d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -282,7 +282,7 @@ p256 = "0.13" parse-display = "0.8.2" partial-io = { version = "0.5.4", features = ["proptest1", "tokio1"] } paste = "1.0.14" -percent-encoding = "2.3.0" +percent-encoding = "2.3.1" pem = "1.1" petgraph = "0.6.4" postgres-protocol = "0.6.6" From 
55b39533cfe9a3f2fc1185adaa9c2118efaee6bf Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 10:01:57 -0800 Subject: [PATCH 7/7] Update Rust crate camino-tempfile to 1.1.1 (#4565) --- Cargo.lock | 40 ++++++++++++++++++++++++--------------- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 22 +++++++++++++-------- 3 files changed, 40 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 07f804b03d..76107c8f4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -324,7 +324,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4d45f362125ed144544e57b0ec6de8fd6a296d41a6252fc4a20c0cf12e9ed3a" dependencies = [ - "rustix 0.38.9", + "rustix 0.38.25", "tempfile", "windows-sys 0.48.0", ] @@ -754,9 +754,9 @@ dependencies = [ [[package]] name = "camino-tempfile" -version = "1.0.2" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ab15a83d13f75dbd86f082bdefd160b628476ef58d3b900a0ef74e001bb097" +checksum = "cb905055fa81e4d427f919b2cd0d76a998267de7d225ea767a1894743a5263c2" dependencies = [ "camino", "tempfile", @@ -2151,7 +2151,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5" dependencies = [ "cfg-if 1.0.0", - "rustix 0.38.9", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -3383,7 +3383,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", - "rustix 0.38.9", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -3636,9 +3636,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.5" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "lock_api" @@ -4935,6 +4935,7 @@ dependencies = [ "diesel", "digest", "either", + "errno", "flate2", "futures", "futures-channel", @@ -4979,7 +4980,7 @@ dependencies = [ "regex-syntax 0.8.2", "reqwest", "ring 0.16.20", - "rustix 0.38.9", + "rustix 0.38.25", "schemars", "semver 1.0.20", "serde", @@ -6421,6 +6422,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.3" @@ -6872,14 +6882,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.9" +version = "0.38.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bfe0f2582b4931a45d1fa608f8a8722e8b3c7ac54dd6d5f3b3212791fedef49" +checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" dependencies = [ "bitflags 2.4.0", "errno", "libc", - "linux-raw-sys 0.4.5", + "linux-raw-sys 0.4.11", "windows-sys 0.48.0", ] @@ -8170,14 +8180,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if 1.0.0", "fastrand", - "redox_syscall 0.3.5", - "rustix 0.38.9", + "redox_syscall 0.4.1", + "rustix 0.38.25", "windows-sys 0.48.0", ] diff --git a/Cargo.toml b/Cargo.toml index 04d7a1374d..239fb453dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -160,7 +160,7 @@ byteorder = "1.5.0" bytes = "1.5.0" bytesize = "1.3.0" camino = "1.1" -camino-tempfile = "1.0.2" 
+camino-tempfile = "1.1.1" cancel-safe-futures = "0.1.5" chacha20poly1305 = "0.10.1" ciborium = "0.2.1" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 1a289bd0cb..7757b4ad8b 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -209,58 +209,64 @@ bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-f hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-unknown-linux-gnu.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = 
"1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.aarch64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.aarch64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-unknown-illumos.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } [target.x86_64-unknown-illumos.build-dependencies] bitflags-f595c2ba2a3f28df = { 
package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] }