From 372d980791cdd67071c82692557fd89b6e2951ec Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Tue, 14 May 2024 14:41:43 -0700 Subject: [PATCH 01/37] fix #5743 vs. #5751 semantic merge conflict (#5765) --- nexus/reconfigurator/execution/src/sled_state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/reconfigurator/execution/src/sled_state.rs b/nexus/reconfigurator/execution/src/sled_state.rs index aaa5b6bc268..fafc1c2e44c 100644 --- a/nexus/reconfigurator/execution/src/sled_state.rs +++ b/nexus/reconfigurator/execution/src/sled_state.rs @@ -90,7 +90,7 @@ mod tests { async fn test_decommission_is_idempotent( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), From 949c6abc31b004dba6c6276831a2f2bd8061dab2 Mon Sep 17 00:00:00 2001 From: Rain Date: Tue, 14 May 2024 15:26:24 -0700 Subject: [PATCH 02/37] [nexus-types] more validation for PlanningInput (#5644) While reconciling the PlanningInput with the blueprint, I was a bit concerned about getting nonsensical inputs (e.g. multiple zones get the same IPs) and the system going haywire. In particular, I was concerned about the Serialize and Deserialize impls which were seemingly added for reconfigurator-cli. However, they were liabilities that didn't check for internal invariants. To address this, this PR introduces the notion of a `TriMap`. A `TriMap` is a 1:1:1 map which can be looked up by any one of three keys. Instead of storing just a regular map, the `PlanningInput` now stores a couple of `TriMap` instances via an intermediate `OmicronZoneNetworkResources` struct. A `TriMap` is implemented as a vector with three indexes. It's a pretty straightforward implementation, and I've also added property-based tests to ensure that a `TriMap` is always valid (including always deserializing to a valid structure). At the moment, a `TriMap` does not allow removing entries. If necessary, removals can be implemented by just marking the entry as dead and removing the keys from the indexes. This is fine for our use case, since `TriMap`s aren't long-lived. I've also factored out the omicron zone IP and NIC code into an `OmicronZoneNetworkResources` struct. In the future, we'll make the `BlueprintBuilder` use this struct, so that blueprints are validated whenever they're being mutated. We can also add this to the general code that validates blueprints. 
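To make the shape of the new structure concrete, here is a minimal, non-generic sketch of the "vector of entries plus three index maps" idea. This is illustration only, not the code added by this PR: the entry type and the `u32`/`String` keys are made up, while the real `TriMap` is generic over a `TriMapEntry` trait and reports which existing entries conflicted.

```rust
use std::collections::HashMap;

// Hypothetical entry type: `u32`/`String` stand in for the real key types.
#[derive(Clone, Debug)]
struct Entry {
    zone_id: u32,
    ip_id: u32,
    ip: String,
}

#[derive(Default)]
struct TriMapSketch {
    entries: Vec<Entry>,
    // Each map stores an index into `entries`, so a value is stored once
    // but can be looked up by any of the three keys.
    by_zone: HashMap<u32, usize>,
    by_ip_id: HashMap<u32, usize>,
    by_ip: HashMap<String, usize>,
}

impl TriMapSketch {
    /// Append-only insert that fails if *any* of the three keys is taken.
    fn insert_no_dups(&mut self, e: Entry) -> Result<(), String> {
        if self.by_zone.contains_key(&e.zone_id)
            || self.by_ip_id.contains_key(&e.ip_id)
            || self.by_ip.contains_key(&e.ip)
        {
            return Err(format!("duplicate key in {e:?}"));
        }
        let ix = self.entries.len();
        self.by_zone.insert(e.zone_id, ix);
        self.by_ip_id.insert(e.ip_id, ix);
        self.by_ip.insert(e.ip.clone(), ix);
        self.entries.push(e);
        Ok(())
    }

    /// Lookup by any one key is a single hash-map probe plus a Vec index.
    fn get_by_zone(&self, zone_id: u32) -> Option<&Entry> {
        self.by_zone.get(&zone_id).map(|ix| &self.entries[*ix])
    }
}

fn main() {
    let mut m = TriMapSketch::default();
    m.insert_no_dups(Entry { zone_id: 1, ip_id: 10, ip: "192.0.2.1".into() })
        .unwrap();
    // Same zone_id -> rejected, even though the other two keys are new.
    assert!(m
        .insert_no_dups(Entry { zone_id: 1, ip_id: 11, ip: "192.0.2.2".into() })
        .is_err());
    assert!(m.get_by_zone(1).is_some());
}
```

The real implementation also rebuilds these indexes inside its `Deserialize` impl by inserting the entries one at a time, so a serialized form that violates the 1:1:1 invariant fails to deserialize instead of producing a corrupt map.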
--- Cargo.lock | 3 + dev-tools/reconfigurator-cli/src/main.rs | 5 +- nexus/db-model/src/network_interface.rs | 4 +- .../db-queries/src/db/datastore/deployment.rs | 5 +- nexus/reconfigurator/planning/src/example.rs | 5 +- nexus/types/Cargo.toml | 5 + .../deployment/tri_map.txt | 7 + nexus/types/src/deployment.rs | 17 +- .../types/src/deployment/network_resources.rs | 307 +++++++++++ nexus/types/src/deployment/planning_input.rs | 345 +++++------- nexus/types/src/deployment/tri_map.rs | 511 ++++++++++++++++++ nexus/types/src/deployment/zone_type.rs | 2 +- uuid-kinds/src/lib.rs | 1 + 13 files changed, 982 insertions(+), 235 deletions(-) create mode 100644 nexus/types/proptest-regressions/deployment/tri_map.txt create mode 100644 nexus/types/src/deployment/network_resources.rs create mode 100644 nexus/types/src/deployment/tri_map.rs diff --git a/Cargo.lock b/Cargo.lock index f0ff8138e88..e1e445cc3c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4899,6 +4899,7 @@ dependencies = [ "base64 0.22.1", "chrono", "clap", + "derive-where", "dns-service-client", "futures", "gateway-client", @@ -4911,6 +4912,7 @@ dependencies = [ "omicron-workspace-hack", "openssl", "parse-display", + "proptest", "schemars", "serde", "serde_json", @@ -4921,6 +4923,7 @@ dependencies = [ "steno", "strum", "tabled", + "test-strategy", "thiserror", "uuid", ] diff --git a/dev-tools/reconfigurator-cli/src/main.rs b/dev-tools/reconfigurator-cli/src/main.rs index 72add6ce8c4..f088c9d97d7 100644 --- a/dev-tools/reconfigurator-cli/src/main.rs +++ b/dev-tools/reconfigurator-cli/src/main.rs @@ -32,7 +32,9 @@ use nexus_types::inventory::SledRole; use omicron_common::api::external::Generation; use omicron_common::api::external::Name; use omicron_uuid_kinds::CollectionUuid; +use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::VnicUuid; use reedline::{Reedline, Signal}; use std::collections::BTreeMap; use std::io::BufRead; @@ -146,7 +148,8 @@ impl ReconfiguratorSim { .add_omicron_zone_external_ip(zone.id, external_ip) .context("adding omicron zone external IP")?; let nic = OmicronZoneNic { - id: nic.id, + // TODO-cleanup use `TypedUuid` everywhere + id: VnicUuid::from_untyped_uuid(nic.id), mac: nic.mac, ip: nic.ip, slot: nic.slot, diff --git a/nexus/db-model/src/network_interface.rs b/nexus/db-model/src/network_interface.rs index ff774699d61..8520afdb762 100644 --- a/nexus/db-model/src/network_interface.rs +++ b/nexus/db-model/src/network_interface.rs @@ -17,7 +17,9 @@ use ipnetwork::NetworkSize; use nexus_types::external_api::params; use nexus_types::identity::Resource; use omicron_common::api::{external, internal}; +use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::VnicUuid; use sled_agent_client::ZoneKind; use uuid::Uuid; @@ -207,7 +209,7 @@ impl TryFrom<&'_ ServiceNetworkInterface> }); } Ok(Self { - id: nic.id(), + id: VnicUuid::from_untyped_uuid(nic.id()), mac: *nic.mac, ip: nic.ip.ip(), slot: *nic.slot, diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 7359f1725b6..09bc2eef0f8 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -1346,6 +1346,7 @@ mod tests { use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; + use once_cell::sync::Lazy; use pretty_assertions::assert_eq; use rand::thread_rng; use rand::Rng; @@ -1353,8 +1354,8 @@ mod tests { use 
std::mem; use std::net::Ipv6Addr;
- static EMPTY_PLANNING_INPUT: PlanningInput =
- PlanningInputBuilder::empty_input();
+ static EMPTY_PLANNING_INPUT: Lazy<PlanningInput> =
+ Lazy::new(|| PlanningInputBuilder::empty_input());
// This is a not-super-future-maintainer-friendly helper to check that all
// the subtables related to blueprints have been pruned of a specific
diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs
index 24dbbd15acd..f8748be7582 100644
--- a/nexus/reconfigurator/planning/src/example.rs
+++ b/nexus/reconfigurator/planning/src/example.rs
@@ -13,7 +13,9 @@ use nexus_types::deployment::OmicronZoneNic;
use nexus_types::deployment::PlanningInput;
use nexus_types::deployment::SledFilter;
use nexus_types::inventory::Collection;
+use omicron_uuid_kinds::GenericUuid;
use omicron_uuid_kinds::SledKind;
+use omicron_uuid_kinds::VnicUuid;
use typed_rng::TypedUuidRng;
pub struct ExampleSystem {
@@ -105,7 +107,8 @@ impl ExampleSystem {
.add_omicron_zone_nic(
service_id,
OmicronZoneNic {
- id: nic.id,
+ // TODO-cleanup use `TypedUuid` everywhere
+ id: VnicUuid::from_untyped_uuid(nic.id),
mac: nic.mac,
ip: nic.ip,
slot: nic.slot,
diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml
index 372cee858ac..8f766334169 100644
--- a/nexus/types/Cargo.toml
+++ b/nexus/types/Cargo.toml
@@ -12,6 +12,7 @@ anyhow.workspace = true
chrono.workspace = true
clap.workspace = true
base64.workspace = true
+derive-where.workspace = true
futures.workspace = true
humantime.workspace = true
ipnetwork.workspace = true
@@ -38,3 +39,7 @@ omicron-common.workspace = true
omicron-passwords.workspace = true
omicron-workspace-hack.workspace = true
sled-agent-client.workspace = true
+
+[dev-dependencies]
+proptest.workspace = true
+test-strategy.workspace = true
diff --git a/nexus/types/proptest-regressions/deployment/tri_map.txt b/nexus/types/proptest-regressions/deployment/tri_map.txt
new file mode 100644
index 00000000000..c3f4260f522
--- /dev/null
+++ b/nexus/types/proptest-regressions/deployment/tri_map.txt
@@ -0,0 +1,7 @@
+# Seeds for failure cases proptest has generated in the past. It is
+# automatically read and these particular cases re-run before any
+# novel cases are generated.
+#
+# It is recommended to check this file in to source control so that
+# everyone who runs the test benefits from these saved cases.
+cc bafcbc817cff65814a6f3233f1ef3d6c36f75c37ad35175d17d1c8484a734034 # shrinks to input = _ProptestOpsArgs { initial: {(0, '$', ""): "", (0, ' ', ""): ""}, ops: [] } diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index b7b5bf6aac4..a577c4978c4 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -42,15 +42,22 @@ use strum::IntoEnumIterator; use thiserror::Error; use uuid::Uuid; +mod network_resources; mod planning_input; +mod tri_map; mod zone_type; +pub use network_resources::AddNetworkResourceError; +pub use network_resources::OmicronZoneExternalFloatingAddr; +pub use network_resources::OmicronZoneExternalFloatingIp; +pub use network_resources::OmicronZoneExternalIp; +pub use network_resources::OmicronZoneExternalIpEntry; +pub use network_resources::OmicronZoneExternalIpKey; +pub use network_resources::OmicronZoneExternalSnatIp; +pub use network_resources::OmicronZoneNetworkResources; +pub use network_resources::OmicronZoneNic; +pub use network_resources::OmicronZoneNicEntry; pub use planning_input::DiskFilter; -pub use planning_input::OmicronZoneExternalFloatingAddr; -pub use planning_input::OmicronZoneExternalFloatingIp; -pub use planning_input::OmicronZoneExternalIp; -pub use planning_input::OmicronZoneExternalSnatIp; -pub use planning_input::OmicronZoneNic; pub use planning_input::PlanningInput; pub use planning_input::PlanningInputBuildError; pub use planning_input::PlanningInputBuilder; diff --git a/nexus/types/src/deployment/network_resources.rs b/nexus/types/src/deployment/network_resources.rs new file mode 100644 index 00000000000..15f495d87a7 --- /dev/null +++ b/nexus/types/src/deployment/network_resources.rs @@ -0,0 +1,307 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use super::tri_map::TriMap; +use super::tri_map::TriMapEntry; +use anyhow::anyhow; +use omicron_common::api::external::MacAddr; +use omicron_common::api::internal::shared::SourceNatConfig; +use omicron_uuid_kinds::ExternalIpUuid; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::VnicUuid; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use std::net::IpAddr; +use std::net::SocketAddr; +use thiserror::Error; + +/// Tracker and validator for network resources allocated to Omicron-managed +/// zones. +/// +/// ## Implementation notes +/// +/// `OmicronZoneNetworkResources` consists of two 1:1:1 "trijective" maps: +/// +/// 1. Providing a unique map for Omicron zone IDs, external IP IDs, and +/// external IPs. +/// 2. Providing a unique map for Omicron zone IDs, vNIC IDs, and vNICs. +/// +/// One question that arises: should there instead be a single 1:1:1:1:1 map? +/// In other words, is there a 1:1 mapping between external IPs and vNICs as +/// well? The answer is "generally yes", but: +/// +/// - They're not stored in the database that way, and it's possible that +/// there's some divergence. +/// - We currently don't plan to get any utility out of asserting the 1:1:1:1:1 +/// map. The main planned use of this is for expunged zone garbage collection +/// -- while that benefits from trijective maps tremendously, there's no +/// additional value in asserting a unique mapping between external IPs and +/// vNICs. +/// +/// So we use two separate maps for now. But a single map is always a +/// possibility in the future, if required. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct OmicronZoneNetworkResources { + /// external IPs allocated to Omicron zones + omicron_zone_external_ips: TriMap, + + /// vNICs allocated to Omicron zones + omicron_zone_nics: TriMap, +} + +impl OmicronZoneNetworkResources { + pub fn new() -> Self { + Self { + omicron_zone_external_ips: TriMap::new(), + omicron_zone_nics: TriMap::new(), + } + } + + pub fn add_external_ip( + &mut self, + zone_id: OmicronZoneUuid, + ip: OmicronZoneExternalIp, + ) -> Result<(), AddNetworkResourceError> { + let entry = OmicronZoneExternalIpEntry { zone_id, ip }; + self.omicron_zone_external_ips.insert_no_dups(entry).map_err(|err| { + AddNetworkResourceError::DuplicateOmicronZoneExternalIp { + zone_id, + ip, + err: anyhow!(err), + } + }) + } + + pub fn add_nic( + &mut self, + zone_id: OmicronZoneUuid, + nic: OmicronZoneNic, + ) -> Result<(), AddNetworkResourceError> { + let entry = OmicronZoneNicEntry { zone_id, nic: nic.clone() }; + self.omicron_zone_nics.insert_no_dups(entry).map_err(|err| { + AddNetworkResourceError::DuplicateOmicronZoneNic { + zone_id, + nic, + err: anyhow!(err), + } + }) + } + + pub fn get_external_ip_by_zone_id( + &self, + zone_id: OmicronZoneUuid, + ) -> Option<&OmicronZoneExternalIpEntry> { + self.omicron_zone_external_ips.get1(&zone_id) + } + + pub fn get_external_ip_by_external_ip_id( + &self, + ip: ExternalIpUuid, + ) -> Option<&OmicronZoneExternalIpEntry> { + self.omicron_zone_external_ips.get2(&ip) + } + + pub fn get_external_ip_by_ip( + &self, + ip: OmicronZoneExternalIpKey, + ) -> Option<&OmicronZoneExternalIpEntry> { + self.omicron_zone_external_ips.get3(&ip) + } + + pub fn get_nic_by_zone_id( + &self, + zone_id: OmicronZoneUuid, + ) -> Option<&OmicronZoneNicEntry> { + self.omicron_zone_nics.get1(&zone_id) + } + + pub fn get_nic_by_vnic_id( + &self, + vnic_id: VnicUuid, + ) -> Option<&OmicronZoneNicEntry> { + self.omicron_zone_nics.get2(&vnic_id) + } + + pub fn get_nic_by_mac(&self, mac: MacAddr) -> Option<&OmicronZoneNicEntry> { + self.omicron_zone_nics.get3(&mac) + } +} + +/// External IP variants possible for Omicron-managed zones. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum OmicronZoneExternalIp { + Floating(OmicronZoneExternalFloatingIp), + Snat(OmicronZoneExternalSnatIp), + // We may eventually want `Ephemeral(_)` too (arguably Nexus could be + // ephemeral?), but for now we only have Floating and Snat uses. +} + +impl OmicronZoneExternalIp { + pub fn id(&self) -> ExternalIpUuid { + match self { + OmicronZoneExternalIp::Floating(ext) => ext.id, + OmicronZoneExternalIp::Snat(ext) => ext.id, + } + } + + pub fn ip(&self) -> IpAddr { + match self { + OmicronZoneExternalIp::Floating(ext) => ext.ip, + OmicronZoneExternalIp::Snat(ext) => ext.snat_cfg.ip, + } + } + + pub fn ip_key(&self) -> OmicronZoneExternalIpKey { + match self { + OmicronZoneExternalIp::Floating(ip) => { + OmicronZoneExternalIpKey::Floating(ip.ip) + } + OmicronZoneExternalIp::Snat(snat) => { + OmicronZoneExternalIpKey::Snat(snat.snat_cfg) + } + } + } +} + +/// An IP-based key suitable for uniquely identifying an +/// [`OmicronZoneExternalIp`]. +/// +/// We can't use the IP itself to uniquely identify an external IP because SNAT +/// IPs can have overlapping addresses. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum OmicronZoneExternalIpKey { + Floating(IpAddr), + Snat(SourceNatConfig), +} + +/// Floating external IP allocated to an Omicron-managed zone. 
+/// +/// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields +/// necessary for blueprint planning, and requires that the zone have a single +/// IP. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, +)] +pub struct OmicronZoneExternalFloatingIp { + pub id: ExternalIpUuid, + pub ip: IpAddr, +} + +/// Floating external address with port allocated to an Omicron-managed zone. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, +)] +pub struct OmicronZoneExternalFloatingAddr { + pub id: ExternalIpUuid, + pub addr: SocketAddr, +} + +impl OmicronZoneExternalFloatingAddr { + pub fn into_ip(self) -> OmicronZoneExternalFloatingIp { + OmicronZoneExternalFloatingIp { id: self.id, ip: self.addr.ip() } + } +} + +/// SNAT (outbound) external IP allocated to an Omicron-managed zone. +/// +/// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields +/// necessary for blueprint planning, and requires that the zone have a single +/// IP. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, +)] +pub struct OmicronZoneExternalSnatIp { + pub id: ExternalIpUuid, + pub snat_cfg: SourceNatConfig, +} + +/// Network interface allocated to an Omicron-managed zone. +/// +/// This is a slimmer `nexus_db_model::ServiceNetworkInterface` that only stores +/// the fields necessary for blueprint planning. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct OmicronZoneNic { + pub id: VnicUuid, + pub mac: MacAddr, + pub ip: IpAddr, + pub slot: u8, + pub primary: bool, +} + +/// A pair of an Omicron zone ID and an external IP. +/// +/// Part of [`OmicronZoneNetworkResources`]. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct OmicronZoneExternalIpEntry { + pub zone_id: OmicronZoneUuid, + pub ip: OmicronZoneExternalIp, +} + +/// Specification for the tri-map of Omicron zone external IPs. +impl TriMapEntry for OmicronZoneExternalIpEntry { + type K1 = OmicronZoneUuid; + type K2 = ExternalIpUuid; + + // Note: cannot use IpAddr here, because SNAT IPs can overlap as long as + // their port blocks are disjoint. + type K3 = OmicronZoneExternalIpKey; + + fn key1(&self) -> Self::K1 { + self.zone_id + } + + fn key2(&self) -> Self::K2 { + self.ip.id() + } + + fn key3(&self) -> Self::K3 { + self.ip.ip_key() + } +} + +/// A pair of an Omicron zone ID and a network interface. +/// +/// Part of [`OmicronZoneNetworkResources`]. 
+#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct OmicronZoneNicEntry { + zone_id: OmicronZoneUuid, + nic: OmicronZoneNic, +} + +impl TriMapEntry for OmicronZoneNicEntry { + type K1 = OmicronZoneUuid; + type K2 = VnicUuid; + type K3 = MacAddr; + + fn key1(&self) -> Self::K1 { + self.zone_id + } + + fn key2(&self) -> Self::K2 { + self.nic.id + } + + fn key3(&self) -> Self::K3 { + self.nic.mac + } +} + +#[derive(Debug, Error)] +pub enum AddNetworkResourceError { + #[error("associating Omicron zone {zone_id} with {ip:?} failed due to duplicates")] + DuplicateOmicronZoneExternalIp { + zone_id: OmicronZoneUuid, + ip: OmicronZoneExternalIp, + #[source] + err: anyhow::Error, + }, + #[error("associating Omicron zone {zone_id} with {nic:?} failed due to duplicates")] + DuplicateOmicronZoneNic { + zone_id: OmicronZoneUuid, + nic: OmicronZoneNic, + #[source] + err: anyhow::Error, + }, +} diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs index 89d8bae6607..ccb15b858a6 100644 --- a/nexus/types/src/deployment/planning_input.rs +++ b/nexus/types/src/deployment/planning_input.rs @@ -5,34 +5,136 @@ //! Types describing inputs the Reconfigurator needs to plan and produce new //! blueprints. +use super::AddNetworkResourceError; +use super::OmicronZoneExternalIp; +use super::OmicronZoneNetworkResources; +use super::OmicronZoneNic; use crate::external_api::views::PhysicalDiskPolicy; use crate::external_api::views::PhysicalDiskState; use crate::external_api::views::SledPolicy; use crate::external_api::views::SledProvisionPolicy; use crate::external_api::views::SledState; use clap::ValueEnum; +use ipnetwork::IpNetwork; use omicron_common::address::IpRange; use omicron_common::address::Ipv6Subnet; use omicron_common::address::SLED_PREFIX; use omicron_common::api::external::Generation; -use omicron_common::api::external::MacAddr; -use omicron_common::api::internal::shared::SourceNatConfig; use omicron_common::api::internal::shared::SourceNatConfigError; use omicron_common::disk::DiskIdentity; -use omicron_uuid_kinds::ExternalIpUuid; use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; -use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; use std::collections::btree_map::Entry; use std::collections::BTreeMap; -use std::net::IpAddr; -use std::net::SocketAddr; use strum::IntoEnumIterator; -use uuid::Uuid; + +/// Policy and database inputs to the Reconfigurator planner +/// +/// The primary inputs to the planner are the parent (either a parent blueprint +/// or an inventory collection) and this structure. This type holds the +/// fleet-wide policy as well as any additional information fetched from CRDB +/// that the planner needs to make decisions. +/// +/// The current policy is pretty limited. It's aimed primarily at supporting +/// the add/remove sled use case. +/// +/// The planning input has some internal invariants that code outside of this +/// module can rely on. They include: +/// +/// - Each Omicron zone has at most one external IP and at most one vNIC. +/// - A given external IP or vNIC is only associated with a single Omicron +/// zone. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PlanningInput { + /// fleet-wide policy + policy: Policy, + + /// current internal DNS version + internal_dns_version: Generation, + + /// current external DNS version + external_dns_version: Generation, + + /// per-sled policy and resources + sleds: BTreeMap, + + /// per-zone network resources + network_resources: OmicronZoneNetworkResources, +} + +impl PlanningInput { + pub fn internal_dns_version(&self) -> Generation { + self.internal_dns_version + } + + pub fn external_dns_version(&self) -> Generation { + self.external_dns_version + } + + pub fn target_nexus_zone_count(&self) -> usize { + self.policy.target_nexus_zone_count + } + + pub fn service_ip_pool_ranges(&self) -> &[IpRange] { + &self.policy.service_ip_pool_ranges + } + + pub fn all_sleds( + &self, + filter: SledFilter, + ) -> impl Iterator + '_ { + self.sleds.iter().filter_map(move |(&sled_id, details)| { + filter + .matches_policy_and_state(details.policy, details.state) + .then_some((sled_id, details)) + }) + } + + pub fn all_sled_ids( + &self, + filter: SledFilter, + ) -> impl Iterator + '_ { + self.all_sleds(filter).map(|(sled_id, _)| sled_id) + } + + pub fn all_sled_resources( + &self, + filter: SledFilter, + ) -> impl Iterator + '_ { + self.all_sleds(filter) + .map(|(sled_id, details)| (sled_id, &details.resources)) + } + + pub fn sled_policy(&self, sled_id: &SledUuid) -> Option { + self.sleds.get(sled_id).map(|details| details.policy) + } + + pub fn sled_resources(&self, sled_id: &SledUuid) -> Option<&SledResources> { + self.sleds.get(sled_id).map(|details| &details.resources) + } + + pub fn network_resources(&self) -> &OmicronZoneNetworkResources { + &self.network_resources + } + + /// Convert this `PlanningInput` back into a [`PlanningInputBuilder`] + /// + /// This is primarily useful for tests that want to mutate an existing + /// [`PlanningInput`]. + pub fn into_builder(self) -> PlanningInputBuilder { + PlanningInputBuilder { + policy: self.policy, + internal_dns_version: self.internal_dns_version, + external_dns_version: self.external_dns_version, + sleds: self.sleds, + network_resources: self.network_resources, + } + } +} /// Describes a single disk already managed by the sled. #[derive(Debug, Clone, Serialize, Deserialize)] @@ -152,85 +254,6 @@ impl SledResources { } } -/// External IP variants possible for Omicron-managed zones. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] -pub enum OmicronZoneExternalIp { - Floating(OmicronZoneExternalFloatingIp), - Snat(OmicronZoneExternalSnatIp), - // We may eventually want `Ephemeral(_)` too (arguably Nexus could be - // ephemeral?), but for now we only have Floating and Snat uses. -} - -impl OmicronZoneExternalIp { - pub fn id(&self) -> ExternalIpUuid { - match self { - OmicronZoneExternalIp::Floating(ext) => ext.id, - OmicronZoneExternalIp::Snat(ext) => ext.id, - } - } - - pub fn ip(&self) -> IpAddr { - match self { - OmicronZoneExternalIp::Floating(ext) => ext.ip, - OmicronZoneExternalIp::Snat(ext) => ext.snat_cfg.ip, - } - } -} - -/// Floating external IP allocated to an Omicron-managed zone. -/// -/// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields -/// necessary for blueprint planning, and requires that the zone have a single -/// IP. 
-#[derive( - Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, -)] -pub struct OmicronZoneExternalFloatingIp { - pub id: ExternalIpUuid, - pub ip: IpAddr, -} - -/// Floating external address with port allocated to an Omicron-managed zone. -#[derive( - Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, -)] -pub struct OmicronZoneExternalFloatingAddr { - pub id: ExternalIpUuid, - pub addr: SocketAddr, -} - -impl OmicronZoneExternalFloatingAddr { - pub fn into_ip(self) -> OmicronZoneExternalFloatingIp { - OmicronZoneExternalFloatingIp { id: self.id, ip: self.addr.ip() } - } -} - -/// SNAT (outbound) external IP allocated to an Omicron-managed zone. -/// -/// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields -/// necessary for blueprint planning, and requires that the zone have a single -/// IP. -#[derive( - Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, -)] -pub struct OmicronZoneExternalSnatIp { - pub id: ExternalIpUuid, - pub snat_cfg: SourceNatConfig, -} - -/// Network interface allocated to an Omicron-managed zone. -/// -/// This is a slimmer `nexus_db_model::ServiceNetworkInterface` that only stores -/// the fields necessary for blueprint planning. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct OmicronZoneNic { - pub id: Uuid, - pub mac: MacAddr, - pub ip: IpAddr, - pub slot: u8, - pub primary: bool, -} - /// Filters that apply to sleds. /// /// This logic lives here rather than within the individual components making @@ -426,37 +449,6 @@ pub struct Policy { pub target_nexus_zone_count: usize, } -/// Policy and database inputs to the Reconfigurator planner -/// -/// The primary inputs to the planner are the parent (either a parent blueprint -/// or an inventory collection) and this structure. This type holds the -/// fleet-wide policy as well as any additional information fetched from CRDB -/// that the planner needs to make decisions. -/// -/// -/// The current policy is pretty limited. It's aimed primarily at supporting -/// the add/remove sled use case. 
-#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct PlanningInput { - /// fleet-wide policy - policy: Policy, - - /// current internal DNS version - internal_dns_version: Generation, - - /// current external DNS version - external_dns_version: Generation, - - /// per-sled policy and resources - sleds: BTreeMap, - - /// external IPs allocated to Omicron zones - omicron_zone_external_ips: BTreeMap, - - /// vNICs allocated to Omicron zones - omicron_zone_nics: BTreeMap, -} - #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SledDetails { /// current sled policy @@ -467,82 +459,14 @@ pub struct SledDetails { pub resources: SledResources, } -impl PlanningInput { - pub fn internal_dns_version(&self) -> Generation { - self.internal_dns_version - } - - pub fn external_dns_version(&self) -> Generation { - self.external_dns_version - } - - pub fn target_nexus_zone_count(&self) -> usize { - self.policy.target_nexus_zone_count - } - - pub fn service_ip_pool_ranges(&self) -> &[IpRange] { - &self.policy.service_ip_pool_ranges - } - - pub fn all_sleds( - &self, - filter: SledFilter, - ) -> impl Iterator + '_ { - self.sleds.iter().filter_map(move |(&sled_id, details)| { - filter - .matches_policy_and_state(details.policy, details.state) - .then_some((sled_id, details)) - }) - } - - pub fn all_sled_ids( - &self, - filter: SledFilter, - ) -> impl Iterator + '_ { - self.all_sleds(filter).map(|(sled_id, _)| sled_id) - } - - pub fn all_sled_resources( - &self, - filter: SledFilter, - ) -> impl Iterator + '_ { - self.all_sleds(filter) - .map(|(sled_id, details)| (sled_id, &details.resources)) - } - - pub fn sled_policy(&self, sled_id: &SledUuid) -> Option { - self.sleds.get(sled_id).map(|details| details.policy) - } - - pub fn sled_resources(&self, sled_id: &SledUuid) -> Option<&SledResources> { - self.sleds.get(sled_id).map(|details| &details.resources) - } - - // Convert this `PlanningInput` back into a [`PlanningInputBuilder`] - // - // This is primarily useful for tests that want to mutate an existing - // `PlanningInput`. - pub fn into_builder(self) -> PlanningInputBuilder { - PlanningInputBuilder { - policy: self.policy, - internal_dns_version: self.internal_dns_version, - external_dns_version: self.external_dns_version, - sleds: self.sleds, - omicron_zone_external_ips: self.omicron_zone_external_ips, - omicron_zone_nics: self.omicron_zone_nics, - } - } -} - #[derive(Debug, thiserror::Error)] pub enum PlanningInputBuildError { #[error("duplicate sled ID: {0}")] DuplicateSledId(SledUuid), - #[error("Omicron zone {zone_id} already has an external IP ({ip:?})")] - DuplicateOmicronZoneExternalIp { - zone_id: OmicronZoneUuid, - ip: OmicronZoneExternalIp, - }, + #[error("Omicron zone {zone_id} has a range of IPs ({ip:?}), only a single IP is supported")] + NotSingleIp { zone_id: OmicronZoneUuid, ip: IpNetwork }, + #[error(transparent)] + AddNetworkResource(#[from] AddNetworkResourceError), #[error("Omicron zone {0} has an ephemeral IP (unsupported)")] EphemeralIpUnsupported(OmicronZoneUuid), #[error("Omicron zone {zone_id} has a bad SNAT config")] @@ -551,8 +475,6 @@ pub enum PlanningInputBuildError { #[source] err: SourceNatConfigError, }, - #[error("Omicron zone {zone_id} already has a NIC ({nic:?})")] - DuplicateOmicronZoneNic { zone_id: OmicronZoneUuid, nic: OmicronZoneNic }, } /// Constructor for [`PlanningInput`]. 
@@ -562,12 +484,12 @@ pub struct PlanningInputBuilder { internal_dns_version: Generation, external_dns_version: Generation, sleds: BTreeMap, - omicron_zone_external_ips: BTreeMap, - omicron_zone_nics: BTreeMap, + network_resources: OmicronZoneNetworkResources, } impl PlanningInputBuilder { - pub const fn empty_input() -> PlanningInput { + pub fn empty_input() -> PlanningInput { + // This empty input is known to be valid. PlanningInput { policy: Policy { service_ip_pool_ranges: Vec::new(), @@ -576,8 +498,7 @@ impl PlanningInputBuilder { internal_dns_version: Generation::new(), external_dns_version: Generation::new(), sleds: BTreeMap::new(), - omicron_zone_external_ips: BTreeMap::new(), - omicron_zone_nics: BTreeMap::new(), + network_resources: OmicronZoneNetworkResources::new(), } } @@ -591,8 +512,7 @@ impl PlanningInputBuilder { internal_dns_version, external_dns_version, sleds: BTreeMap::new(), - omicron_zone_external_ips: BTreeMap::new(), - omicron_zone_nics: BTreeMap::new(), + network_resources: OmicronZoneNetworkResources::new(), } } @@ -617,18 +537,7 @@ impl PlanningInputBuilder { zone_id: OmicronZoneUuid, ip: OmicronZoneExternalIp, ) -> Result<(), PlanningInputBuildError> { - match self.omicron_zone_external_ips.entry(zone_id) { - Entry::Vacant(slot) => { - slot.insert(ip); - Ok(()) - } - Entry::Occupied(prev) => { - Err(PlanningInputBuildError::DuplicateOmicronZoneExternalIp { - zone_id, - ip: *prev.get(), - }) - } - } + Ok(self.network_resources.add_external_ip(zone_id, ip)?) } pub fn add_omicron_zone_nic( @@ -636,18 +545,7 @@ impl PlanningInputBuilder { zone_id: OmicronZoneUuid, nic: OmicronZoneNic, ) -> Result<(), PlanningInputBuildError> { - match self.omicron_zone_nics.entry(zone_id) { - Entry::Vacant(slot) => { - slot.insert(nic); - Ok(()) - } - Entry::Occupied(prev) => { - Err(PlanningInputBuildError::DuplicateOmicronZoneNic { - zone_id, - nic: prev.get().clone(), - }) - } - } + Ok(self.network_resources.add_nic(zone_id, nic)?) } pub fn policy_mut(&mut self) -> &mut Policy { @@ -676,8 +574,7 @@ impl PlanningInputBuilder { internal_dns_version: self.internal_dns_version, external_dns_version: self.external_dns_version, sleds: self.sleds, - omicron_zone_external_ips: self.omicron_zone_external_ips, - omicron_zone_nics: self.omicron_zone_nics, + network_resources: self.network_resources, } } } diff --git a/nexus/types/src/deployment/tri_map.rs b/nexus/types/src/deployment/tri_map.rs new file mode 100644 index 00000000000..52b64aec43c --- /dev/null +++ b/nexus/types/src/deployment/tri_map.rs @@ -0,0 +1,511 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::{ + borrow::Borrow, + collections::{hash_map, BTreeSet, HashMap}, + fmt, + hash::Hash, +}; + +use derive_where::derive_where; +use serde::{Deserialize, Serialize, Serializer}; + +/// An append-only 1:1:1 (trijective) map for three keys and a value. +/// +/// The storage mechanism is a vector of entries, with indexes into that vector +/// stored in three hashmaps. This allows for efficient lookups by any of the +/// three keys, while preventing duplicates. +/// +/// Not totally generic yet, just meant for the deployment use case. +#[derive_where(Clone, Debug, Default)] +pub(crate) struct TriMap { + entries: Vec, + // Invariant: the value (usize) in these maps are valid indexes into + // `entries`, and are a 1:1 mapping. 
+ k1_to_entry: HashMap, + k2_to_entry: HashMap, + k3_to_entry: HashMap, +} + +// Note: Eq and PartialEq are not implemented for TriMap. Implementing them +// would need to be done with care, because TriMap is not semantically like an +// IndexMap: two maps are equivalent even if their entries are in a different +// order. + +/// The `Serialize` impl for `TriMap` serializes just the list of entries. +impl Serialize for TriMap +where + T: Serialize, +{ + fn serialize( + &self, + serializer: S, + ) -> Result { + // Serialize just the entries -- don't serialize the indexes. We'll + // rebuild the indexes on deserialization. + self.entries.serialize(serializer) + } +} + +/// The `Deserialize` impl for `TriMap` deserializes the list of entries and +/// then rebuilds the indexes, producing an error if there are any duplicates. +impl<'de, T: TriMapEntry> Deserialize<'de> for TriMap +where + T: Deserialize<'de>, +{ + fn deserialize>( + deserializer: D, + ) -> Result { + // First, deserialize the entries. + let entries = Vec::::deserialize(deserializer)?; + + // Now build a map from scratch, inserting the entries sequentially. + // This will catch issues with duplicates. + let mut map = TriMap::new(); + for entry in entries { + map.insert_no_dups(entry).map_err(serde::de::Error::custom)?; + } + + Ok(map) + } +} + +pub(crate) trait TriMapEntry: Clone + fmt::Debug { + type K1: Eq + Hash + Clone + fmt::Debug; + type K2: Eq + Hash + Clone + fmt::Debug; + type K3: Eq + Hash + Clone + fmt::Debug; + + fn key1(&self) -> Self::K1; + fn key2(&self) -> Self::K2; + fn key3(&self) -> Self::K3; +} + +impl TriMap { + pub(crate) fn new() -> Self { + Self { + entries: Vec::new(), + k1_to_entry: HashMap::new(), + k2_to_entry: HashMap::new(), + k3_to_entry: HashMap::new(), + } + } + + /// Checks general invariants of the map. + /// + /// The code below always upholds these invariants, but it's useful to have + /// an explicit check for tests. + #[cfg(test)] + fn validate(&self) -> anyhow::Result<()> { + use anyhow::{ensure, Context}; + + // Check that all the maps are of the right size. + ensure!( + self.entries.len() == self.k1_to_entry.len(), + "key1 index has {} entries, but there are {} entries", + self.k1_to_entry.len(), + self.entries.len() + ); + ensure!( + self.entries.len() == self.k2_to_entry.len(), + "key2 index has {} entries, but there are {} entries", + self.k2_to_entry.len(), + self.entries.len() + ); + ensure!( + self.entries.len() == self.k3_to_entry.len(), + "key3 index has {} entries, but there are {} entries", + self.k3_to_entry.len(), + self.entries.len() + ); + + // Check that the indexes are all correct. + for (ix, entry) in self.entries.iter().enumerate() { + let key1 = entry.key1(); + let key2 = entry.key2(); + let key3 = entry.key3(); + + let ix1 = self.k1_to_entry.get(&key1).context(format!( + "entry at index {ix} ({entry:?}) has no key1 index" + ))?; + let ix2 = self.k2_to_entry.get(&key2).context(format!( + "entry at index {ix} ({entry:?}) has no key2 index" + ))?; + let ix3 = self.k3_to_entry.get(&key3).context(format!( + "entry at index {ix} ({entry:?}) has no key3 index" + ))?; + + if *ix1 != ix || *ix2 != ix || *ix3 != ix { + return Err(anyhow::anyhow!( + "entry at index {} has mismatched indexes: key1: {}, key2: {}, key3: {}", + ix, + ix1, + ix2, + ix3 + )); + } + } + + Ok(()) + } + + /// Inserts a value into the set, returning an error if any duplicates were + /// added. 
+ pub(crate) fn insert_no_dups( + &mut self, + value: T, + ) -> Result<(), DuplicateEntry> { + let mut dups = BTreeSet::new(); + + // Check for duplicates *before* inserting the new entry, because we + // don't want to partially insert the new entry and then have to roll + // back. + let e1 = detect_dup_or_insert( + self.k1_to_entry.entry(value.key1()), + &mut dups, + ); + let e2 = detect_dup_or_insert( + self.k2_to_entry.entry(value.key2()), + &mut dups, + ); + let e3 = detect_dup_or_insert( + self.k3_to_entry.entry(value.key3()), + &mut dups, + ); + + if !dups.is_empty() { + return Err(DuplicateEntry { + new: value, + dups: dups.iter().map(|ix| self.entries[*ix].clone()).collect(), + }); + } + + let next_index = self.entries.len(); + self.entries.push(value); + // e1, e2 and e3 are all Some because if they were None, dups would be + // non-empty, and we'd have bailed out earlier. + e1.unwrap().insert(next_index); + e2.unwrap().insert(next_index); + e3.unwrap().insert(next_index); + + Ok(()) + } + + pub(crate) fn get1(&self, key1: &Q) -> Option<&T> + where + T::K1: Borrow, + Q: Eq + Hash + ?Sized, + { + self.k1_to_entry.get(key1).map(|ix| &self.entries[*ix]) + } + + pub(crate) fn get2(&self, key2: &Q) -> Option<&T> + where + T::K2: Borrow, + Q: Eq + Hash + ?Sized, + { + self.k2_to_entry.get(key2).map(|ix| &self.entries[*ix]) + } + + pub(crate) fn get3(&self, key3: &Q) -> Option<&T> + where + T::K3: Borrow, + Q: Eq + Hash + ?Sized, + { + self.k3_to_entry.get(key3).map(|ix| &self.entries[*ix]) + } +} + +fn detect_dup_or_insert<'a, K>( + entry: hash_map::Entry<'a, K, usize>, + dups: &mut BTreeSet, +) -> Option> { + match entry { + hash_map::Entry::Vacant(slot) => Some(slot), + hash_map::Entry::Occupied(slot) => { + dups.insert(*slot.get()); + None + } + } +} + +#[derive(Debug)] +pub struct DuplicateEntry { + new: T, + dups: Vec, +} + +impl fmt::Display for DuplicateEntry { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "duplicate entry: {:?} conflicts with existing: {:?}", + self.new, self.dups + ) + } +} + +impl std::error::Error for DuplicateEntry {} + +#[cfg(test)] +mod tests { + use super::*; + use proptest::prelude::*; + use test_strategy::{proptest, Arbitrary}; + + #[derive( + Clone, Debug, Eq, PartialEq, Arbitrary, Serialize, Deserialize, + )] + struct TestEntry { + key1: u8, + key2: char, + key3: String, + value: String, + } + + impl TriMapEntry for TestEntry { + // These types are chosen to represent various kinds of keys in the + // proptest below. + // + // We use u8 since there can only be 256 values, increasing the + // likelihood of collisions in the proptest below. + type K1 = u8; + // char is chosen because the Arbitrary impl for it is biased towards + // ASCII, increasing the likelihood of collisions. + type K2 = char; + // String is a generally open-ended type that probably won't have many + // collisions. + type K3 = String; + + fn key1(&self) -> Self::K1 { + self.key1 + } + + fn key2(&self) -> Self::K2 { + self.key2 + } + + fn key3(&self) -> Self::K3 { + self.key3.clone() + } + } + + #[test] + fn test_insert_entry_no_dups() { + let mut map = TriMap::::new(); + + // Add an element. + let v1 = TestEntry { + key1: 0, + key2: 'a', + key3: "x".to_string(), + value: "v".to_string(), + }; + map.insert_no_dups(v1.clone()).unwrap(); + + // Add an exact duplicate, which should error out. 
+ let error = map.insert_no_dups(v1.clone()).unwrap_err(); + assert_eq!(&error.new, &v1); + assert_eq!(error.dups, vec![v1.clone()]); + + // Add a duplicate against just key1, which should error out. + let v2 = TestEntry { + key1: 0, + key2: 'b', + key3: "y".to_string(), + value: "v".to_string(), + }; + let error = map.insert_no_dups(v2.clone()).unwrap_err(); + assert_eq!(&error.new, &v2); + assert_eq!(error.dups, vec![v1.clone()]); + + // Add a duplicate against just key2, which should error out. + let v3 = TestEntry { + key1: 1, + key2: 'a', + key3: "y".to_string(), + value: "v".to_string(), + }; + let error = map.insert_no_dups(v3.clone()).unwrap_err(); + assert_eq!(&error.new, &v3); + + // Add a duplicate against just key3, which should error out. + let v4 = TestEntry { + key1: 1, + key2: 'b', + key3: "x".to_string(), + value: "v".to_string(), + }; + let error = map.insert_no_dups(v4.clone()).unwrap_err(); + assert_eq!(&error.new, &v4); + + // Add an entry that doesn't have any conflicts. + let v5 = TestEntry { + key1: 1, + key2: 'b', + key3: "y".to_string(), + value: "v".to_string(), + }; + map.insert_no_dups(v5.clone()).unwrap(); + } + + /// Represents a naive version of `TriMap` that doesn't have any indexes + /// and does linear scans. + #[derive(Debug)] + struct NaiveTriMap { + entries: Vec, + } + + impl NaiveTriMap { + fn new() -> Self { + Self { entries: Vec::new() } + } + + fn insert_entry_no_dups( + &mut self, + entry: TestEntry, + ) -> Result<(), DuplicateEntry> { + let dups = self + .entries + .iter() + .filter(|e| { + e.key1 == entry.key1 + || e.key2 == entry.key2 + || e.key3 == entry.key3 + }) + .cloned() + .collect::>(); + + if !dups.is_empty() { + return Err(DuplicateEntry { new: entry, dups }); + } + + self.entries.push(entry); + Ok(()) + } + } + + #[derive(Debug, Arbitrary)] + enum Operation { + // Make inserts a bit more common to try and fill up the map. + #[weight(3)] + Insert(TestEntry), + Get1(u8), + Get2(char), + Get3(String), + } + + #[proptest] + fn proptest_serialize_roundtrip(values: Vec) { + let mut map = TriMap::::new(); + let mut first_error = None; + for value in values.clone() { + // Ignore errors from duplicates which are quite possible to occur + // here, since we're just testing serialization. But store the + // first error to ensure that deserialization returns errors. + if let Err(error) = map.insert_no_dups(value) { + if first_error.is_none() { + first_error = Some(error); + } + } + } + + let serialized = serde_json::to_string(&map).unwrap(); + let deserialized: TriMap = + serde_json::from_str(&serialized).unwrap(); + + assert_eq!(map.entries, deserialized.entries, "entries match"); + // All of the indexes should be the same too. + assert_eq!( + map.k1_to_entry, deserialized.k1_to_entry, + "k1 indexes match" + ); + assert_eq!( + map.k2_to_entry, deserialized.k2_to_entry, + "k2 indexes match" + ); + assert_eq!( + map.k3_to_entry, deserialized.k3_to_entry, + "k3 indexes match" + ); + + // Try deserializing the full list of values directly, and see that the + // error reported is the same as first_error. + // + // Here we rely on the fact that a TriMap is serialized as just a + // vector. 
+ let serialized = serde_json::to_string(&values).unwrap(); + let res: Result, _> = + serde_json::from_str(&serialized); + match (first_error, res) { + (None, Ok(_)) => {} // No error, should be fine + (Some(first_error), Ok(_)) => { + panic!( + "expected error ({first_error}), but deserialization succeeded" + ) + } + (None, Err(error)) => { + panic!("unexpected error: {error}, deserialization should have succeeded") + } + (Some(first_error), Err(error)) => { + // first_error is the error from the map, and error is the + // deserialization error (which should always be a custom + // error, stored as a string). + let expected = first_error.to_string(); + let actual = error.to_string(); + assert_eq!(actual, expected, "error matches"); + } + } + } + + #[proptest(cases = 16)] + fn proptest_ops( + #[strategy(prop::collection::vec(any::(), 0..1024))] + ops: Vec, + ) { + let mut map = TriMap::::new(); + let mut naive_map = NaiveTriMap::new(); + + // Now perform the operations on both maps. + for op in ops { + match op { + Operation::Insert(entry) => { + let map_res = map.insert_no_dups(entry.clone()); + let naive_res = + naive_map.insert_entry_no_dups(entry.clone()); + + assert_eq!(map_res.is_ok(), naive_res.is_ok()); + if let Err(map_err) = map_res { + let naive_err = naive_res.unwrap_err(); + assert_eq!(map_err.new, naive_err.new); + assert_eq!(map_err.dups, naive_err.dups); + } + + map.validate().expect("map should be valid"); + } + Operation::Get1(key1) => { + let map_res = map.get1(&key1); + let naive_res = + naive_map.entries.iter().find(|e| e.key1 == key1); + + assert_eq!(map_res, naive_res); + } + Operation::Get2(key2) => { + let map_res = map.get2(&key2); + let naive_res = + naive_map.entries.iter().find(|e| e.key2 == key2); + + assert_eq!(map_res, naive_res); + } + Operation::Get3(key3) => { + let map_res = map.get3(&key3); + let naive_res = + naive_map.entries.iter().find(|e| e.key3 == key3); + + assert_eq!(map_res, naive_res); + } + } + } + } +} diff --git a/nexus/types/src/deployment/zone_type.rs b/nexus/types/src/deployment/zone_type.rs index 035e0667bc0..9f663015cde 100644 --- a/nexus/types/src/deployment/zone_type.rs +++ b/nexus/types/src/deployment/zone_type.rs @@ -196,7 +196,7 @@ impl BlueprintZoneType { } pub mod blueprint_zone_type { - use crate::deployment::planning_input::OmicronZoneExternalFloatingAddr; + use crate::deployment::OmicronZoneExternalFloatingAddr; use crate::deployment::OmicronZoneExternalFloatingIp; use crate::deployment::OmicronZoneExternalSnatIp; use crate::inventory::OmicronZoneDataset; diff --git a/uuid-kinds/src/lib.rs b/uuid-kinds/src/lib.rs index 489e0da3659..2fc08972a69 100644 --- a/uuid-kinds/src/lib.rs +++ b/uuid-kinds/src/lib.rs @@ -62,5 +62,6 @@ impl_typed_uuid_kind! 
{ Upstairs => "upstairs", UpstairsRepair => "upstairs_repair", UpstairsSession => "upstairs_session", + Vnic => "vnic", Zpool => "zpool", } From c8c7b41b4f3ba7111b60f65675b98c7a380f6421 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Tue, 14 May 2024 19:20:45 -0400 Subject: [PATCH 03/37] fix #5733 vs #5751 semantic merge conflict (#5767) same problem and fix as #5765 --- nexus/tests/integration_tests/rack.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/tests/integration_tests/rack.rs b/nexus/tests/integration_tests/rack.rs index 3e10ebcca47..c72c59b6f73 100644 --- a/nexus/tests/integration_tests/rack.rs +++ b/nexus/tests/integration_tests/rack.rs @@ -205,7 +205,7 @@ async fn test_sled_add(cptestctx: &ControlPlaneTestContext) { assert_eq!(sled_id, repeat_sled_id); // Now upsert the sled. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; nexus .datastore() .sled_upsert(SledUpdate::new( From 75661286020288b84c4cb138b21b1ff132c53590 Mon Sep 17 00:00:00 2001 From: Eliza Weisman Date: Tue, 14 May 2024 18:05:19 -0700 Subject: [PATCH 04/37] [nexus] deflake `test_instance_watcher_metrics` (#5768) Presently, `test_instance_watcher_metrics` will wait for the `instance_watcher` background task to have run before making assertions about metrics, but it does *not* ensure that oximeter has actually collected those metrics. This can result in flaky failures --- see #5752. This commit adds explicit calls to `oximeter.force_collect()` prior to making assertions, to ensure that the latest metrics have been collected. Fixes #5752 --- nexus/tests/integration_tests/metrics.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs index 71d18f95eed..abcc7f1c75d 100644 --- a/nexus/tests/integration_tests/metrics.rs +++ b/nexus/tests/integration_tests/metrics.rs @@ -329,6 +329,7 @@ async fn test_instance_watcher_metrics( let client = &cptestctx.external_client; let internal_client = &cptestctx.internal_client; let nexus = &cptestctx.server.server_context().nexus; + let oximeter = &cptestctx.oximeter; // TODO(eliza): consider factoring this out to a generic // `activate_background_task` function in `nexus-test-utils` eventually? @@ -399,6 +400,8 @@ async fn test_instance_watcher_metrics( ) .await .unwrap(); + // Make sure that the latest metrics have been collected. + oximeter.force_collect().await; }; #[track_caller] @@ -443,11 +446,8 @@ async fn test_instance_watcher_metrics( let project = create_project_and_pool(&client).await; let project_name = project.identity.name.as_str(); // Wait until Nexus registers as a producer with Oximeter. - wait_for_producer( - &cptestctx.oximeter, - cptestctx.server.server_context().nexus.id(), - ) - .await; + wait_for_producer(&oximeter, cptestctx.server.server_context().nexus.id()) + .await; eprintln!("--- creating instance 1 ---"); let instance1 = create_instance(&client, project_name, "i-1").await; From 5ace1af5b4699f07c94dc92e420f8aab6bafb740 Mon Sep 17 00:00:00 2001 From: liffy <629075+lifning@users.noreply.github.com> Date: Tue, 14 May 2024 21:43:24 -0700 Subject: [PATCH 05/37] async-await-ifying DumpSetup (#5229) Replace the std::thread + Mutex originally used in DumpSetup with a tokio task and message-passing. 
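At a high level this is the usual task-plus-channel pattern: `DumpSetup` keeps a `tokio::sync::mpsc::Sender`, and the spawned worker loops on `recv()` bounded by a timeout so it still archives cores and logs even when no disk updates arrive. A rough, self-contained sketch of that loop follows; the `Cmd` and `worker_loop` names and the 300-second interval are invented for illustration (and assume tokio's "full" feature set), so this is not the actual `DumpSetupWorker` code.

```rust
use std::time::Duration;
use tokio::sync::mpsc;

// Hypothetical command type; the real worker carries disk/dataset lists.
enum Cmd {
    UpdateDisks(Vec<String>),
}

async fn worker_loop(mut rx: mpsc::Receiver<Cmd>) {
    let interval = Duration::from_secs(300);
    loop {
        match tokio::time::timeout(interval, rx.recv()).await {
            // New disk layout: update which slices/datasets we target.
            Ok(Some(Cmd::UpdateDisks(disks))) => {
                println!("updating dump/debug targets: {disks:?}");
            }
            // All senders dropped; the owning handle is gone, so exit.
            Ok(None) => break,
            // Timed out with nothing new: fall through to periodic work.
            Err(_elapsed) => {}
        }
        // Either way, re-evaluate choices and archive cores/logs here.
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(16);
    let worker = tokio::spawn(worker_loop(rx));
    tx.send(Cmd::UpdateDisks(vec!["m2-dump-slice-0".into()]))
        .await
        .unwrap();
    drop(tx); // closing the channel lets the worker task finish
    worker.await.unwrap();
}
```

Dropping the sender half (which happens when the owning handle goes away) makes `recv()` return `None`, giving the worker a clean exit path in place of the old `Weak`-pointer upgrade check.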
(Verified cores and kernel dumps and logs all getting archived appropriately on a bench gimlet) --- Cargo.toml | 2 +- illumos-utils/src/dumpadm.rs | 26 ++- sled-agent/src/dump_setup.rs | 345 ++++++++++++++++++++--------------- 3 files changed, 210 insertions(+), 163 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 19d2924a7e4..83a41ff8343 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -491,7 +491,7 @@ wicket-common = { path = "wicket-common" } wicketd-client = { path = "clients/wicketd-client" } zeroize = { version = "1.7.0", features = ["zeroize_derive", "std"] } zip = { version = "0.6.6", default-features = false, features = ["deflate","bzip2"] } -zone = { version = "0.3", default-features = false, features = ["async", "sync"] } +zone = { version = "0.3", default-features = false, features = ["async"] } # newtype-uuid is set to default-features = false because we don't want to # depend on std in omicron-uuid-kinds (in case a no-std library wants to access diff --git a/illumos-utils/src/dumpadm.rs b/illumos-utils/src/dumpadm.rs index e37874f795d..5a8247041b6 100644 --- a/illumos-utils/src/dumpadm.rs +++ b/illumos-utils/src/dumpadm.rs @@ -1,11 +1,10 @@ use crate::{execute, ExecutionError}; -use byteorder::{LittleEndian, ReadBytesExt}; use camino::Utf8PathBuf; use std::ffi::OsString; -use std::fs::File; -use std::io::{Seek, SeekFrom}; use std::os::unix::ffi::OsStringExt; use std::process::Command; +use tokio::fs::File; +use tokio::io::{AsyncReadExt, AsyncSeekExt, SeekFrom}; pub const DUMPADM: &str = "/usr/sbin/dumpadm"; pub const SAVECORE: &str = "/usr/bin/savecore"; @@ -48,11 +47,11 @@ pub enum DumpHdrError { /// been a core written there at all, Err(DumpHdrError::InvalidVersion) if the /// dumphdr isn't the one we know how to handle (10), or other variants of /// DumpHdrError if there are I/O failures while reading the block device. -pub fn dump_flag_is_valid( +pub async fn dump_flag_is_valid( dump_slice: &Utf8PathBuf, ) -> Result { - let mut f = File::open(dump_slice).map_err(DumpHdrError::OpenRaw)?; - f.seek(SeekFrom::Start(DUMP_OFFSET)).map_err(DumpHdrError::Seek)?; + let mut f = File::open(dump_slice).await.map_err(DumpHdrError::OpenRaw)?; + f.seek(SeekFrom::Start(DUMP_OFFSET)).await.map_err(DumpHdrError::Seek)?; // read the first few fields of dumphdr. // typedef struct dumphdr { @@ -62,21 +61,18 @@ pub fn dump_flag_is_valid( // /* [...] */ // } - let magic = - f.read_u32::().map_err(DumpHdrError::ReadMagic)?; - if magic != DUMP_MAGIC { + let magic = f.read_u32().await.map_err(DumpHdrError::ReadMagic)?; + if magic != DUMP_MAGIC.to_be() { return Err(DumpHdrError::InvalidMagic(magic)); } - let version = - f.read_u32::().map_err(DumpHdrError::ReadVersion)?; - if version != DUMP_VERSION { + let version = f.read_u32().await.map_err(DumpHdrError::ReadVersion)?; + if version != DUMP_VERSION.to_be() { return Err(DumpHdrError::InvalidVersion(version)); } - let flags = - f.read_u32::().map_err(DumpHdrError::ReadFlags)?; - Ok((flags & DF_VALID) != 0) + let flags = f.read_u32().await.map_err(DumpHdrError::ReadFlags)?; + Ok((flags & DF_VALID.to_be()) != 0) } pub enum DumpContentType { diff --git a/sled-agent/src/dump_setup.rs b/sled-agent/src/dump_setup.rs index 4717f8b49ed..02d3d41dd7b 100644 --- a/sled-agent/src/dump_setup.rs +++ b/sled-agent/src/dump_setup.rs @@ -82,6 +82,7 @@ //! rotated log files having the same modified time to the second), the //! number is incremented by 1 until no conflict remains. 
+use async_trait::async_trait; use camino::Utf8PathBuf; use derive_more::{AsRef, From}; use illumos_utils::coreadm::{CoreAdm, CoreFileOption}; @@ -97,8 +98,8 @@ use slog::Logger; use std::collections::HashSet; use std::ffi::OsString; use std::path::{Path, PathBuf}; -use std::sync::{Arc, Weak}; use std::time::{Duration, SystemTime, SystemTimeError, UNIX_EPOCH}; +use tokio::sync::mpsc::Receiver; use zone::{Zone, ZoneError}; const ZFS_PROP_USED: &str = "used"; @@ -117,14 +118,14 @@ struct DebugDataset(Utf8PathBuf); #[derive(AsRef, Clone, Debug, Eq, From, Hash, Ord, PartialEq, PartialOrd)] struct CoreDataset(Utf8PathBuf); -#[derive(AsRef, Clone, From)] -struct CoreZpool { +#[derive(AsRef, Clone, Debug, From)] +pub(super) struct CoreZpool { mount_config: MountConfig, name: ZpoolName, } -#[derive(AsRef, Clone, From)] -struct DebugZpool { +#[derive(AsRef, Clone, Debug, From)] +pub(super) struct DebugZpool { mount_config: MountConfig, name: ZpoolName, } @@ -167,6 +168,16 @@ trait GetMountpoint: AsRef { } } } + +#[derive(Debug)] +enum DumpSetupCmd { + UpdateDumpdevSetup { + dump_slices: Vec, + debug_datasets: Vec, + core_datasets: Vec, + }, +} + struct DumpSetupWorker { core_dataset_names: Vec, debug_dataset_names: Vec, @@ -182,33 +193,33 @@ struct DumpSetupWorker { savecored_slices: HashSet, log: Logger, + rx: Receiver, coredumpadm_invoker: Box, zfs_invoker: Box, zone_invoker: Box, } pub struct DumpSetup { - worker: Arc>, + tx: tokio::sync::mpsc::Sender, mount_config: MountConfig, - _poller: std::thread::JoinHandle<()>, + _poller: tokio::task::JoinHandle<()>, log: Logger, } impl DumpSetup { pub fn new(log: &Logger, mount_config: MountConfig) -> Self { - let worker = Arc::new(std::sync::Mutex::new(DumpSetupWorker::new( + let (tx, rx) = tokio::sync::mpsc::channel(16); + let worker = DumpSetupWorker::new( Box::new(RealCoreDumpAdm {}), Box::new(RealZfs {}), Box::new(RealZone {}), log.new(o!("component" => "DumpSetup-worker")), - ))); - let worker_weak = Arc::downgrade(&worker); - let log_poll = log.new(o!("component" => "DumpSetup-archival")); - let _poller = std::thread::spawn(move || { - Self::poll_file_archival(worker_weak, log_poll) - }); + rx, + ); + let _poller = + tokio::spawn(async move { worker.poll_file_archival().await }); let log = log.new(o!("component" => "DumpSetup")); - Self { worker, mount_config, _poller, log } + Self { tx, mount_config, _poller, log } } pub(crate) async fn update_dumpdev_setup( @@ -268,55 +279,16 @@ impl DumpSetup { } } - let savecore_lock = self.worker.clone(); - let log_tmp = log.new(o!("component" => "DumpSetup-mutex")); - tokio::task::spawn_blocking(move || match savecore_lock.lock() { - Ok(mut guard) => { - guard.update_disk_loadout( - m2_dump_slices, - u2_debug_datasets, - m2_core_datasets, - ); - } - Err(err) => { - error!(log_tmp, "DumpSetup mutex poisoned: {err:?}"); - } - }); - } - - fn poll_file_archival( - worker: Weak>, - log: Logger, - ) { - info!(log, "DumpSetup poll loop started."); - loop { - if let Some(mutex) = worker.upgrade() { - match mutex.lock() { - Ok(mut guard) => { - guard.reevaluate_choices(); - if let Err(err) = guard.archive_files() { - error!( - log, - "Failed to archive debug/dump files: {err:?}" - ); - } - } - Err(err) => { - error!( - log, - "DumpSetup mutex poisoned in poll thread: {err:?}" - ); - break; - } - } - } else { - info!( - log, - "DumpSetup weak pointer dropped, leaving poll loop." 
- ); - break; - } - std::thread::sleep(ARCHIVAL_INTERVAL); + if let Err(err) = self + .tx + .send(DumpSetupCmd::UpdateDumpdevSetup { + dump_slices: m2_dump_slices, + debug_datasets: u2_debug_datasets, + core_datasets: m2_core_datasets, + }) + .await + { + error!(log, "DumpSetup channel closed: {:?}", err.0); } } } @@ -331,9 +303,10 @@ enum ZfsGetError { Parse(#[from] std::num::ParseIntError), } +#[async_trait] trait CoreDumpAdmInvoker { fn coreadm(&self, core_dir: &Utf8PathBuf) -> Result<(), ExecutionError>; - fn dumpadm( + async fn dumpadm( &self, dump_slice: &Utf8PathBuf, savecore_dir: Option<&Utf8PathBuf>, @@ -378,14 +351,16 @@ trait ZfsInvoker { ) -> Utf8PathBuf; } +#[async_trait] trait ZoneInvoker { - fn get_zones(&self) -> Result, ArchiveLogsError>; + async fn get_zones(&self) -> Result, ArchiveLogsError>; } struct RealCoreDumpAdm {} struct RealZfs {} struct RealZone {} +#[async_trait] impl CoreDumpAdmInvoker for RealCoreDumpAdm { fn coreadm(&self, core_dir: &Utf8PathBuf) -> Result<(), ExecutionError> { let mut cmd = CoreAdm::new(); @@ -414,7 +389,7 @@ impl CoreDumpAdmInvoker for RealCoreDumpAdm { // function also invokes `savecore(8)` to save it into that directory. // On success, returns Ok(Some(stdout)) if `savecore(8)` was invoked, or // Ok(None) if it wasn't. - fn dumpadm( + async fn dumpadm( &self, dump_slice: &Utf8PathBuf, savecore_dir: Option<&Utf8PathBuf>, @@ -427,7 +402,7 @@ impl CoreDumpAdmInvoker for RealCoreDumpAdm { // which is in the ramdisk pool), because dumpadm refuses to do what // we ask otherwise. let tmp_crash = "/tmp/crash"; - std::fs::create_dir_all(tmp_crash).map_err(|err| { + tokio::fs::create_dir_all(tmp_crash).await.map_err(|err| { ExecutionError::ExecutionStart { command: format!("mkdir {tmp_crash:?}"), err, @@ -457,7 +432,7 @@ impl CoreDumpAdmInvoker for RealCoreDumpAdm { if savecore_dir.is_some() { // and does the dump slice have one to save off if let Ok(true) = - illumos_utils::dumpadm::dump_flag_is_valid(dump_slice) + illumos_utils::dumpadm::dump_flag_is_valid(dump_slice).await { return illumos_utils::dumpadm::SaveCore.execute(); } @@ -490,9 +465,11 @@ impl ZfsInvoker for RealZfs { } } +#[async_trait] impl ZoneInvoker for RealZone { - fn get_zones(&self) -> Result, ArchiveLogsError> { - Ok(zone::Adm::list_blocking()? + async fn get_zones(&self) -> Result, ArchiveLogsError> { + Ok(zone::Adm::list() + .await? .into_iter() .filter(|z| z.global() || z.name().starts_with(ZONE_PREFIX)) .collect::>()) @@ -505,6 +482,7 @@ impl DumpSetupWorker { zfs_invoker: Box, zone_invoker: Box, log: Logger, + rx: Receiver, ) -> Self { Self { core_dataset_names: vec![], @@ -517,24 +495,67 @@ impl DumpSetupWorker { known_core_dirs: vec![], savecored_slices: Default::default(), log, + rx, coredumpadm_invoker, zfs_invoker, zone_invoker, } } + async fn poll_file_archival(mut self) { + info!(self.log, "DumpSetup poll loop started."); + loop { + match tokio::time::timeout(ARCHIVAL_INTERVAL, self.rx.recv()).await + { + Ok(Some(DumpSetupCmd::UpdateDumpdevSetup { + dump_slices, + debug_datasets, + core_datasets, + })) => { + self.update_disk_loadout( + dump_slices, + debug_datasets, + core_datasets, + ); + } + Ok(None) => { + warn!( + self.log, + "Control channel closed, no more dump archival!" + ); + break; + } + Err(_elapsed) => { + // no new disks, just pump cores/logs with what we've got + } + } + // regardless of whether we updated disks, + // at least every ARCHIVAL_INTERVAL, + // figure out if we should change our target volumes... 
+ self.reevaluate_choices().await; + // and then do the actual archiving. + if let Err(err) = self.archive_files().await { + error!(self.log, "Failed to archive debug/dump files: {err:?}"); + } + } + } + fn update_disk_loadout( &mut self, dump_slices: Vec, debug_datasets: Vec, core_datasets: Vec, ) { + info!( + self.log, + "Updated view of disks"; + "core_datasets" => %core_datasets.len(), + "debug_datasets" => %debug_datasets.len(), + "dump_slices" => %dump_slices.len(), + ); self.core_dataset_names = core_datasets; self.debug_dataset_names = debug_datasets; - self.known_dump_slices = dump_slices; - - self.reevaluate_choices(); } // only allow mounted zfs datasets into 'known_*_dirs', @@ -554,7 +575,7 @@ impl DumpSetupWorker { .collect(); } - fn reevaluate_choices(&mut self) { + async fn reevaluate_choices(&mut self) { self.update_mounted_dirs(); self.known_dump_slices.sort(); @@ -609,7 +630,7 @@ impl DumpSetupWorker { self.chosen_debug_dir = None; } else { warn!(self.log, "All candidate debug/dump dirs are over usage threshold, removing older archived files"); - if let Err(err) = self.cleanup() { + if let Err(err) = self.cleanup().await { error!(self.log, "Couldn't clean up any debug/dump dirs, may hit dataset quota in {x:?}: {err:?}"); } else { self.chosen_debug_dir = None; @@ -665,7 +686,9 @@ impl DumpSetupWorker { // Let's try to see if it appears to have a kernel dump already match illumos_utils::dumpadm::dump_flag_is_valid( dump_slice.as_ref(), - ) { + ) + .await + { Ok(true) => { debug!(self.log, "Dump slice {dump_slice:?} appears to have a valid header; will attempt to savecore"); } @@ -676,7 +699,9 @@ impl DumpSetupWorker { debug!(self.log, "Dump slice {dump_slice:?} appears to be unused: {err:?}"); } } - if let Ok(saved) = self.dumpadm_and_savecore(&dump_slice) { + if let Ok(saved) = + self.dumpadm_and_savecore(&dump_slice).await + { if let Some(out) = saved { info!(self.log, "Previous dump on slice {dump_slice:?} saved, configured slice as target for new dumps. {out:?}"); } @@ -691,13 +716,16 @@ impl DumpSetupWorker { for dump_slice in &self.known_dump_slices { match illumos_utils::dumpadm::dump_flag_is_valid( dump_slice.as_ref(), - ) { + ) + .await + { Ok(false) => { // Have dumpadm write the config for crash dumps to be // on this slice, at least, until a U.2 comes along. match self .coredumpadm_invoker .dumpadm(dump_slice.as_ref(), None) + .await { Ok(_) => { info!(self.log, "Using dump device {dump_slice:?} with no savecore destination (no U.2 debug zvol yet)"); @@ -731,7 +759,7 @@ impl DumpSetupWorker { changed_slice = true; // temporarily changes the system's dump slice so savecore(8) // can update the header in the slice when it finishes... 
- match self.dumpadm_and_savecore(&dump_slice) { + match self.dumpadm_and_savecore(&dump_slice).await { Ok(saved) => { if let Some(stdout) = &saved { info!( @@ -759,6 +787,7 @@ impl DumpSetupWorker { if let Err(err) = self .coredumpadm_invoker .dumpadm(dump_slice.as_ref(), None) + .await { error!(self.log, "Could not restore dump slice to {dump_slice:?}: {err:?}"); } @@ -767,7 +796,7 @@ impl DumpSetupWorker { } } - fn archive_files(&self) -> std::io::Result<()> { + async fn archive_files(&self) -> tokio::io::Result<()> { if let Some(debug_dir) = &self.chosen_debug_dir { if self.known_core_dirs.is_empty() { info!(self.log, "No core dump locations yet known."); @@ -780,6 +809,7 @@ impl DumpSetupWorker { if let Err(err) = Self::copy_sync_and_remove(&entry.path(), &dest) + .await { error!( self.log, @@ -804,7 +834,7 @@ impl DumpSetupWorker { ); } - if let Err(err) = self.archive_logs() { + if let Err(err) = self.archive_logs().await { if !matches!(err, ArchiveLogsError::NoDebugDirYet) { error!( self.log, @@ -816,32 +846,32 @@ impl DumpSetupWorker { Ok(()) } - fn copy_sync_and_remove( + async fn copy_sync_and_remove( source: impl AsRef, dest: impl AsRef, - ) -> std::io::Result<()> { + ) -> tokio::io::Result<()> { let source = source.as_ref(); let dest = dest.as_ref(); - let mut dest_f = std::fs::File::create(&dest)?; - let mut src_f = std::fs::File::open(&source)?; + let mut dest_f = tokio::fs::File::create(&dest).await?; + let mut src_f = tokio::fs::File::open(&source).await?; - std::io::copy(&mut src_f, &mut dest_f)?; + tokio::io::copy(&mut src_f, &mut dest_f).await?; - dest_f.sync_all()?; + dest_f.sync_all().await?; drop(src_f); drop(dest_f); - std::fs::remove_file(source)?; + tokio::fs::remove_file(source).await?; Ok(()) } - fn archive_logs(&self) -> Result<(), ArchiveLogsError> { + async fn archive_logs(&self) -> Result<(), ArchiveLogsError> { let debug_dir = self .chosen_debug_dir .as_ref() .ok_or(ArchiveLogsError::NoDebugDirYet)?; - let oxz_zones = self.zone_invoker.get_zones()?; + let oxz_zones = self.zone_invoker.get_zones().await?; for zone in oxz_zones { let logdir = if zone.global() { PathBuf::from("/var/svc/log") @@ -849,12 +879,12 @@ impl DumpSetupWorker { zone.path().join("root/var/svc/log") }; let zone_name = zone.name(); - self.archive_logs_inner(debug_dir, logdir, zone_name)?; + self.archive_logs_inner(debug_dir, logdir, zone_name).await?; } Ok(()) } - fn archive_logs_inner( + async fn archive_logs_inner( &self, debug_dir: &DebugDataset, logdir: PathBuf, @@ -873,7 +903,7 @@ impl DumpSetupWorker { } let dest_dir = debug_dir.as_ref().join(zone_name).into_std_path_buf(); if !rotated_log_files.is_empty() { - std::fs::create_dir_all(&dest_dir)?; + tokio::fs::create_dir_all(&dest_dir).await?; let count = rotated_log_files.len(); info!( self.log, @@ -903,7 +933,7 @@ impl DumpSetupWorker { break; } } - if let Err(err) = Self::copy_sync_and_remove(&entry, dest) { + if let Err(err) = Self::copy_sync_and_remove(&entry, dest).await { warn!(self.log, "Failed to archive {entry:?}: {err:?}"); } } @@ -919,7 +949,7 @@ impl DumpSetupWorker { // for savecore to behave the way we want (i.e. clear the flag // after succeeding), we could hypothetically miss a dump if // the kernel crashes again while savecore is still running. 
- fn dumpadm_and_savecore( + async fn dumpadm_and_savecore( &mut self, dump_slice: &DumpSlicePath, ) -> Result, ExecutionError> { @@ -931,6 +961,7 @@ impl DumpSetupWorker { match self .coredumpadm_invoker .dumpadm(dump_slice.as_ref(), Some(&savecore_dir)) + .await { Ok(saved) => { self.savecored_slices.insert(dump_slice.clone()); @@ -940,10 +971,10 @@ impl DumpSetupWorker { } } - fn cleanup(&self) -> Result<(), CleanupError> { + async fn cleanup(&self) -> Result<(), CleanupError> { let mut dir_info = Vec::new(); for dir in &self.known_debug_dirs { - match self.scope_dir_for_cleanup(dir) { + match self.scope_dir_for_cleanup(dir).await { Ok(info) => { dir_info.push((info, dir)); } @@ -967,7 +998,7 @@ impl DumpSetupWorker { // the i/o error *may* be an issue with the underlying disk, so // we continue to the dataset with the next-oldest average age // of files-to-delete in the sorted list. - if let Err(err) = std::fs::remove_file(&path) { + if let Err(err) = tokio::fs::remove_file(&path).await { error!(self.log, "Couldn't delete {path:?} from debug dataset, skipping {dir:?}. {err:?}"); continue 'outer; } @@ -980,7 +1011,7 @@ impl DumpSetupWorker { Ok(()) } - fn scope_dir_for_cleanup( + async fn scope_dir_for_cleanup( &self, debug_dir: &DebugDataset, ) -> Result { @@ -999,7 +1030,7 @@ impl DumpSetupWorker { for path in glob::glob(debug_dir.as_ref().join("**/*").as_str())?.flatten() { - let meta = std::fs::metadata(&path)?; + let meta = tokio::fs::metadata(&path).await?; // we need this to be a Duration rather than SystemTime so we can // do math to it later. let time = meta.modified()?.duration_since(UNIX_EPOCH)?; @@ -1033,7 +1064,7 @@ impl DumpSetupWorker { #[derive(thiserror::Error, Debug)] pub enum ArchiveLogsError { #[error("I/O error: {0}")] - IoError(#[from] std::io::Error), + IoError(#[from] tokio::io::Error), #[error("Error calling zoneadm: {0}")] Zoneadm(#[from] ZoneError), #[error("Non-UTF8 zone path for zone {0}")] @@ -1053,7 +1084,7 @@ enum CleanupError { #[error("Failed to query ZFS properties: {0}")] ZfsError(#[from] ZfsGetError), #[error("I/O error: {0}")] - IoError(#[from] std::io::Error), + IoError(#[from] tokio::io::Error), #[error("Glob pattern invalid: {0}")] Glob(#[from] glob::PatternError), #[error("A file's observed modified time was before the Unix epoch: {0}")] @@ -1075,9 +1106,9 @@ mod tests { }; use sled_storage::dataset::{CRASH_DATASET, DUMP_DATASET}; use std::collections::HashMap; - use std::io::Write; use std::str::FromStr; use tempfile::TempDir; + use tokio::io::AsyncWriteExt; impl Clone for ZfsGetError { fn clone(&self) -> Self { @@ -1103,6 +1134,7 @@ mod tests { pub zones: Vec, } + #[async_trait] impl CoreDumpAdmInvoker for FakeCoreDumpAdm { fn coreadm( &self, @@ -1111,7 +1143,7 @@ mod tests { Ok(()) } - fn dumpadm( + async fn dumpadm( &self, _dump_slice: &Utf8PathBuf, _savecore_dir: Option<&Utf8PathBuf>, @@ -1168,14 +1200,15 @@ mod tests { .join(mountpoint) } } + #[async_trait] impl ZoneInvoker for FakeZone { - fn get_zones(&self) -> Result, ArchiveLogsError> { + async fn get_zones(&self) -> Result, ArchiveLogsError> { Ok(self.zones.clone()) } } - #[test] - fn test_does_not_configure_coreadm_when_no_crash_dataset_mounted() { + #[tokio::test] + async fn test_does_not_configure_coreadm_when_no_crash_dataset_mounted() { let logctx = omicron_test_utils::dev::test_setup_log( "test_does_not_configure_coreadm_when_no_crash_dataset_mounted", ); @@ -1193,10 +1226,12 @@ mod tests { }), Box::::default(), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); // 
nothing when no disks worker.update_disk_loadout(vec![], vec![], vec![]); + worker.reevaluate_choices().await; assert_eq!(worker.chosen_core_dir, None); // nothing when only a disk that's not ready @@ -1205,12 +1240,13 @@ mod tests { name: ZpoolName::from_str(NOT_MOUNTED_INTERNAL).unwrap(), }; worker.update_disk_loadout(vec![], vec![], vec![non_mounted_zpool]); + worker.reevaluate_choices().await; assert_eq!(worker.chosen_core_dir, None); logctx.cleanup_successful(); } - #[test] - fn test_configures_coreadm_only_when_crash_dataset_mounted() { + #[tokio::test] + async fn test_configures_coreadm_only_when_crash_dataset_mounted() { let logctx = omicron_test_utils::dev::test_setup_log( "test_configures_coreadm_only_when_crash_dataset_mounted", ); @@ -1266,6 +1302,7 @@ mod tests { }), Box::::default(), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); // something when there's one that's ready! @@ -1274,8 +1311,9 @@ mod tests { vec![], vec![non_mounted_zpool.clone(), mounted_zpool], ); + worker.reevaluate_choices().await; assert_eq!( - worker.chosen_core_dir.as_ref().unwrap().0, + worker.chosen_core_dir.as_ref().expect("core dir wasn't chosen").0, Utf8PathBuf::from(ZPOOL_MNT).join(CRASH_DATASET) ); @@ -1285,34 +1323,35 @@ mod tests { vec![], vec![non_mounted_zpool, err_zpool], ); + worker.reevaluate_choices().await; assert_eq!(worker.chosen_core_dir, None); logctx.cleanup_successful(); } // we make these so illumos_utils::dumpadm::dump_flag_is_valid returns what we want - fn populate_tempdir_with_fake_dumps( + async fn populate_tempdir_with_fake_dumps( tempdir: &TempDir, ) -> (DumpSlicePath, DumpSlicePath) { let occupied = DumpSlicePath( Utf8PathBuf::from_path_buf(tempdir.path().join("occupied.bin")) .unwrap(), ); - let mut f = std::fs::File::create(occupied.as_ref()).unwrap(); - f.write_all(&[0u8; DUMP_OFFSET as usize]).unwrap(); - f.write_all(&DUMP_MAGIC.to_le_bytes()).unwrap(); - f.write_all(&DUMP_VERSION.to_le_bytes()).unwrap(); - f.write_all(&DF_VALID.to_le_bytes()).unwrap(); + let mut f = tokio::fs::File::create(occupied.as_ref()).await.unwrap(); + f.write_all(&[0u8; DUMP_OFFSET as usize]).await.unwrap(); + f.write_all(&DUMP_MAGIC.to_le_bytes()).await.unwrap(); + f.write_all(&DUMP_VERSION.to_le_bytes()).await.unwrap(); + f.write_all(&DF_VALID.to_le_bytes()).await.unwrap(); drop(f); let vacant = DumpSlicePath( Utf8PathBuf::from_path_buf(tempdir.path().join("vacant.bin")) .unwrap(), ); - let mut f = std::fs::File::create(vacant.as_ref()).unwrap(); - f.write_all(&[0u8; DUMP_OFFSET as usize]).unwrap(); - f.write_all(&DUMP_MAGIC.to_le_bytes()).unwrap(); - f.write_all(&DUMP_VERSION.to_le_bytes()).unwrap(); - f.write_all(&0u32.to_le_bytes()).unwrap(); + let mut f = tokio::fs::File::create(vacant.as_ref()).await.unwrap(); + f.write_all(&[0u8; DUMP_OFFSET as usize]).await.unwrap(); + f.write_all(&DUMP_MAGIC.to_le_bytes()).await.unwrap(); + f.write_all(&DUMP_VERSION.to_le_bytes()).await.unwrap(); + f.write_all(&0u32.to_le_bytes()).await.unwrap(); drop(f); (occupied, vacant) @@ -1320,8 +1359,8 @@ mod tests { // if we only have two filled dump slices and nowhere to evacuate them, // don't configure a dump slice at all. 
- #[test] - fn test_savecore_and_dumpadm_not_called_when_occupied_and_no_dir() { + #[tokio::test] + async fn test_savecore_and_dumpadm_not_called_when_occupied_and_no_dir() { let logctx = omicron_test_utils::dev::test_setup_log( "test_savecore_and_dumpadm_not_called_when_occupied_and_no_dir", ); @@ -1330,15 +1369,17 @@ mod tests { Box::::default(), Box::::default(), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); let tempdir = TempDir::new().unwrap(); - let (occupied, _) = populate_tempdir_with_fake_dumps(&tempdir); + let (occupied, _) = populate_tempdir_with_fake_dumps(&tempdir).await; worker.update_disk_loadout( vec![occupied.clone(), occupied], vec![], vec![], ); + worker.reevaluate_choices().await; assert!(worker.chosen_dump_slice.is_none()); logctx.cleanup_successful(); } @@ -1346,8 +1387,8 @@ mod tests { // if we have one dump slice that's free and one that's full, // and nowhere to savecore the full one, // we should always call dumpadm with the free one. - #[test] - fn test_dumpadm_called_when_vacant_slice_but_no_dir() { + #[tokio::test] + async fn test_dumpadm_called_when_vacant_slice_but_no_dir() { let logctx = omicron_test_utils::dev::test_setup_log( "test_dumpadm_called_when_vacant_slice_but_no_dir", ); @@ -1356,14 +1397,17 @@ mod tests { Box::::default(), Box::::default(), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); let tempdir = TempDir::new().unwrap(); - let (occupied, vacant) = populate_tempdir_with_fake_dumps(&tempdir); + let (occupied, vacant) = + populate_tempdir_with_fake_dumps(&tempdir).await; worker.update_disk_loadout( vec![occupied, vacant.clone()], vec![], vec![], ); + worker.reevaluate_choices().await; assert_eq!(worker.chosen_dump_slice.as_ref(), Some(&vacant)); logctx.cleanup_successful(); } @@ -1371,8 +1415,8 @@ mod tests { // if we have two occupied dump slices, // but we also have somewhere to unload them, // call dumpadm and savecore. 
- #[test] - fn test_savecore_and_dumpadm_invoked_when_slices_occupied_and_dir_is_available( + #[tokio::test] + async fn test_savecore_and_dumpadm_invoked_when_slices_occupied_and_dir_is_available( ) { let logctx = omicron_test_utils::dev::test_setup_log("test_savecore_and_dumpadm_invoked_when_slices_occupied_and_dir_is_available"); const MOUNTED_EXTERNAL: &str = @@ -1395,9 +1439,10 @@ mod tests { }), Box::::default(), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); let tempdir = TempDir::new().unwrap(); - let (occupied, _) = populate_tempdir_with_fake_dumps(&tempdir); + let (occupied, _) = populate_tempdir_with_fake_dumps(&tempdir).await; let mounted_zpool = DebugZpool { mount_config: MountConfig::default(), @@ -1408,6 +1453,8 @@ mod tests { vec![mounted_zpool], vec![], ); + worker.reevaluate_choices().await; + assert_eq!(worker.chosen_dump_slice.as_ref(), Some(&occupied)); assert_eq!( worker.chosen_debug_dir.unwrap().0, @@ -1416,8 +1463,8 @@ mod tests { logctx.cleanup_successful(); } - #[test] - fn test_archives_rotated_logs_and_cores() { + #[tokio::test] + async fn test_archives_rotated_logs_and_cores() { let logctx = omicron_test_utils::dev::test_setup_log( "test_archives_rotated_logs_and_cores", ); @@ -1465,24 +1512,27 @@ mod tests { }), Box::new(FakeZone { zones: vec![zone.clone()] }), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); - std::fs::create_dir_all(&core_dir).unwrap(); - std::fs::create_dir_all(&debug_dir).unwrap(); - std::fs::create_dir_all(&zone_logs).unwrap(); + tokio::fs::create_dir_all(&core_dir).await.unwrap(); + tokio::fs::create_dir_all(&debug_dir).await.unwrap(); + tokio::fs::create_dir_all(&zone_logs).await.unwrap(); const LOG_NAME: &'static str = "foo.log.0"; - writeln!( - std::fs::File::create(zone_logs.join(LOG_NAME)).unwrap(), - "hello" - ) - .unwrap(); + tokio::fs::File::create(zone_logs.join(LOG_NAME)) + .await + .expect("creating fake log") + .write_all(b"hello") + .await + .expect("writing fake log"); const CORE_NAME: &str = "core.myzone.myexe.123.1690540950"; - writeln!( - std::fs::File::create(core_dir.join(CORE_NAME)).unwrap(), - "crunch" - ) - .unwrap(); + tokio::fs::File::create(core_dir.join(CORE_NAME)) + .await + .expect("creating fake core") + .write_all(b"crunch") + .await + .expect("writing fake core"); let mounted_core_zpool = CoreZpool { mount_config: MountConfig::default(), @@ -1498,7 +1548,8 @@ mod tests { vec![mounted_debug_zpool], vec![mounted_core_zpool], ); - worker.archive_files().unwrap(); + worker.reevaluate_choices().await; + worker.archive_files().await.unwrap(); // it'll be renamed to use an epoch timestamp instead of .0 let log_glob = From 59636c9b1ea7496c7cb79d83c7d5a79081135487 Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Tue, 14 May 2024 21:55:48 -0700 Subject: [PATCH 06/37] rearrange buildomat jobs; rewrite releng process in rust and aggressively parallelize (#5744) (Note: documentation says `cargo xtask releng` but I am going to wire that up in a follow-up PR; the current equivalent is `cargo run --release --bin omicron-releng`.) 
Prior to this change we have five main "release engineering" Buildomat jobs that do operations beyond running the test suite: - a **package** job which runs omicron-package in various configurations, - a **build OS images** job which builds the host and trampoline images, - a **TUF repo** job which builds the final TUF repo *(this is the build artifact we actually want)*, - a **deploy** job which uses the single-sled packages to test that a VM boots to SSH *(this is a test we actually want)*, - and a **CI tools** job which builds common tools used by multiple jobs. This looks like: ```mermaid graph LR package --> host-image["build OS images"] package --> deploy package --> tuf-repo["TUF repo"] host-image --> tuf-repo ci-tools["CI tools"] --> deploy ci-tools --> tuf-repo ``` (There are also the currently-disabled a4x2 jobs but those are independent of this particular graph.) I think the initial idea behind this was to reuse build artifacts where possible, but this is pretty complicated and adds a lot more output upload/download overhead than expected, which slows down the time to get the end artifact we actually want. This PR changes the graph to: ```mermaid graph LR package --> deploy tuf-repo["TUF repo"] ``` And the **TUF repo** job primarily runs a new **releng** binary, which runs all of the steps required to download and build all the components of the TUF repo in a single task, using a terrible job runner I wrote. The primary goal here was to reduce the time from pushing a commit to getting a TUF repo out the other end; this drops time-to-TUF-repo from ~80 minutes to ~45. In the process this also made it much easier to build a TUF repo (and iterate on that process) locally: just run `cargo xtask releng` (TODO: soon). It also deleted a lot of Bash. One thing to note is that, in service of the mission to get time-to-TUF-repo down as much as possible, that job _only_ uploads the TUF repo (and some logs). I also put all of the outputs for the **package** job into a single tarball for the **deploy** job to unpack. There are no longer separate uploads for the OS images and each zone; these can be extracted from the repo as we normally do. 
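
To make the dependency-notification idea concrete, here is a minimal sketch (not the releng job runner itself; the job names and bodies are purely illustrative) of how two tasks can be ordered with `tokio::sync::oneshot` channels so that Tokio's scheduler, rather than an explicit dependency tracker, effectively owns the DAG:

```rust
// Sketch only: assumes tokio = { features = ["full"] } is available.
use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    // "package" must finish before "deploy" starts.
    let (package_done_tx, package_done_rx) = oneshot::channel::<()>();

    let package = tokio::spawn(async move {
        // ... build packages here ...
        // Ignore send errors: the receiver may already have been dropped.
        package_done_tx.send(()).ok();
    });

    let deploy = tokio::spawn(async move {
        // Wait for the dependency to signal completion before doing any work.
        if package_done_rx.await.is_ok() {
            // ... run deploy steps here ...
        }
    });

    let _ = tokio::join!(package, deploy);
}
```

The job runner added later in this patch generalizes this pattern: each completed job notifies all registered receivers, per-job output is captured to log files, and a semaphore caps concurrency at the host's available parallelism.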
--- .github/buildomat/jobs/ci-tools.sh | 77 --- .github/buildomat/jobs/deploy.sh | 12 +- .github/buildomat/jobs/host-image.sh | 93 ---- .github/buildomat/jobs/package.sh | 115 +--- .github/buildomat/jobs/tuf-repo.sh | 138 ++--- Cargo.lock | 46 +- Cargo.toml | 5 +- caboose-util/Cargo.toml | 13 - caboose-util/src/main.rs | 32 -- dev-tools/releng/Cargo.toml | 35 ++ dev-tools/releng/src/cmd.rs | 167 ++++++ dev-tools/releng/src/hubris.rs | 148 +++++ dev-tools/releng/src/job.rs | 305 ++++++++++ dev-tools/releng/src/main.rs | 734 +++++++++++++++++++++++++ dev-tools/releng/src/tuf.rs | 149 +++++ dev-tools/xtask/Cargo.toml | 2 +- docs/releng.adoc | 81 +++ package-manifest.toml | 2 +- package/src/bin/omicron-package.rs | 25 +- package/src/lib.rs | 5 + tools/build-host-image.sh | 111 ---- tools/hubris_checksums | 8 - tools/hubris_version | 1 - tools/permslip_commit | 1 - tufaceous-lib/src/assemble/manifest.rs | 13 +- workspace-hack/Cargo.toml | 10 +- 26 files changed, 1754 insertions(+), 574 deletions(-) delete mode 100755 .github/buildomat/jobs/ci-tools.sh delete mode 100755 .github/buildomat/jobs/host-image.sh delete mode 100644 caboose-util/Cargo.toml delete mode 100644 caboose-util/src/main.rs create mode 100644 dev-tools/releng/Cargo.toml create mode 100644 dev-tools/releng/src/cmd.rs create mode 100644 dev-tools/releng/src/hubris.rs create mode 100644 dev-tools/releng/src/job.rs create mode 100644 dev-tools/releng/src/main.rs create mode 100644 dev-tools/releng/src/tuf.rs create mode 100644 docs/releng.adoc delete mode 100755 tools/build-host-image.sh delete mode 100644 tools/hubris_checksums delete mode 100644 tools/hubris_version delete mode 100644 tools/permslip_commit diff --git a/.github/buildomat/jobs/ci-tools.sh b/.github/buildomat/jobs/ci-tools.sh deleted file mode 100755 index 4c58731e249..00000000000 --- a/.github/buildomat/jobs/ci-tools.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/bash -#: -#: name = "helios / CI tools" -#: variety = "basic" -#: target = "helios-2.0" -#: rust_toolchain = "1.72.1" -#: output_rules = [ -#: "=/work/end-to-end-tests/*.gz", -#: "=/work/caboose-util.gz", -#: "=/work/tufaceous.gz", -#: "=/work/commtest", -#: "=/work/permslip.gz", -#: ] -#: access_repos = [ -#: "oxidecomputer/permission-slip", -#: "oxidecomputer/sshauth" -#: ] - -set -o errexit -set -o pipefail -set -o xtrace - -cargo --version -rustc --version - -ptime -m ./tools/install_builder_prerequisites.sh -yp - -########## end-to-end-tests ########## - -banner end-to-end-tests - -# -# Reduce debuginfo just to line tables. 
-# -export CARGO_PROFILE_DEV_DEBUG=1 -export CARGO_PROFILE_TEST_DEBUG=1 -export CARGO_INCREMENTAL=0 - -ptime -m cargo build --locked -p end-to-end-tests --tests --bin bootstrap \ - --message-format json-render-diagnostics >/tmp/output.end-to-end.json - -mkdir -p /work -ptime -m cargo build --locked -p end-to-end-tests --tests --bin commtest -cp target/debug/commtest /work/commtest - -mkdir -p /work/end-to-end-tests -for p in target/debug/bootstrap $(/opt/ooce/bin/jq -r 'select(.profile.test) | .executable' /tmp/output.end-to-end.json); do - # shellcheck disable=SC2094 - ptime -m gzip < "$p" > /work/end-to-end-tests/"$(basename "$p").gz" -done - -########## caboose-util ########## - -banner caboose-util - -ptime -m cargo build --locked -p caboose-util --release -ptime -m gzip < target/release/caboose-util > /work/caboose-util.gz - -########## tufaceous ########## - -banner tufaceous - -ptime -m cargo build --locked -p tufaceous --release -ptime -m gzip < target/release/tufaceous > /work/tufaceous.gz - -########## permission-slip ########## - -banner permission-slip - -source "./tools/permslip_commit" -git init /work/permission-slip-build -pushd /work/permission-slip-build -git remote add origin https://github.com/oxidecomputer/permission-slip.git -ptime -m git fetch --depth 1 origin "$COMMIT" -git checkout FETCH_HEAD -ptime -m cargo build --locked -p permission-slip-client --release -ptime -m gzip < target/release/permslip > /work/permslip.gz diff --git a/.github/buildomat/jobs/deploy.sh b/.github/buildomat/jobs/deploy.sh index 8d3e94cd5ec..c947a05e10a 100755 --- a/.github/buildomat/jobs/deploy.sh +++ b/.github/buildomat/jobs/deploy.sh @@ -20,8 +20,6 @@ #: [dependencies.package] #: job = "helios / package" #: -#: [dependencies.ci-tools] -#: job = "helios / CI tools" set -o errexit set -o pipefail @@ -144,13 +142,6 @@ pfexec chown build:build /opt/oxide/work cd /opt/oxide/work ptime -m tar xvzf /input/package/work/package.tar.gz -cp /input/package/work/zones/* out/ -mv out/nexus-single-sled.tar.gz out/nexus.tar.gz -mkdir tests -for p in /input/ci-tools/work/end-to-end-tests/*.gz; do - ptime -m gunzip < "$p" > "tests/$(basename "${p%.gz}")" - chmod a+x "tests/$(basename "${p%.gz}")" -done # Ask buildomat for the range of extra addresses that we're allowed to use, and # break them up into the ranges we need. 
@@ -354,7 +345,7 @@ echo "Waited for nexus: ${retry}s" export RUST_BACKTRACE=1 export E2E_TLS_CERT IPPOOL_START IPPOOL_END -eval "$(./tests/bootstrap)" +eval "$(./target/debug/bootstrap)" export OXIDE_HOST OXIDE_TOKEN # @@ -387,7 +378,6 @@ done /usr/oxide/oxide --resolve "$OXIDE_RESOLVE" --cacert "$E2E_TLS_CERT" \ image promote --project images --image debian11 -rm ./tests/bootstrap for test_bin in tests/*; do ./"$test_bin" done diff --git a/.github/buildomat/jobs/host-image.sh b/.github/buildomat/jobs/host-image.sh deleted file mode 100755 index 2f4d146a488..00000000000 --- a/.github/buildomat/jobs/host-image.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/bin/bash -#: -#: name = "helios / build OS images" -#: variety = "basic" -#: target = "helios-2.0" -#: rust_toolchain = "1.72.1" -#: output_rules = [ -#: "=/work/helios/upload/os-host.tar.gz", -#: "=/work/helios/upload/os-trampoline.tar.gz", -#: ] -#: access_repos = [ -#: "oxidecomputer/amd-apcb", -#: "oxidecomputer/amd-efs", -#: "oxidecomputer/amd-firmware", -#: "oxidecomputer/amd-flash", -#: "oxidecomputer/amd-host-image-builder", -#: "oxidecomputer/boot-image-tools", -#: "oxidecomputer/chelsio-t6-roms", -#: "oxidecomputer/compliance-pilot", -#: "oxidecomputer/facade", -#: "oxidecomputer/helios", -#: "oxidecomputer/helios-omicron-brand", -#: "oxidecomputer/helios-omnios-build", -#: "oxidecomputer/helios-omnios-extra", -#: "oxidecomputer/nanobl-rs", -#: ] -#: -#: [dependencies.package] -#: job = "helios / package" -#: -#: [[publish]] -#: series = "image" -#: name = "os.tar.gz" -#: from_output = "/work/helios/image/output/os.tar.gz" -#: - -set -o errexit -set -o pipefail -set -o xtrace - -cargo --version -rustc --version - -TOP=$PWD - -source "$TOP/tools/include/force-git-over-https.sh" - -# Check out helios into /work/helios -HELIOSDIR=/work/helios -git clone https://github.com/oxidecomputer/helios.git "$HELIOSDIR" -cd "$HELIOSDIR" -# Record the branch and commit in the output -git status --branch --porcelain=2 -# Setting BUILD_OS to no makes setup skip repositories we don't need for -# building the OS itself (we are just building an image from already built OS). -BUILD_OS=no gmake setup - -# Commands that "helios-build" would ask us to run (either explicitly or -# implicitly, to avoid an error). -rc=0 -pfexec pkg install -q /system/zones/brand/omicron1/tools || rc=$? -case $rc in - # `man pkg` notes that exit code 4 means no changes were made because - # there is nothing to do; that's fine. Any other exit code is an error. - 0 | 4) ;; - *) exit $rc ;; -esac - -pfexec zfs create -p "rpool/images/$USER" - - -# TODO: Consider importing zones here too? 
- -cd "$TOP" -OUTPUTDIR="$HELIOSDIR/upload" -mkdir "$OUTPUTDIR" - -banner OS -./tools/build-host-image.sh -B \ - -S /input/package/work/zones/switch-asic.tar.gz \ - "$HELIOSDIR" \ - /input/package/work/global-zone-packages.tar.gz - -mv "$HELIOSDIR/image/output/os.tar.gz" "$OUTPUTDIR/os-host.tar.gz" - -banner Trampoline - -./tools/build-host-image.sh -R \ - "$HELIOSDIR" \ - /input/package/work/trampoline-global-zone-packages.tar.gz - -mv "$HELIOSDIR/image/output/os.tar.gz" "$OUTPUTDIR/os-trampoline.tar.gz" - diff --git a/.github/buildomat/jobs/package.sh b/.github/buildomat/jobs/package.sh index 11a5a1a0eef..63e5e1ce716 100755 --- a/.github/buildomat/jobs/package.sh +++ b/.github/buildomat/jobs/package.sh @@ -3,24 +3,11 @@ #: name = "helios / package" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.72.1" +#: rust_toolchain = "1.77.2" #: output_rules = [ -#: "=/work/version.txt", #: "=/work/package.tar.gz", -#: "=/work/global-zone-packages.tar.gz", -#: "=/work/trampoline-global-zone-packages.tar.gz", -#: "=/work/zones/*.tar.gz", #: ] #: -#: [[publish]] -#: series = "image" -#: name = "global-zone-packages" -#: from_output = "/work/global-zone-packages.tar.gz" -#: -#: [[publish]] -#: series = "image" -#: name = "trampoline-global-zone-packages" -#: from_output = "/work/trampoline-global-zone-packages.tar.gz" set -o errexit set -o pipefail @@ -32,17 +19,6 @@ rustc --version WORK=/work pfexec mkdir -p $WORK && pfexec chown $USER $WORK -# -# Generate the version for control plane artifacts here. We use `0.git` as the -# prerelease field because it comes before `alpha`. -# -# In this job, we stamp the version into packages installed in the host and -# trampoline global zone images. -# -COMMIT=$(git rev-parse HEAD) -VERSION="8.0.0-0.ci+git${COMMIT:0:11}" -echo "$VERSION" >/work/version.txt - ptime -m ./tools/install_builder_prerequisites.sh -yp ptime -m ./tools/ci_download_softnpu_machinery @@ -52,88 +28,33 @@ ptime -m cargo run --locked --release --bin omicron-package -- \ -t test target create -i standard -m non-gimlet -s softnpu -r single-sled ptime -m cargo run --locked --release --bin omicron-package -- \ -t test package +mapfile -t packages \ + < <(cargo run --locked --release --bin omicron-package -- -t test list-outputs) # Build the xtask binary used by the deploy job ptime -m cargo build --locked --release -p xtask -# Assemble some utilities into a tarball that can be used by deployment -# phases of buildomat. +# Build the end-to-end tests +# Reduce debuginfo just to line tables. +export CARGO_PROFILE_DEV_DEBUG=line-tables-only +export CARGO_PROFILE_TEST_DEBUG=line-tables-only +ptime -m cargo build --locked -p end-to-end-tests --tests --bin bootstrap \ + --message-format json-render-diagnostics >/tmp/output.end-to-end.json +mkdir tests +/opt/ooce/bin/jq -r 'select(.profile.test) | .executable' /tmp/output.end-to-end.json \ + | xargs -I {} -t cp {} tests/ + +# Assemble these outputs and some utilities into a tarball that can be used by +# deployment phases of buildomat. files=( - out/*.tar out/target/test out/npuzone/* package-manifest.toml smf/sled-agent/non-gimlet/config.toml target/release/omicron-package target/release/xtask + target/debug/bootstrap + tests/* ) - -ptime -m tar cvzf $WORK/package.tar.gz "${files[@]}" - -tarball_src_dir="$(pwd)/out/versioned" -stamp_packages() { - for package in "$@"; do - cargo run --locked --release --bin omicron-package -- stamp "$package" "$VERSION" - done -} - -# Keep the single-sled Nexus zone around for the deploy job. 
(The global zone -# build below overwrites the file.) -mv out/nexus.tar.gz out/nexus-single-sled.tar.gz - -# Build necessary for the global zone -ptime -m cargo run --locked --release --bin omicron-package -- \ - -t host target create -i standard -m gimlet -s asic -r multi-sled -ptime -m cargo run --locked --release --bin omicron-package -- \ - -t host package -stamp_packages omicron-sled-agent mg-ddm-gz propolis-server overlay oxlog pumpkind-gz - -# Create global zone package @ $WORK/global-zone-packages.tar.gz -ptime -m ./tools/build-global-zone-packages.sh "$tarball_src_dir" $WORK - -# Non-Global Zones - -# Assemble Zone Images into their respective output locations. -# -# Zones that are included into another are intentionally omitted from this list -# (e.g., the switch zone tarballs contain several other zone tarballs: dendrite, -# mg-ddm, etc.). -# -# Note that when building for a real gimlet, `propolis-server` and `switch-*` -# should be included in the OS ramdisk. -mkdir -p $WORK/zones -zones=( - out/clickhouse.tar.gz - out/clickhouse_keeper.tar.gz - out/cockroachdb.tar.gz - out/crucible-pantry-zone.tar.gz - out/crucible-zone.tar.gz - out/external-dns.tar.gz - out/internal-dns.tar.gz - out/nexus.tar.gz - out/nexus-single-sled.tar.gz - out/oximeter.tar.gz - out/propolis-server.tar.gz - out/switch-*.tar.gz - out/ntp.tar.gz - out/omicron-gateway-softnpu.tar.gz - out/omicron-gateway-asic.tar.gz - out/overlay.tar.gz - out/probe.tar.gz -) -cp "${zones[@]}" $WORK/zones/ - -# -# Global Zone files for Trampoline image -# - -# Build necessary for the trampoline image -ptime -m cargo run --locked --release --bin omicron-package -- \ - -t recovery target create -i trampoline -ptime -m cargo run --locked --release --bin omicron-package -- \ - -t recovery package -stamp_packages installinator mg-ddm-gz - -# Create trampoline global zone package @ $WORK/trampoline-global-zone-packages.tar.gz -ptime -m ./tools/build-trampoline-global-zone-packages.sh "$tarball_src_dir" $WORK +ptime -m tar cvzf $WORK/package.tar.gz "${files[@]}" "${packages[@]}" diff --git a/.github/buildomat/jobs/tuf-repo.sh b/.github/buildomat/jobs/tuf-repo.sh index 89928a0030a..2ed1ae08c34 100755 --- a/.github/buildomat/jobs/tuf-repo.sh +++ b/.github/buildomat/jobs/tuf-repo.sh @@ -3,20 +3,29 @@ #: name = "helios / build TUF repo" #: variety = "basic" #: target = "helios-2.0" +#: rust_toolchain = "1.77.2" #: output_rules = [ -#: "=/work/manifest*.toml", -#: "=/work/repo-*.zip", -#: "=/work/repo-*.zip.sha256.txt", +#: "=/work/manifest.toml", +#: "=/work/repo.zip", +#: "=/work/repo.zip.sha256.txt", +#: "%/work/*.log", +#: ] +#: access_repos = [ +#: "oxidecomputer/amd-apcb", +#: "oxidecomputer/amd-efs", +#: "oxidecomputer/amd-firmware", +#: "oxidecomputer/amd-flash", +#: "oxidecomputer/amd-host-image-builder", +#: "oxidecomputer/boot-image-tools", +#: "oxidecomputer/chelsio-t6-roms", +#: "oxidecomputer/compliance-pilot", +#: "oxidecomputer/facade", +#: "oxidecomputer/helios", +#: "oxidecomputer/helios-omicron-brand", +#: "oxidecomputer/helios-omnios-build", +#: "oxidecomputer/helios-omnios-extra", +#: "oxidecomputer/nanobl-rs", #: ] -#: -#: [dependencies.ci-tools] -#: job = "helios / CI tools" -#: -#: [dependencies.package] -#: job = "helios / package" -#: -#: [dependencies.host] -#: job = "helios / build OS images" #: #: [[publish]] #: series = "rot-all" @@ -26,105 +35,34 @@ #: [[publish]] #: series = "rot-all" #: name = "repo.zip" -#: from_output = "/work/repo-rot-all.zip" +#: from_output = "/work/repo.zip" #: #: [[publish]] #: 
series = "rot-all" #: name = "repo.zip.sha256.txt" -#: from_output = "/work/repo-rot-all.zip.sha256.txt" +#: from_output = "/work/repo.zip.sha256.txt" #: set -o errexit set -o pipefail set -o xtrace -TOP=$PWD -VERSION=$(< /input/package/work/version.txt) - -for bin in caboose-util tufaceous permslip; do - ptime -m gunzip < /input/ci-tools/work/$bin.gz > /work/$bin - chmod a+x /work/$bin -done - -# -# We do two things here: -# 1. Run `omicron-package stamp` on all the zones. -# 2. Run `omicron-package unpack` to switch from "package-name.tar.gz" to "service_name.tar.gz". -# -mkdir /work/package -pushd /work/package -tar xf /input/package/work/package.tar.gz out package-manifest.toml target/release/omicron-package -target/release/omicron-package -t default target create -i standard -m gimlet -s asic -r multi-sled -ln -s /input/package/work/zones/* out/ -rm out/switch-softnpu.tar.gz # not used when target switch=asic -rm out/omicron-gateway-softnpu.tar.gz # not used when target switch=asic -rm out/nexus-single-sled.tar.gz # only used for deploy tests -for zone in out/*.tar.gz; do - target/release/omicron-package stamp "$(basename "${zone%.tar.gz}")" "$VERSION" -done -mv out/versioned/* out/ -OMICRON_NO_UNINSTALL=1 target/release/omicron-package unpack --out install -popd - -# Generate a throwaway repository key. -python3 -c 'import secrets; open("/work/key.txt", "w").write("ed25519:%s\n" % secrets.token_hex(32))' -read -r TUFACEOUS_KEY /work/manifest.toml <>/work/manifest.toml <>/work/manifest.toml <> /work/manifest.toml - done < $TOP/tools/permslip_$name - popd -} +rc=0 +pfexec pkg install -q /system/zones/brand/omicron1/tools || rc=$? +case $rc in + # `man pkg` notes that exit code 4 means no changes were made because + # there is nothing to do; that's fine. Any other exit code is an error. 
+ 0 | 4) ;; + *) exit $rc ;; +esac -mkdir /work/hubris -pushd /work/hubris -download_region_manifests https://permslip-staging.corp.oxide.computer staging -download_region_manifests https://signer-us-west.corp.oxide.computer production -popd +pfexec zfs create -p "rpool/images/$USER/host" +pfexec zfs create -p "rpool/images/$USER/recovery" -/work/tufaceous assemble --no-generate-key /work/manifest.toml /work/repo-rot-all.zip -digest -a sha256 /work/repo-rot-all.zip > /work/repo-rot-all.zip.sha256.txt +cargo run --release --bin omicron-releng -- --output-dir /work diff --git a/Cargo.lock b/Cargo.lock index e1e445cc3c4..f7cded308b3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -785,15 +785,6 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "caboose-util" -version = "0.1.0" -dependencies = [ - "anyhow", - "hubtools", - "omicron-workspace-hack", -] - [[package]] name = "camino" version = "1.1.6" @@ -2547,6 +2538,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" dependencies = [ "autocfg", + "tokio", ] [[package]] @@ -5592,6 +5584,37 @@ dependencies = [ "thiserror", ] +[[package]] +name = "omicron-releng" +version = "0.1.0" +dependencies = [ + "anyhow", + "camino", + "camino-tempfile", + "cargo_metadata", + "chrono", + "clap", + "fs-err", + "futures", + "hex", + "omicron-common", + "omicron-workspace-hack", + "omicron-zone-package", + "once_cell", + "reqwest", + "semver 1.0.22", + "serde", + "sha2", + "shell-words", + "slog", + "slog-async", + "slog-term", + "tar", + "tokio", + "toml 0.8.12", + "tufaceous-lib", +] + [[package]] name = "omicron-rpaths" version = "0.1.0" @@ -5778,6 +5801,7 @@ dependencies = [ "elliptic-curve", "ff", "flate2", + "fs-err", "futures", "futures-channel", "futures-core", @@ -5815,11 +5839,8 @@ dependencies = [ "pem-rfc7468", "petgraph", "postgres-types", - "ppv-lite86", "predicates", "proc-macro2", - "rand 0.8.5", - "rand_chacha 0.3.1", "regex", "regex-automata 0.4.5", "regex-syntax 0.8.2", @@ -5859,7 +5880,6 @@ dependencies = [ "yasna", "zerocopy 0.7.32", "zeroize", - "zip", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 83a41ff8343..891565f8573 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,6 @@ members = [ "api_identity", "bootstore", - "caboose-util", "certificates", "clients/bootstrap-agent-client", "clients/ddm-admin-client", @@ -21,6 +20,7 @@ members = [ "dev-tools/omicron-dev", "dev-tools/oxlog", "dev-tools/reconfigurator-cli", + "dev-tools/releng", "dev-tools/xtask", "dns-server", "end-to-end-tests", @@ -84,7 +84,6 @@ members = [ default-members = [ "bootstore", - "caboose-util", "certificates", "clients/bootstrap-agent-client", "clients/ddm-admin-client", @@ -103,6 +102,7 @@ default-members = [ "dev-tools/omicron-dev", "dev-tools/oxlog", "dev-tools/reconfigurator-cli", + "dev-tools/releng", # Do not include xtask in the list of default members, because this causes # hakari to not work as well and build times to be longer. # See omicron#4392. 
@@ -228,6 +228,7 @@ bytes = "1.6.0" camino = { version = "1.1", features = ["serde1"] } camino-tempfile = "1.1.1" cancel-safe-futures = "0.1.5" +cargo_metadata = "0.18.1" chacha20poly1305 = "0.10.1" ciborium = "0.2.2" cfg-if = "1.0" diff --git a/caboose-util/Cargo.toml b/caboose-util/Cargo.toml deleted file mode 100644 index ceff70b41db..00000000000 --- a/caboose-util/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "caboose-util" -version = "0.1.0" -edition = "2021" -license = "MPL-2.0" - -[lints] -workspace = true - -[dependencies] -anyhow.workspace = true -hubtools.workspace = true -omicron-workspace-hack.workspace = true diff --git a/caboose-util/src/main.rs b/caboose-util/src/main.rs deleted file mode 100644 index 36851cd36d4..00000000000 --- a/caboose-util/src/main.rs +++ /dev/null @@ -1,32 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// Copyright 2023 Oxide Computer Company - -use anyhow::{bail, Context, Result}; -use hubtools::{Caboose, RawHubrisArchive}; - -fn main() -> Result<()> { - let mut args = std::env::args().skip(1); - match args.next().context("subcommand required")?.as_str() { - "read-board" => { - let caboose = read_caboose(args.next())?; - println!("{}", std::str::from_utf8(caboose.board()?)?); - Ok(()) - } - "read-version" => { - let caboose = read_caboose(args.next())?; - println!("{}", std::str::from_utf8(caboose.version()?)?); - Ok(()) - } - unknown => bail!("unknown command {}", unknown), - } -} - -fn read_caboose(path: Option) -> Result { - let archive = RawHubrisArchive::load( - &path.context("path to hubris archive required")?, - )?; - Ok(archive.read_caboose()?) -} diff --git a/dev-tools/releng/Cargo.toml b/dev-tools/releng/Cargo.toml new file mode 100644 index 00000000000..19ede6c24db --- /dev/null +++ b/dev-tools/releng/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "omicron-releng" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[dependencies] +anyhow.workspace = true +camino.workspace = true +camino-tempfile.workspace = true +cargo_metadata.workspace = true +chrono.workspace = true +clap.workspace = true +fs-err = { workspace = true, features = ["tokio"] } +futures.workspace = true +hex.workspace = true +omicron-common.workspace = true +omicron-workspace-hack.workspace = true +omicron-zone-package.workspace = true +once_cell.workspace = true +reqwest.workspace = true +semver.workspace = true +serde.workspace = true +sha2.workspace = true +shell-words.workspace = true +slog.workspace = true +slog-async.workspace = true +slog-term.workspace = true +tar.workspace = true +tokio = { workspace = true, features = ["full"] } +toml.workspace = true +tufaceous-lib.workspace = true + +[lints] +workspace = true diff --git a/dev-tools/releng/src/cmd.rs b/dev-tools/releng/src/cmd.rs new file mode 100644 index 00000000000..198eabf99ff --- /dev/null +++ b/dev-tools/releng/src/cmd.rs @@ -0,0 +1,167 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +use std::ffi::OsStr; +use std::path::Path; +use std::process::ExitStatus; +use std::process::Output; +use std::process::Stdio; +use std::time::Instant; + +use anyhow::ensure; +use anyhow::Context; +use anyhow::Result; +use slog::debug; +use slog::Logger; + +/// Wrapper for `tokio::process::Command` where the builder methods take/return +/// `self`, plus a number of convenience methods. +pub(crate) struct Command { + inner: tokio::process::Command, +} + +impl Command { + pub(crate) fn new(program: impl AsRef) -> Command { + Command { inner: tokio::process::Command::new(program) } + } + + pub(crate) fn arg(mut self, arg: impl AsRef) -> Command { + self.inner.arg(arg); + self + } + + pub(crate) fn args( + mut self, + args: impl IntoIterator>, + ) -> Command { + self.inner.args(args); + self + } + + pub(crate) fn current_dir(mut self, dir: impl AsRef) -> Command { + self.inner.current_dir(dir); + self + } + + pub(crate) fn env( + mut self, + key: impl AsRef, + value: impl AsRef, + ) -> Command { + self.inner.env(key, value); + self + } + + pub(crate) fn env_remove(mut self, key: impl AsRef) -> Command { + self.inner.env_remove(key); + self + } + + pub(crate) async fn is_success(mut self, logger: &Logger) -> Result { + self.inner + .stdin(Stdio::null()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()); + Ok(xtrace(&mut self, logger).await?.status.success()) + } + + pub(crate) async fn ensure_success( + mut self, + logger: &Logger, + ) -> Result<()> { + self.inner + .stdin(Stdio::null()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()); + let status = xtrace(&mut self, logger).await?.status; + check_status(self, status) + } + + pub(crate) async fn ensure_stdout( + mut self, + logger: &Logger, + ) -> Result { + self.inner + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::inherit()); + let output = xtrace(&mut self, logger).await?; + check_status(self, output.status)?; + String::from_utf8(output.stdout).context("command stdout was not UTF-8") + } + + pub(crate) fn into_parts(self) -> (Description, tokio::process::Command) { + (Description { str: self.to_string() }, self.inner) + } +} + +impl std::fmt::Display for Command { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let command = self.inner.as_std(); + for (name, value) in command.get_envs() { + if let Some(value) = value { + write!( + f, + "{}={} ", + shell_words::quote(&name.to_string_lossy()), + shell_words::quote(&value.to_string_lossy()) + )?; + } + } + write!( + f, + "{}", + shell_words::quote(&command.get_program().to_string_lossy()) + )?; + for arg in command.get_args() { + write!(f, " {}", shell_words::quote(&arg.to_string_lossy()))?; + } + Ok(()) + } +} + +/// Returned from [`Command::into_parts`] for use in the `job` module. 
+pub(crate) struct Description { + str: String, +} + +impl Description { + pub(crate) fn check_status(&self, status: ExitStatus) -> Result<()> { + check_status(self, status) + } +} + +impl std::fmt::Display for Description { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.str) + } +} + +fn check_status( + command: impl std::fmt::Display, + status: ExitStatus, +) -> Result<()> { + ensure!(status.success(), "command `{}` exited with {}", command, status); + Ok(()) +} + +async fn xtrace(command: &mut Command, logger: &Logger) -> Result { + command.inner.stdin(Stdio::null()).kill_on_drop(true); + debug!(logger, "running: {}", command); + let start = Instant::now(); + let output = command + .inner + .spawn() + .with_context(|| format!("failed to exec `{}`", command))? + .wait_with_output() + .await + .with_context(|| format!("failed to wait on `{}`", command))?; + debug!( + logger, + "process exited with {} ({:?})", + output.status, + Instant::now().saturating_duration_since(start) + ); + Ok(output) +} diff --git a/dev-tools/releng/src/hubris.rs b/dev-tools/releng/src/hubris.rs new file mode 100644 index 00000000000..685a729a9f2 --- /dev/null +++ b/dev-tools/releng/src/hubris.rs @@ -0,0 +1,148 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::collections::BTreeMap; +use std::collections::HashMap; + +use anyhow::Context; +use anyhow::Result; +use camino::Utf8PathBuf; +use fs_err::tokio as fs; +use futures::future::TryFutureExt; +use omicron_common::api::external::SemverVersion; +use omicron_common::api::internal::nexus::KnownArtifactKind; +use semver::Version; +use serde::Deserialize; +use tufaceous_lib::assemble::DeserializedArtifactData; +use tufaceous_lib::assemble::DeserializedArtifactSource; +use tufaceous_lib::assemble::DeserializedFileArtifactSource; +use tufaceous_lib::assemble::DeserializedManifest; + +pub(crate) async fn fetch_hubris_artifacts( + base_url: &'static str, + client: reqwest::Client, + manifest_list: Utf8PathBuf, + output_dir: Utf8PathBuf, +) -> Result<()> { + macro_rules! zip { + ($expr:expr) => { + output_dir.join(format!("{}.zip", $expr)) + }; + } + + fs::create_dir_all(&output_dir).await?; + + // This could be parallelized with FuturesUnordered but in practice this + // takes less time than OS builds. 
+ + let mut manifest = DeserializedManifest { + system_version: SemverVersion(Version::new(0, 0, 0)), + artifacts: BTreeMap::new(), + }; + + for line in fs::read_to_string(manifest_list).await?.lines() { + if let Some(hash) = line.split_whitespace().next() { + let data = fetch_hash(base_url, &client, hash).await?; + let str = String::from_utf8(data).with_context(|| { + format!("hubris artifact manifest {} was not UTF-8", hash) + })?; + let hash_manifest: Manifest = + toml::from_str(&str).with_context(|| { + format!( + "failed to deserialize hubris artifact manifest {}", + hash + ) + })?; + for (kind, artifacts) in hash_manifest.artifacts { + for artifact in artifacts { + let (source, hashes) = match artifact.source { + Source::File(file) => ( + DeserializedArtifactSource::File { + path: zip!(file.hash), + }, + vec![file.hash], + ), + Source::CompositeRot { archive_a, archive_b } => ( + DeserializedArtifactSource::CompositeRot { + archive_a: + DeserializedFileArtifactSource::File { + path: zip!(archive_a.hash), + }, + archive_b: + DeserializedFileArtifactSource::File { + path: zip!(archive_b.hash), + }, + }, + vec![archive_a.hash, archive_b.hash], + ), + }; + manifest.artifacts.entry(kind).or_default().push( + DeserializedArtifactData { + name: artifact.name, + version: artifact.version, + source, + }, + ); + for hash in hashes { + let data = fetch_hash(base_url, &client, &hash).await?; + fs::write(output_dir.join(zip!(hash)), data).await?; + } + } + } + } + } + + fs::write( + output_dir.join("manifest.toml"), + toml::to_string_pretty(&manifest)?.into_bytes(), + ) + .await?; + Ok(()) +} + +async fn fetch_hash( + base_url: &'static str, + client: &reqwest::Client, + hash: &str, +) -> Result> { + client + .get(format!("{}/artifact/{}", base_url, hash)) + .send() + .and_then(|response| response.json()) + .await + .with_context(|| { + format!( + "failed to fetch hubris artifact {} from {}", + hash, base_url + ) + }) +} + +// These structs are similar to `DeserializeManifest` and friends from +// tufaceous-lib, except that the source is a hash instead of a file path. This +// hash is used to download the artifact from Permission Slip. +#[derive(Deserialize)] +struct Manifest { + #[serde(rename = "artifact")] + artifacts: HashMap>, +} + +#[derive(Deserialize)] +struct Artifact { + name: String, + version: SemverVersion, + source: Source, +} + +#[derive(Deserialize)] +#[serde(tag = "kind", rename_all = "kebab-case")] +enum Source { + File(FileSource), + CompositeRot { archive_a: FileSource, archive_b: FileSource }, +} + +#[derive(Deserialize)] +struct FileSource { + hash: String, +} diff --git a/dev-tools/releng/src/job.rs b/dev-tools/releng/src/job.rs new file mode 100644 index 00000000000..dcb58a0b920 --- /dev/null +++ b/dev-tools/releng/src/job.rs @@ -0,0 +1,305 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! A quick-and-dirty job runner. +//! +//! Jobs are async functions given a name. All jobs must be described before the +//! jobs can be run (`Jobs::run_all` consumes the job runner). Jobs can depend +//! on other jobs, which is implemented via `tokio::sync::oneshot` channels; a +//! completed job sends a message to all registered receivers, which are waiting +//! on the messages in order to run. This essentially creates a DAG, except +//! instead of us having to keep track of it, we make it Tokio's problem. +//! +//! 
A `tokio::sync::Semaphore` is used to restrict the number of jobs to +//! `std::thread::available_parallelism`, except for a hardcoded list of +//! prioritized job names that are allowed to ignore this. + +use std::collections::HashMap; +use std::future::Future; +use std::process::Stdio; +use std::sync::Arc; +use std::time::Instant; + +use anyhow::anyhow; +use anyhow::Context; +use anyhow::Result; +use camino::Utf8Path; +use camino::Utf8PathBuf; +use fs_err::tokio::File; +use futures::future::BoxFuture; +use futures::future::FutureExt; +use futures::stream::FuturesUnordered; +use futures::stream::TryStreamExt; +use slog::info; +use slog::Logger; +use tokio::io::AsyncBufReadExt; +use tokio::io::AsyncRead; +use tokio::io::AsyncWrite; +use tokio::io::AsyncWriteExt; +use tokio::io::BufReader; +use tokio::sync::oneshot; +use tokio::sync::oneshot::error::RecvError; +use tokio::sync::Semaphore; + +use crate::cmd::Command; + +// We want these two jobs to run without delay because they take the longest +// amount of time, so we allow them to run without taking a permit first. +const PERMIT_NOT_REQUIRED: [&str; 2] = ["host-package", "host-image"]; + +pub(crate) struct Jobs { + logger: Logger, + permits: Arc, + log_dir: Utf8PathBuf, + map: HashMap, +} + +struct Job { + future: BoxFuture<'static, Result<()>>, + wait_for: Vec>, + notify: Vec>, +} + +pub(crate) struct Selector<'a> { + jobs: &'a mut Jobs, + name: String, +} + +impl Jobs { + pub(crate) fn new( + logger: &Logger, + permits: Arc, + log_dir: &Utf8Path, + ) -> Jobs { + Jobs { + logger: logger.clone(), + permits, + log_dir: log_dir.to_owned(), + map: HashMap::new(), + } + } + + pub(crate) fn push( + &mut self, + name: impl AsRef, + future: impl Future> + Send + 'static, + ) -> Selector<'_> { + let name = name.as_ref().to_owned(); + assert!(!self.map.contains_key(&name), "duplicate job name {}", name); + self.map.insert( + name.clone(), + Job { + future: run_job( + self.logger.clone(), + self.permits.clone(), + name.clone(), + future, + ) + .boxed(), + wait_for: Vec::new(), + notify: Vec::new(), + }, + ); + Selector { jobs: self, name } + } + + pub(crate) fn push_command( + &mut self, + name: impl AsRef, + command: Command, + ) -> Selector<'_> { + let name = name.as_ref().to_owned(); + assert!(!self.map.contains_key(&name), "duplicate job name {}", name); + self.map.insert( + name.clone(), + Job { + future: spawn_with_output( + command, + self.logger.clone(), + self.permits.clone(), + name.clone(), + self.log_dir.join(&name).with_extension("log"), + ) + .boxed(), + wait_for: Vec::new(), + notify: Vec::new(), + }, + ); + Selector { jobs: self, name } + } + + pub(crate) fn select(&mut self, name: impl AsRef) -> Selector<'_> { + Selector { jobs: self, name: name.as_ref().to_owned() } + } + + pub(crate) async fn run_all(self) -> Result<()> { + self.map + .into_values() + .map(Job::run) + .collect::>() + .try_collect::<()>() + .await + } +} + +impl Job { + async fn run(self) -> Result<()> { + let result: Result<(), RecvError> = self + .wait_for + .into_iter() + .collect::>() + .try_collect::<()>() + .await; + result.map_err(|_| anyhow!("dependency failed"))?; + + self.future.await?; + for sender in self.notify { + // Ignore the error here -- the only reason we should fail to send + // our message is if a task has failed or the user hit Ctrl-C, at + // which point a bunch of error logging is not particularly useful. 
+ sender.send(()).ok(); + } + Ok(()) + } +} + +impl<'a> Selector<'a> { + #[track_caller] + pub(crate) fn after(self, other: impl AsRef) -> Self { + let (sender, receiver) = oneshot::channel(); + self.jobs + .map + .get_mut(&self.name) + .expect("invalid job name") + .wait_for + .push(receiver); + self.jobs + .map + .get_mut(other.as_ref()) + .expect("invalid job name") + .notify + .push(sender); + self + } +} + +macro_rules! info_or_error { + ($logger:expr, $result:expr, $($tt:tt)*) => { + if $result.is_ok() { + ::slog::info!($logger, $($tt)*); + } else { + ::slog::error!($logger, $($tt)*); + } + }; +} + +async fn run_job( + logger: Logger, + permits: Arc, + name: String, + future: impl Future> + Send + 'static, +) -> Result<()> { + if !PERMIT_NOT_REQUIRED.contains(&name.as_str()) { + let _ = permits.acquire_owned().await?; + } + + info!(logger, "[{}] running task", name); + let start = Instant::now(); + let result = tokio::spawn(future).await?; + let duration = Instant::now().saturating_duration_since(start); + info_or_error!( + logger, + result, + "[{}] task {} ({:?})", + name, + if result.is_ok() { "succeeded" } else { "failed" }, + duration + ); + result +} + +async fn spawn_with_output( + command: Command, + logger: Logger, + permits: Arc, + name: String, + log_path: Utf8PathBuf, +) -> Result<()> { + if !PERMIT_NOT_REQUIRED.contains(&name.as_str()) { + let _ = permits.acquire_owned().await?; + } + + let (command_desc, mut command) = command.into_parts(); + + let log_file_1 = File::create(log_path).await?; + let log_file_2 = log_file_1.try_clone().await?; + + info!(logger, "[{}] running: {}", name, command_desc); + let start = Instant::now(); + let mut child = command + .kill_on_drop(true) + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .with_context(|| format!("failed to exec `{}`", command_desc))?; + + let stdout = spawn_reader( + format!("[{:>16}] ", name), + child.stdout.take().unwrap(), + tokio::io::stdout(), + log_file_1, + ); + let stderr = spawn_reader( + format!("[{:>16}] ", name), + child.stderr.take().unwrap(), + tokio::io::stderr(), + log_file_2, + ); + + let status = child.wait().await.with_context(|| { + format!("I/O error while waiting for job {:?} to complete", name) + })?; + let result = command_desc.check_status(status); + info_or_error!( + logger, + result, + "[{}] process exited with {} ({:?})", + name, + status, + Instant::now().saturating_duration_since(start) + ); + + // bubble up any errors from `spawn_reader` + stdout.await??; + stderr.await??; + + result +} + +fn spawn_reader( + prefix: String, + reader: impl AsyncRead + Send + Unpin + 'static, + mut terminal_writer: impl AsyncWrite + Send + Unpin + 'static, + logfile_writer: File, +) -> tokio::task::JoinHandle> { + let mut reader = BufReader::new(reader); + let mut logfile_writer = tokio::fs::File::from(logfile_writer); + let mut buf = prefix.into_bytes(); + let prefix_len = buf.len(); + tokio::spawn(async move { + loop { + buf.truncate(prefix_len); + // We have no particular control over the output from the child + // processes we run, so we read until a newline character without + // relying on valid UTF-8 output. 
+ let size = reader.read_until(b'\n', &mut buf).await?; + if size == 0 { + return Ok(()); + } + terminal_writer.write_all(&buf).await?; + logfile_writer.write_all(&buf[prefix_len..]).await?; + } + }) +} diff --git a/dev-tools/releng/src/main.rs b/dev-tools/releng/src/main.rs new file mode 100644 index 00000000000..0fa43829313 --- /dev/null +++ b/dev-tools/releng/src/main.rs @@ -0,0 +1,734 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +mod cmd; +mod hubris; +mod job; +mod tuf; + +use std::sync::Arc; +use std::time::Duration; +use std::time::Instant; + +use anyhow::bail; +use anyhow::Context; +use anyhow::Result; +use camino::Utf8PathBuf; +use chrono::Utc; +use clap::Parser; +use fs_err::tokio as fs; +use omicron_zone_package::config::Config; +use once_cell::sync::Lazy; +use semver::Version; +use slog::debug; +use slog::error; +use slog::info; +use slog::Drain; +use slog::Logger; +use slog_term::FullFormat; +use slog_term::TermDecorator; +use tokio::sync::Semaphore; + +use crate::cmd::Command; +use crate::job::Jobs; + +/// The base version we're currently building. Build information is appended to +/// this later on. +/// +/// Under current policy, each new release is a major version bump, and +/// generally referred to only by the major version (e.g. 8.0.0 is referred +/// to as "v8", "version 8", or "release 8" to customers). The use of semantic +/// versioning is mostly to hedge for perhaps wanting something more granular in +/// the future. +const BASE_VERSION: Version = Version::new(8, 0, 0); + +#[derive(Debug, Clone, Copy)] +enum InstallMethod { + /// Unpack the tarball to `/opt/oxide/`, and install + /// `pkg/manifest.xml` (if it exists) to + /// `/lib/svc/manifest/site/.xml`. + Install, + /// Copy the tarball to `/opt/oxide/.tar.gz`. + Bundle, +} + +/// Packages to install or bundle in the host OS image. +const HOST_IMAGE_PACKAGES: [(&str, InstallMethod); 7] = [ + ("mg-ddm-gz", InstallMethod::Install), + ("omicron-sled-agent", InstallMethod::Install), + ("overlay", InstallMethod::Bundle), + ("oxlog", InstallMethod::Install), + ("propolis-server", InstallMethod::Bundle), + ("pumpkind-gz", InstallMethod::Install), + ("switch-asic", InstallMethod::Bundle), +]; +/// Packages to install or bundle in the recovery (trampoline) OS image. +const RECOVERY_IMAGE_PACKAGES: [(&str, InstallMethod); 2] = [ + ("installinator", InstallMethod::Install), + ("mg-ddm-gz", InstallMethod::Install), +]; +/// Packages to ship with the TUF repo. +const TUF_PACKAGES: [&str; 11] = [ + "clickhouse_keeper", + "clickhouse", + "cockroachdb", + "crucible-pantry-zone", + "crucible-zone", + "external-dns", + "internal-dns", + "nexus", + "ntp", + "oximeter", + "probe", +]; + +const HELIOS_REPO: &str = "https://pkg.oxide.computer/helios/2/dev/"; + +static WORKSPACE_DIR: Lazy = Lazy::new(|| { + // $CARGO_MANIFEST_DIR is at `.../omicron/dev-tools/releng` + let mut dir = + Utf8PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").expect( + "$CARGO_MANIFEST_DIR is not set; run this via `cargo xtask releng`", + )); + dir.pop(); + dir.pop(); + dir +}); + +#[derive(Parser)] +/// Run the Oxide release engineering process and produce a TUF repo that can be +/// used to update a rack. +/// +/// For more information, see `docs/releng.adoc` in the Omicron repository. 
+/// +/// Note that `--host-dataset` and `--recovery-dataset` must be set to different +/// values to build the two OS images in parallel. This is strongly recommended. +struct Args { + /// ZFS dataset to use for `helios-build` when building the host image + #[clap(long, default_value_t = Self::default_dataset("host"))] + host_dataset: String, + + /// ZFS dataset to use for `helios-build` when building the recovery + /// (trampoline) image + #[clap(long, default_value_t = Self::default_dataset("recovery"))] + recovery_dataset: String, + + /// Path to a Helios repository checkout (default: "helios" in the same + /// directory as "omicron") + #[clap(long, default_value_t = Self::default_helios_dir())] + helios_dir: Utf8PathBuf, + + /// Ignore the current HEAD of the Helios repository checkout + #[clap(long)] + ignore_helios_origin: bool, + + /// Output dir for TUF repo and log files + #[clap(long, default_value_t = Self::default_output_dir())] + output_dir: Utf8PathBuf, + + /// Path to the directory containing the rustup proxy `bin/cargo` (usually + /// set by Cargo) + #[clap(long, env = "CARGO_HOME")] + cargo_home: Option, + + /// Path to the git binary + #[clap(long, env = "GIT", default_value = "git")] + git_bin: Utf8PathBuf, + + /// Path to a pre-built omicron-package binary (skips building if set) + #[clap(long, env = "OMICRON_PACKAGE")] + omicron_package_bin: Option, +} + +impl Args { + fn default_dataset(name: &str) -> String { + format!( + "rpool/images/{}/{}", + std::env::var("LOGNAME").expect("$LOGNAME is not set"), + name + ) + } + + fn default_helios_dir() -> Utf8PathBuf { + WORKSPACE_DIR + .parent() + .expect("omicron is presumably not cloned at /") + .join("helios") + } + + fn default_output_dir() -> Utf8PathBuf { + WORKSPACE_DIR.join("out/releng") + } +} + +#[tokio::main] +async fn main() -> Result<()> { + let args = Args::parse(); + + let decorator = TermDecorator::new().build(); + let drain = FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).build().fuse(); + let logger = Logger::root(drain, slog::o!()); + + // Change the working directory to the workspace root. + debug!(logger, "changing working directory to {}", *WORKSPACE_DIR); + std::env::set_current_dir(&*WORKSPACE_DIR) + .context("failed to change working directory to workspace root")?; + + // Determine the target directory. + let target_dir = cargo_metadata::MetadataCommand::new() + .no_deps() + .exec() + .context("failed to get cargo metadata")? + .target_directory; + + // We build everything in Omicron with $CARGO, but we need to use the rustup + // proxy for Cargo when outside Omicron. + let rustup_cargo = match &args.cargo_home { + Some(path) => path.join("bin/cargo"), + None => Utf8PathBuf::from("cargo"), + }; + // `var_os` here is deliberate: if CARGO is set to a non-UTF-8 path we + // shouldn't do something confusing as a fallback. + let cargo = match std::env::var_os("CARGO") { + Some(path) => Utf8PathBuf::try_from(std::path::PathBuf::from(path)) + .context("$CARGO is not valid UTF-8")?, + None => rustup_cargo.clone(), + }; + + let permits = Arc::new(Semaphore::new( + std::thread::available_parallelism() + .context("couldn't get available parallelism")? + .into(), + )); + + let commit = Command::new(&args.git_bin) + .args(["rev-parse", "HEAD"]) + .ensure_stdout(&logger) + .await? + .trim() + .to_owned(); + + let mut version = BASE_VERSION.clone(); + // Differentiate between CI and local builds. We use `0.word` as the + // prerelease field because it comes before `alpha`. 
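+ // For example, a CI build of a commit whose hash starts with `abcdef01234` is versioned `8.0.0-0.ci+gitabcdef01234` (the `git` prefix plus the first 11 characters of the commit hash).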
+ version.pre = + if std::env::var_os("CI").is_some() { "0.ci" } else { "0.local" } + .parse()?; + // Set the build metadata to the current commit hash. + let mut build = String::with_capacity(14); + build.push_str("git"); + build.extend(commit.chars().take(11)); + version.build = build.parse()?; + let version_str = version.to_string(); + info!(logger, "version: {}", version_str); + + let manifest = Arc::new(omicron_zone_package::config::parse_manifest( + &fs::read_to_string(WORKSPACE_DIR.join("package-manifest.toml")) + .await?, + )?); + let opte_version = + fs::read_to_string(WORKSPACE_DIR.join("tools/opte_version")).await?; + + let client = reqwest::ClientBuilder::new() + .connect_timeout(Duration::from_secs(15)) + .timeout(Duration::from_secs(15)) + .build() + .context("failed to build reqwest client")?; + + // PREFLIGHT ============================================================== + let mut preflight_ok = true; + + for package in HOST_IMAGE_PACKAGES + .into_iter() + .chain(RECOVERY_IMAGE_PACKAGES) + .map(|(package, _)| package) + .chain(TUF_PACKAGES) + { + if !manifest.packages.contains_key(package) { + error!( + logger, + "package {} to be installed in the OS image \ + is not listed in the package manifest", + package + ); + preflight_ok = false; + } + } + + // Ensure the Helios checkout exists + if args.helios_dir.exists() { + if !args.ignore_helios_origin { + // check that our helios clone is up to date + Command::new(&args.git_bin) + .arg("-C") + .arg(&args.helios_dir) + .args(["fetch", "--no-write-fetch-head", "origin", "master"]) + .ensure_success(&logger) + .await?; + let stdout = Command::new(&args.git_bin) + .arg("-C") + .arg(&args.helios_dir) + .args(["rev-parse", "HEAD", "origin/master"]) + .ensure_stdout(&logger) + .await?; + let mut lines = stdout.lines(); + let first = + lines.next().context("git-rev-parse output was empty")?; + if !lines.all(|line| line == first) { + error!( + logger, + "helios checkout at {0} is out-of-date; run \ + `git pull -C {0}`, or run omicron-releng with \ + --ignore-helios-origin or --helios-path", + shell_words::quote(args.helios_dir.as_str()) + ); + preflight_ok = false; + } + } + } else { + info!(logger, "cloning helios to {}", args.helios_dir); + Command::new(&args.git_bin) + .args(["clone", "https://github.com/oxidecomputer/helios.git"]) + .arg(&args.helios_dir) + .ensure_success(&logger) + .await?; + } + // Record the branch and commit in the output + Command::new(&args.git_bin) + .arg("-C") + .arg(&args.helios_dir) + .args(["status", "--branch", "--porcelain=2"]) + .ensure_success(&logger) + .await?; + + // Check that the omicron1 brand is installed + if !Command::new("pkg") + .args(["verify", "-q", "/system/zones/brand/omicron1/tools"]) + .is_success(&logger) + .await? + { + error!( + logger, + "the omicron1 brand is not installed; install it with \ + `pfexec pkg install /system/zones/brand/omicron1/tools`" + ); + preflight_ok = false; + } + + // Check that the datasets for helios-image to use exist + for (dataset, option) in [ + (&args.host_dataset, "--host-dataset"), + (&args.recovery_dataset, "--recovery-dataset"), + ] { + if !Command::new("zfs") + .arg("list") + .arg(dataset) + .is_success(&logger) + .await? 
+ { + error!( + logger, + "the dataset {0} does not exist; run `pfexec zfs create \ + -p {0}`, or specify a different one with {1}", + shell_words::quote(dataset), + option + ); + preflight_ok = false; + } + } + + if !preflight_ok { + bail!("some preflight checks failed"); + } + + fs::create_dir_all(&args.output_dir).await?; + + // DEFINE JOBS ============================================================ + let tempdir = camino_tempfile::tempdir() + .context("failed to create temporary directory")?; + let mut jobs = Jobs::new(&logger, permits.clone(), &args.output_dir); + + jobs.push_command( + "helios-setup", + Command::new("ptime") + .args(["-m", "gmake", "setup"]) + .current_dir(&args.helios_dir) + // ?!?! + // somehow, the Makefile does not see a new `$(PWD)` without this. + .env("PWD", &args.helios_dir) + // Setting `BUILD_OS` to no makes setup skip repositories we don't + // need for building the OS itself (we are just building an image + // from an already-built OS). + .env("BUILD_OS", "no") + .env_remove("CARGO") + .env_remove("RUSTUP_TOOLCHAIN"), + ); + + // Download the toolchain for phbl before we get to the image build steps. + // (This is possibly a micro-optimization.) + jobs.push_command( + "phbl-toolchain", + Command::new(&rustup_cargo) + .arg("--version") + .current_dir(args.helios_dir.join("projects/phbl")) + .env_remove("CARGO") + .env_remove("RUSTUP_TOOLCHAIN"), + ) + .after("helios-setup"); + + let omicron_package = if let Some(path) = &args.omicron_package_bin { + // omicron-package is provided, so don't build it. + jobs.push("omicron-package", std::future::ready(Ok(()))); + path.clone() + } else { + jobs.push_command( + "omicron-package", + Command::new("ptime").args([ + "-m", + cargo.as_str(), + "build", + "--locked", + "--release", + "--bin", + "omicron-package", + ]), + ); + target_dir.join("release/omicron-package") + }; + + // Generate `omicron-package stamp` jobs for a list of packages as a nested + // `Jobs`. Returns the selector for the outer job. + // + // (This could be a function but the resulting function would have too many + // confusable arguments.) + macro_rules! 
stamp_packages { + ($name:expr, $target:expr, $packages:expr) => {{ + let mut stamp_jobs = + Jobs::new(&logger, permits.clone(), &args.output_dir); + for package in $packages { + stamp_jobs.push_command( + format!("stamp-{}", package), + Command::new(&omicron_package) + .args([ + "--target", + $target.as_str(), + "--artifacts", + $target.artifacts_path(&args).as_str(), + "stamp", + package, + &version_str, + ]) + .env_remove("CARGO_MANIFEST_DIR"), + ); + } + jobs.push($name, stamp_jobs.run_all()) + }}; + } + + for target in [Target::Host, Target::Recovery] { + let artifacts_path = target.artifacts_path(&args); + + // omicron-package target create + jobs.push_command( + format!("{}-target", target), + Command::new(&omicron_package) + .args([ + "--target", + target.as_str(), + "--artifacts", + artifacts_path.as_str(), + "target", + "create", + ]) + .args(target.target_args()) + .env_remove("CARGO_MANIFEST_DIR"), + ) + .after("omicron-package"); + + // omicron-package package + jobs.push_command( + format!("{}-package", target), + Command::new(&omicron_package) + .args([ + "--target", + target.as_str(), + "--artifacts", + artifacts_path.as_str(), + "package", + ]) + .env_remove("CARGO_MANIFEST_DIR"), + ) + .after(format!("{}-target", target)); + + // omicron-package stamp + stamp_packages!( + format!("{}-stamp", target), + target, + target.proto_package_names() + ) + .after(format!("{}-package", target)); + + // [build proto dir, to be overlaid into disk image] + let proto_dir = tempdir.path().join("proto").join(target.as_str()); + jobs.push( + format!("{}-proto", target), + build_proto_area( + artifacts_path, + proto_dir.clone(), + target.proto_packages(), + manifest.clone(), + ), + ) + .after(format!("{}-stamp", target)); + + // The ${os_short_commit} token will be expanded by `helios-build` + let image_name = format!( + "{} {}/${{os_short_commit}} {}", + target.image_prefix(), + commit.chars().take(7).collect::(), + Utc::now().format("%Y-%m-%d %H:%M") + ); + + // helios-build experiment-image + jobs.push_command( + format!("{}-image", target), + Command::new("ptime") + .arg("-m") + .arg(args.helios_dir.join("helios-build")) + .arg("experiment-image") + .arg("-o") // output directory for image + .arg(args.output_dir.join(format!("os-{}", target))) + .arg("-p") // use an external package repository + .arg(format!("helios-dev={}", HELIOS_REPO)) + .arg("-F") // pass extra image builder features + .arg(format!("optever={}", opte_version.trim())) + .arg("-P") // include all files from extra proto area + .arg(proto_dir.join("root")) + .arg("-N") // image name + .arg(image_name) + .arg("-s") // tempdir name suffix + .arg(target.as_str()) + .args(target.image_build_args()) + .current_dir(&args.helios_dir) + .env( + "IMAGE_DATASET", + match target { + Target::Host => &args.host_dataset, + Target::Recovery => &args.recovery_dataset, + }, + ) + .env_remove("CARGO") + .env_remove("RUSTUP_TOOLCHAIN"), + ) + .after("helios-setup") + .after(format!("{}-proto", target)); + } + // Build the recovery target after we build the host target. Only one + // of these will build at a time since Cargo locks its target directory; + // since host-package and host-image both take longer than their recovery + // counterparts, this should be the fastest option to go first. + jobs.select("recovery-package").after("host-package"); + if args.host_dataset == args.recovery_dataset { + // If the datasets are the same, we can't parallelize these. 
+ jobs.select("recovery-image").after("host-image"); + } + + // Set up /root/.profile in the host OS image. + jobs.push( + "host-profile", + host_add_root_profile(tempdir.path().join("proto/host/root/root")), + ) + .after("host-proto"); + jobs.select("host-image").after("host-profile"); + + stamp_packages!("tuf-stamp", Target::Host, TUF_PACKAGES) + .after("host-stamp"); + + for (name, base_url) in [ + ("staging", "https://permslip-staging.corp.oxide.computer"), + ("production", "https://signer-us-west.corp.oxide.computer"), + ] { + jobs.push( + format!("hubris-{}", name), + hubris::fetch_hubris_artifacts( + base_url, + client.clone(), + WORKSPACE_DIR.join(format!("tools/permslip_{}", name)), + args.output_dir.join(format!("hubris-{}", name)), + ), + ); + } + + jobs.push( + "tuf-repo", + tuf::build_tuf_repo( + logger.clone(), + args.output_dir.clone(), + version, + manifest, + ), + ) + .after("tuf-stamp") + .after("host-image") + .after("recovery-image") + .after("hubris-staging") + .after("hubris-production"); + + // RUN JOBS =============================================================== + let start = Instant::now(); + jobs.run_all().await?; + info!( + logger, + "all jobs completed in {:?}", + Instant::now().saturating_duration_since(start) + ); + Ok(()) +} + +#[derive(Clone, Copy)] +enum Target { + Host, + Recovery, +} + +impl Target { + fn as_str(self) -> &'static str { + match self { + Target::Host => "host", + Target::Recovery => "recovery", + } + } + + fn artifacts_path(self, args: &Args) -> Utf8PathBuf { + match self { + Target::Host => WORKSPACE_DIR.join("out"), + Target::Recovery => { + args.output_dir.join(format!("artifacts-{}", self)) + } + } + } + + fn target_args(self) -> &'static [&'static str] { + match self { + Target::Host => &[ + "--image", + "standard", + "--machine", + "gimlet", + "--switch", + "asic", + "--rack-topology", + "multi-sled", + ], + Target::Recovery => &["--image", "trampoline"], + } + } + + fn proto_packages(self) -> &'static [(&'static str, InstallMethod)] { + match self { + Target::Host => &HOST_IMAGE_PACKAGES, + Target::Recovery => &RECOVERY_IMAGE_PACKAGES, + } + } + + fn proto_package_names(self) -> impl Iterator { + self.proto_packages().iter().map(|(name, _)| *name) + } + + fn image_prefix(self) -> &'static str { + match self { + Target::Host => "ci", + Target::Recovery => "recovery", + } + } + + fn image_build_args(self) -> &'static [&'static str] { + match self { + Target::Host => &[ + "-B", // include omicron1 brand + ], + Target::Recovery => &[ + "-R", // recovery image + ], + } + } +} + +impl std::fmt::Display for Target { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.as_str()) + } +} + +async fn build_proto_area( + mut package_dir: Utf8PathBuf, + proto_dir: Utf8PathBuf, + packages: &'static [(&'static str, InstallMethod)], + manifest: Arc, +) -> Result<()> { + let opt_oxide = proto_dir.join("root/opt/oxide"); + let manifest_site = proto_dir.join("root/lib/svc/manifest/site"); + fs::create_dir_all(&opt_oxide).await?; + + // use the stamped packages + package_dir.push("versioned"); + + for &(package_name, method) in packages { + let package = + manifest.packages.get(package_name).expect("checked in preflight"); + match method { + InstallMethod::Install => { + let path = opt_oxide.join(&package.service_name); + fs::create_dir(&path).await?; + + let cloned_path = path.clone(); + let cloned_package_dir = package_dir.to_owned(); + tokio::task::spawn_blocking(move || -> Result<()> { + let mut archive = 
tar::Archive::new(std::fs::File::open( + cloned_package_dir + .join(package_name) + .with_extension("tar"), + )?); + archive.unpack(cloned_path).with_context(|| { + format!("failed to extract {}.tar.gz", package_name) + })?; + Ok(()) + }) + .await??; + + let smf_manifest = path.join("pkg").join("manifest.xml"); + if smf_manifest.exists() { + fs::create_dir_all(&manifest_site).await?; + fs::rename( + smf_manifest, + manifest_site + .join(&package.service_name) + .with_extension("xml"), + ) + .await?; + } + } + InstallMethod::Bundle => { + fs::copy( + package_dir.join(format!("{}.tar.gz", package_name)), + opt_oxide.join(format!("{}.tar.gz", package.service_name)), + ) + .await?; + } + } + } + + Ok(()) +} + +async fn host_add_root_profile(host_proto_root: Utf8PathBuf) -> Result<()> { + fs::create_dir_all(&host_proto_root).await?; + fs::write( + host_proto_root.join(".profile"), + "# Add opteadm, ddadm, oxlog to PATH\n\ + export PATH=$PATH:/opt/oxide/opte/bin:/opt/oxide/mg-ddm:/opt/oxide/oxlog\n", + ).await?; + Ok(()) +} diff --git a/dev-tools/releng/src/tuf.rs b/dev-tools/releng/src/tuf.rs new file mode 100644 index 00000000000..2a880210ebb --- /dev/null +++ b/dev-tools/releng/src/tuf.rs @@ -0,0 +1,149 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::sync::Arc; + +use anyhow::Context; +use anyhow::Result; +use camino::Utf8PathBuf; +use chrono::Duration; +use chrono::Timelike; +use chrono::Utc; +use fs_err::tokio as fs; +use fs_err::tokio::File; +use omicron_common::api::external::SemverVersion; +use omicron_common::api::internal::nexus::KnownArtifactKind; +use omicron_zone_package::config::Config; +use semver::Version; +use sha2::Digest; +use sha2::Sha256; +use slog::Logger; +use tokio::io::AsyncReadExt; +use tufaceous_lib::assemble::ArtifactManifest; +use tufaceous_lib::assemble::DeserializedArtifactData; +use tufaceous_lib::assemble::DeserializedArtifactSource; +use tufaceous_lib::assemble::DeserializedControlPlaneZoneSource; +use tufaceous_lib::assemble::DeserializedManifest; +use tufaceous_lib::assemble::OmicronRepoAssembler; +use tufaceous_lib::Key; + +pub(crate) async fn build_tuf_repo( + logger: Logger, + output_dir: Utf8PathBuf, + version: Version, + package_manifest: Arc, +) -> Result<()> { + // We currently go about this somewhat strangely; the old release + // engineering process produced a Tufaceous manifest, and (the now very many + // copies of) the TUF repo download-and-unpack script we use expects to be + // able to download a manifest. So we build up a `DeserializedManifest`, + // write it to disk, and then turn it into an `ArtifactManifest` to actually + // build the repo. + + // Start a new manifest by loading the Hubris staging manifest. + let mut manifest = DeserializedManifest::from_path( + &output_dir.join("hubris-staging/manifest.toml"), + ) + .context("failed to open intermediate hubris staging manifest")?; + // Set the version. + manifest.system_version = SemverVersion(version); + + // Load the Hubris production manifest and merge it in. + let hubris_production = DeserializedManifest::from_path( + &output_dir.join("hubris-production/manifest.toml"), + ) + .context("failed to open intermediate hubris production manifest")?; + for (kind, artifacts) in hubris_production.artifacts { + manifest.artifacts.entry(kind).or_default().extend(artifacts); + } + + // Add the OS images. 
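+ // These are the `os.tar.gz` files that the `host-image` and `recovery-image` jobs leave in the `os-host` and `os-recovery` output directories.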
+ manifest.artifacts.insert( + KnownArtifactKind::Host, + vec![DeserializedArtifactData { + name: "host".to_string(), + version: manifest.system_version.clone(), + source: DeserializedArtifactSource::File { + path: output_dir.join("os-host/os.tar.gz"), + }, + }], + ); + manifest.artifacts.insert( + KnownArtifactKind::Trampoline, + vec![DeserializedArtifactData { + name: "trampoline".to_string(), + version: manifest.system_version.clone(), + source: DeserializedArtifactSource::File { + path: output_dir.join("os-recovery/os.tar.gz"), + }, + }], + ); + + // Add the control plane zones. + let mut zones = Vec::new(); + for package in crate::TUF_PACKAGES { + zones.push(DeserializedControlPlaneZoneSource::File { + file_name: Some(format!( + "{}.tar.gz", + package_manifest + .packages + .get(package) + .expect("checked in preflight") + .service_name + )), + path: crate::WORKSPACE_DIR + .join("out/versioned") + .join(format!("{}.tar.gz", package)), + }); + } + manifest.artifacts.insert( + KnownArtifactKind::ControlPlane, + vec![DeserializedArtifactData { + name: "control-plane".to_string(), + version: manifest.system_version.clone(), + source: DeserializedArtifactSource::CompositeControlPlane { zones }, + }], + ); + + // Serialize the manifest out. + fs::write( + output_dir.join("manifest.toml"), + toml::to_string_pretty(&manifest)?.into_bytes(), + ) + .await?; + + // Convert the manifest. + let manifest = ArtifactManifest::from_deserialized(&output_dir, manifest)?; + manifest.verify_all_present()?; + // Assemble the repo. + let keys = vec![Key::generate_ed25519()]; + let expiry = Utc::now().with_nanosecond(0).unwrap() + Duration::weeks(1); + OmicronRepoAssembler::new( + &logger, + manifest, + keys, + expiry, + output_dir.join("repo.zip"), + ) + .build() + .await?; + // Generate the checksum file. + let mut hasher = Sha256::new(); + let mut buf = [0; 8192]; + let mut file = File::open(output_dir.join("repo.zip")).await?; + loop { + let n = file.read(&mut buf).await?; + if n == 0 { + break; + } + hasher.update(&buf[..n]); + } + fs::write( + output_dir.join("repo.zip.sha256.txt"), + format!("{}\n", hex::encode(&hasher.finalize())), + ) + .await?; + + Ok(()) +} diff --git a/dev-tools/xtask/Cargo.toml b/dev-tools/xtask/Cargo.toml index 11fcf405bd0..2aecde57e53 100644 --- a/dev-tools/xtask/Cargo.toml +++ b/dev-tools/xtask/Cargo.toml @@ -11,7 +11,7 @@ workspace = true anyhow.workspace = true camino.workspace = true cargo_toml = "0.20" -cargo_metadata = "0.18" +cargo_metadata.workspace = true clap.workspace = true macaddr.workspace = true serde.workspace = true diff --git a/docs/releng.adoc b/docs/releng.adoc new file mode 100644 index 00000000000..31252c9a89c --- /dev/null +++ b/docs/releng.adoc @@ -0,0 +1,81 @@ +:showtitle: +:numbered: +:toc: left + += Oxide Release Engineering + +Omicron is the Oxide control plane, and thus brings together all of the +various components outside of this repo that make up the software on the +product. 
This includes (but definitely isn't limited to): + +- https://github.com/oxidecomputer/propolis[Propolis], our hypervisor +- https://github.com/oxidecomputer/helios[Helios], our host operating + system +- https://github.com/oxidecomputer/crucible[Crucible], our block storage + service +- https://github.com/oxidecomputer/maghemite[Maghemite], our switch + control software and routing protocol +- https://github.com/oxidecomputer/hubris[Hubris], our embedded + microcontroller operating system used on the root of trust and service + processors +- https://github.com/oxidecomputer/console[The web console] + +Each of these has their own build processes that produce some sort of +usable artifact, whether that is an illumos zone or a tarball of static +assets. + +The release engineering process builds the control plane and combines +it with the many external artifacts into a final artifact -- a Zip +archive of a TUF repository -- that contains everything necessary for +the product to operate. This process is run on each commit to ensure it +is always functional. You can also run the process locally with +`cargo xtask releng`. + +== Process overview + +`cargo xtask releng` performs all of these steps in parallel (with +the temporary exception of artifact downloads handled by +`tools/install_builder_prerequisites.sh`): + +. `tools/install_builder_prerequisites.sh` downloads several artifacts + (via the `tools/ci_*` scripts) that are necessary to build Omicron; + many of these are ultimately packaged by `omicron-package`. These + scripts are generally controlled by the `tools/*_version` and + `tools/*_checksums` files. +. `cargo xtask releng` downloads the current root of trust and + service processor images built by the Hubris release engineering + process, which are signed in https://github.com/oxidecomputer/permission-slip[Permission Slip]. + This is controlled by the `tools/permslip_production` and + `tools/permslip_staging` files. +. `omicron-package` is the heart of the release engineering process; it + reads the manifest from `package-manifest.toml`, runs an appropriate + `cargo build` command, downloads any additional artifacts, and + packages them into a series of illumos zones and tarballs. (It can + also manage installation and uninstallation of these zones; see + how-to-run.adoc.) +. Some of the illumos zones are distributed with the OS images (because + they are reliant on OS-specific APIs), and some are distributed + separately. `cargo xtask releng` unpacks the zones for the OS image + into a temporary directory that is overlaid onto the OS image in the + next step. +. `helios-build` from the https://github.com/oxidecomputer/helios[Helios] + repository then builds two images: the *host* image, which is used + during normal operation, and the *trampoline* (or *recovery*) image, + which is used to update the host image. +. Finally, `cargo xtask releng` generates a Zip archive of a + https://theupdateframework.io/[TUF] repository, which contains the + host and trampoline OS images, the ROT and SP images, and all the + illumos zones that are not installed into the OS images. This archive + can be uploaded to Wicket to perform an upgrade of the rack while the + control plane is not running. + +== Beyond `cargo xtask releng` + +Currently we use TUF repos generated in CI (by `cargo xtask releng`) +directly. These repositories use a generated throwaway key to sign +the TUF metadata. 
In the limit, we will have a process to sign release +builds of these TUF repositories, which will be available as a Zip +archive for an operator to upload to Nexus or Wicket, as well as an +HTTPS repository for racks connected to the internet or with access to +a proxy to perform automatic updates. The exact nature of the PKI and +trust policies for each of these update flows is under discussion. diff --git a/package-manifest.toml b/package-manifest.toml index 5da7ed68677..825aeea8a89 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -592,7 +592,7 @@ only_for_targets.image = "standard" only_for_targets.switch = "asic" [package.pumpkind-gz] -service_name = "pumpkind-gz" +service_name = "pumpkind" source.type = "prebuilt" source.repo = "pumpkind" source.commit = "3fe9c306590fb2f28f54ace7fd18b3c126323683" diff --git a/package/src/bin/omicron-package.rs b/package/src/bin/omicron-package.rs index 3b8bd249186..09fa7ab178a 100644 --- a/package/src/bin/omicron-package.rs +++ b/package/src/bin/omicron-package.rs @@ -199,6 +199,25 @@ async fn do_dot(config: &Config) -> Result<()> { Ok(()) } +async fn do_list_outputs( + config: &Config, + output_directory: &Utf8Path, + intermediate: bool, +) -> Result<()> { + for (name, package) in + config.package_config.packages_to_build(&config.target).0 + { + if !intermediate + && package.output + == (PackageOutput::Zone { intermediate_only: true }) + { + continue; + } + println!("{}", package.get_output_path(name, output_directory)); + } + Ok(()) +} + // The name reserved for the currently-in-use build target. const ACTIVE: &str = "active"; @@ -919,7 +938,7 @@ async fn main() -> Result<()> { tokio::fs::create_dir_all(&args.artifact_dir).await?; let logpath = args.artifact_dir.join("LOG"); let logfile = std::io::LineWriter::new(open_options.open(&logpath)?); - println!("Logging to: {}", std::fs::canonicalize(logpath)?.display()); + eprintln!("Logging to: {}", std::fs::canonicalize(logpath)?.display()); let drain = slog_bunyan::new(logfile).build().fuse(); let drain = slog_async::Async::new(drain).build().fuse(); @@ -981,6 +1000,10 @@ async fn main() -> Result<()> { SubCommand::Build(BuildCommand::Dot) => { do_dot(&get_config()?).await?; } + SubCommand::Build(BuildCommand::ListOutputs { intermediate }) => { + do_list_outputs(&get_config()?, &args.artifact_dir, *intermediate) + .await?; + } SubCommand::Build(BuildCommand::Package { disable_cache }) => { do_package(&get_config()?, &args.artifact_dir, *disable_cache) .await?; diff --git a/package/src/lib.rs b/package/src/lib.rs index bba1a3a0cdb..2b99cfbe07a 100644 --- a/package/src/lib.rs +++ b/package/src/lib.rs @@ -90,6 +90,11 @@ pub enum BuildCommand { }, /// Make a `dot` graph to visualize the package tree Dot, + /// List the output packages for the current target + ListOutputs { + #[clap(long)] + intermediate: bool, + }, /// Builds the packages specified in a manifest, and places them into an /// 'out' directory. 
Package { diff --git a/tools/build-host-image.sh b/tools/build-host-image.sh deleted file mode 100755 index e90d800849e..00000000000 --- a/tools/build-host-image.sh +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o pipefail -set -o xtrace - -function usage -{ - echo "usage: $0 [-fRB] HELIOS_PATH PACKAGES_TARBALL" - echo - echo " -f Force helios build despite git hash mismatch" - echo " -R Build recovery (trampoline) image" - echo " -B Build standard image" - exit 1 -} - -function main -{ - while getopts ":hfRBS:" opt; do - case $opt in - f) - FORCE=1 - ;; - R) - BUILD_RECOVERY=1 - HELIOS_BUILD_EXTRA_ARGS=-R - IMAGE_PREFIX=recovery - ;; - B) - BUILD_STANDARD=1 - HELIOS_BUILD_EXTRA_ARGS=-B - IMAGE_PREFIX=ci - ;; - S) - SWITCH_ZONE=$OPTARG - ;; - h | \?) - usage - ;; - esac - done - shift $((OPTIND-1)) - - # Ensure we got either -R or -B but not both - case "x$BUILD_RECOVERY$BUILD_STANDARD" in - x11) - echo "specify at most one of -R, -B" - exit 1 - ;; - x) - echo "must specify either -R or -B" - exit 1 - ;; - *) ;; - esac - - if [ "$#" != "2" ]; then - usage - fi - HELIOS_PATH=$1 - GLOBAL_ZONE_TARBALL_PATH=$2 - - TOOLS_DIR="$(pwd)/$(dirname "$0")" - - # Grab the opte version - OPTE_VER=$(cat "$TOOLS_DIR/opte_version") - - # Assemble global zone files in a temporary directory. - if ! tmp_gz=$(mktemp -d); then - exit 1 - fi - trap 'cd /; rm -rf "$tmp_gz"' EXIT - - # Extract the global zone tarball into a tmp_gz directory - echo "Extracting gz packages into $tmp_gz" - ptime -m tar xvzf "$GLOBAL_ZONE_TARBALL_PATH" -C "$tmp_gz" - - # If the user specified a switch zone (which is probably named - # `switch-SOME_VARIANT.tar.gz`), stage it in the right place and rename it - # to just `switch.tar.gz`. - if [ "x$SWITCH_ZONE" != "x" ]; then - mkdir -p "$tmp_gz/root/opt/oxide" - cp "$SWITCH_ZONE" "$tmp_gz/root/opt/oxide/switch.tar.gz" - fi - - if [ "x$BUILD_STANDARD" != "x" ]; then - mkdir -p "$tmp_gz/root/root" - echo "# Add opteadm, ddmadm, oxlog to PATH" >> "$tmp_gz/root/root/.profile" - echo 'export PATH=$PATH:/opt/oxide/opte/bin:/opt/oxide/mg-ddm:/opt/oxide/oxlog' >> "$tmp_gz/root/root/.profile" - fi - - # Move to the helios checkout - cd "$HELIOS_PATH" - - HELIOS_REPO=https://pkg.oxide.computer/helios/2/dev/ - - # Build an image name that includes the omicron and host OS hashes - IMAGE_NAME="$IMAGE_PREFIX ${GITHUB_SHA:0:7}" - # The ${os_short_commit} token will be expanded by `helios-build` - IMAGE_NAME+='/${os_short_commit}' - IMAGE_NAME+=" $(date +'%Y-%m-%d %H:%M')" - - ./helios-build experiment-image \ - -p helios-dev="$HELIOS_REPO" \ - -F optever="$OPTE_VER" \ - -P "$tmp_gz/root" \ - -N "$IMAGE_NAME" \ - $HELIOS_BUILD_EXTRA_ARGS -} - -main "$@" diff --git a/tools/hubris_checksums b/tools/hubris_checksums deleted file mode 100644 index 913cc460c48..00000000000 --- a/tools/hubris_checksums +++ /dev/null @@ -1,8 +0,0 @@ -4d38415a186fb1058c991d0e5ed44711457526e32687ff48ab6d6feadd8b4aa4 build-gimlet-c-image-default-v1.0.13.zip -ead1988cfebb4f79c364a2207f0bda741b8dd0e4f02fb34b4d341c648ecaa733 build-gimlet-d-image-default-v1.0.13.zip -85f5fc9c206c5fc61b4c2380b94a337220e944d67c0cb6bb2cb2486f8d5bc193 build-gimlet-e-image-default-v1.0.13.zip -ac7d898369e94e33b3556a405352b24a1ee107ce877d416811d9e9fae1f1a1ec build-gimlet-f-image-default-v1.0.13.zip -8cf812dc4aacc013335eb932d2bfaf8a542dec7bc29ea671d9a4235c12d61564 build-psc-b-image-default-v1.0.13.zip -85622677eef52c6d210f44e82b2b6cdc5a8357e509744abe1693883b7635b38c build-psc-c-image-default-v1.0.13.zip 
-87d6cd4add1aabe53756ba8f66a461cd3aa08f1a0093f94ea81a35a6a175ed21 build-sidecar-b-image-default-v1.0.13.zip -d50d6f77da6fc736843b5418359532f18b7ffa090c2a3d68b5dc1d35281385f5 build-sidecar-c-image-default-v1.0.13.zip diff --git a/tools/hubris_version b/tools/hubris_version deleted file mode 100644 index 717d36cec2f..00000000000 --- a/tools/hubris_version +++ /dev/null @@ -1 +0,0 @@ -TAGS=(gimlet-v1.0.13 psc-v1.0.13 sidecar-v1.0.13) diff --git a/tools/permslip_commit b/tools/permslip_commit deleted file mode 100644 index 58140df7da9..00000000000 --- a/tools/permslip_commit +++ /dev/null @@ -1 +0,0 @@ -COMMIT=5d44e0065f90051a28881c75e3574142ada9b695 diff --git a/tufaceous-lib/src/assemble/manifest.rs b/tufaceous-lib/src/assemble/manifest.rs index 8825327c1dc..1c4a676f4c1 100644 --- a/tufaceous-lib/src/assemble/manifest.rs +++ b/tufaceous-lib/src/assemble/manifest.rs @@ -524,6 +524,8 @@ impl DeserializedFileArtifactSource { pub enum DeserializedControlPlaneZoneSource { File { path: Utf8PathBuf, + #[serde(skip_serializing_if = "Option::is_none")] + file_name: Option, }, Fake { name: String, @@ -542,12 +544,15 @@ impl DeserializedControlPlaneZoneSource { F: FnOnce(&str, CompositeEntry<'_>) -> Result, { let (name, data, mtime_source) = match self { - DeserializedControlPlaneZoneSource::File { path } => { + DeserializedControlPlaneZoneSource::File { path, file_name } => { let data = std::fs::read(path) .with_context(|| format!("failed to read {path}"))?; - let name = path.file_name().with_context(|| { - format!("zone path missing file name: {path}") - })?; + let name = file_name + .as_deref() + .or_else(|| path.file_name()) + .with_context(|| { + format!("zone path missing file name: {path}") + })?; // For now, always use the current time as the source. (Maybe // change this to use the mtime on disk in the future?) 
(name, data, MtimeSource::Now) diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 43392665c71..998b45382e2 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -46,6 +46,7 @@ either = { version = "1.11.0" } elliptic-curve = { version = "0.13.8", features = ["ecdh", "hazmat", "pem", "std"] } ff = { version = "0.13.0", default-features = false, features = ["alloc"] } flate2 = { version = "1.0.30" } +fs-err = { version = "2.11.0", default-features = false, features = ["tokio"] } futures = { version = "0.3.30" } futures-channel = { version = "0.3.30", features = ["sink"] } futures-core = { version = "0.3.30" } @@ -81,11 +82,8 @@ peg-runtime = { version = "0.8.3", default-features = false, features = ["std"] pem-rfc7468 = { version = "0.7.0", default-features = false, features = ["std"] } petgraph = { version = "0.6.5", features = ["serde-1"] } postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } -ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] } predicates = { version = "3.1.0" } proc-macro2 = { version = "1.0.82" } -rand = { version = "0.8.5" } -rand_chacha = { version = "0.3.1", default-features = false, features = ["std"] } regex = { version = "1.10.4" } regex-automata = { version = "0.4.5", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } regex-syntax = { version = "0.8.2" } @@ -121,7 +119,6 @@ uuid = { version = "1.8.0", features = ["serde", "v4"] } yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } zerocopy = { version = "0.7.32", features = ["derive", "simd"] } zeroize = { version = "1.7.0", features = ["std", "zeroize_derive"] } -zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [build-dependencies] ahash = { version = "0.8.8" } @@ -153,6 +150,7 @@ either = { version = "1.11.0" } elliptic-curve = { version = "0.13.8", features = ["ecdh", "hazmat", "pem", "std"] } ff = { version = "0.13.0", default-features = false, features = ["alloc"] } flate2 = { version = "1.0.30" } +fs-err = { version = "2.11.0", default-features = false, features = ["tokio"] } futures = { version = "0.3.30" } futures-channel = { version = "0.3.30", features = ["sink"] } futures-core = { version = "0.3.30" } @@ -188,11 +186,8 @@ peg-runtime = { version = "0.8.3", default-features = false, features = ["std"] pem-rfc7468 = { version = "0.7.0", default-features = false, features = ["std"] } petgraph = { version = "0.6.5", features = ["serde-1"] } postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } -ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] } predicates = { version = "3.1.0" } proc-macro2 = { version = "1.0.82" } -rand = { version = "0.8.5" } -rand_chacha = { version = "0.3.1", default-features = false, features = ["std"] } regex = { version = "1.10.4" } regex-automata = { version = "0.4.5", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } regex-syntax = { version = "0.8.2" } @@ -229,7 +224,6 @@ uuid = { version = "1.8.0", features = ["serde", "v4"] } yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } zerocopy = { version = "0.7.32", features = ["derive", "simd"] } zeroize = { version = "1.7.0", features = ["std", "zeroize_derive"] } -zip = { version = 
"0.6.6", default-features = false, features = ["bzip2", "deflate"] } [target.x86_64-unknown-linux-gnu.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } From ce4c9b4bdb77e78ee345bfb7a516fafef3ef9b64 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 14 May 2024 22:50:25 -0700 Subject: [PATCH 07/37] Update Rust crate camino to v1.1.7 (#5770) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f7cded308b3..ce3646e62b3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -787,9 +787,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" dependencies = [ "serde", ] From 9aeffa0f914d915b7c838af2f5841c3d1729174a Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Wed, 15 May 2024 07:22:35 +0000 Subject: [PATCH 08/37] Update Rust crate serde_json to 1.0.117 (#5772) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ce3646e62b3..ee751876b3c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8389,9 +8389,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.116" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" +checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" dependencies = [ "itoa", "ryu", diff --git a/Cargo.toml b/Cargo.toml index 891565f8573..4049c08eb8e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -410,7 +410,7 @@ secrecy = "0.8.0" semver = { version = "1.0.22", features = ["std", "serde"] } serde = { version = "1.0", default-features = false, features = [ "derive", "rc" ] } serde_human_bytes = { git = "http://github.com/oxidecomputer/serde_human_bytes", branch = "main" } -serde_json = "1.0.116" +serde_json = "1.0.117" serde_path_to_error = "0.1.16" serde_tokenstream = "0.2" serde_urlencoded = "0.7.1" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 998b45382e2..f23ea3f6eb6 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -93,7 +93,7 @@ schemars = { version = "0.8.17", features = ["bytes", "chrono", "uuid1"] } scopeguard = { version = "1.2.0" } semver = { version = "1.0.22", features = ["serde"] } serde = { version = "1.0.201", features = ["alloc", "derive", "rc"] } -serde_json = { version = "1.0.116", features = ["raw_value", "unbounded_depth"] } +serde_json = { version = "1.0.117", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } similar = { version = "2.4.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } @@ -197,7 +197,7 @@ schemars = { version = "0.8.17", features = ["bytes", "chrono", "uuid1"] } scopeguard = { version = "1.2.0" } semver = { version = "1.0.22", features = ["serde"] } serde = { version = "1.0.201", features = ["alloc", "derive", "rc"] } -serde_json = { version = "1.0.116", features 
= ["raw_value", "unbounded_depth"] } +serde_json = { version = "1.0.117", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } similar = { version = "2.4.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } From 4b06fe82b001958b9a5bf0ddd4c4ce5f20c0859c Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Wed, 15 May 2024 07:23:33 +0000 Subject: [PATCH 09/37] Update Rust crate semver to 1.0.23 (#5771) --- Cargo.lock | 30 +++++++++++++++--------------- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ee751876b3c..3bdac722380 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -834,7 +834,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "thiserror", @@ -2895,7 +2895,7 @@ dependencies = [ "once_cell", "pathdiff", "petgraph", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "smallvec 1.13.1", @@ -4540,7 +4540,7 @@ dependencies = [ "rand 0.8.5", "ref-cast", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "sled-agent-client", @@ -4617,7 +4617,7 @@ dependencies = [ "rustls 0.22.4", "samael", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "serde_urlencoded", @@ -5233,7 +5233,7 @@ dependencies = [ "regress", "reqwest", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_human_bytes", "serde_json", @@ -5447,7 +5447,7 @@ dependencies = [ "rustls-pemfile 2.1.2", "samael", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "serde_urlencoded", @@ -5552,7 +5552,7 @@ dependencies = [ "rayon", "reqwest", "ring 0.17.8", - "semver 1.0.22", + "semver 1.0.23", "serde", "sled-hardware", "slog", @@ -5602,7 +5602,7 @@ dependencies = [ "omicron-zone-package", "once_cell", "reqwest", - "semver 1.0.22", + "semver 1.0.23", "serde", "sha2", "shell-words", @@ -5688,7 +5688,7 @@ dependencies = [ "rcgen", "reqwest", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_human_bytes", "serde_json", @@ -5849,7 +5849,7 @@ dependencies = [ "rustix", "schemars", "scopeguard", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "sha2", @@ -5901,7 +5901,7 @@ dependencies = [ "hex", "reqwest", "ring 0.16.20", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_derive", "serde_json", @@ -7942,7 +7942,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.22", + "semver 1.0.23", ] [[package]] @@ -8302,9 +8302,9 @@ checksum = "d4f410fedcf71af0345d7607d246e7ad15faaadd49d240ee3b24e5dc21a820ac" [[package]] name = "semver" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -9924,7 +9924,7 @@ dependencies = [ "home", "once_cell", "regex", - "semver 1.0.22", + "semver 1.0.23", "walkdir", ] diff --git a/Cargo.toml b/Cargo.toml index 4049c08eb8e..ceac9801bb2 100644 --- a/Cargo.toml +++ b/Cargo.toml 
@@ -407,7 +407,7 @@ rustyline = "14.0.0" samael = { version = "0.0.15", features = ["xmlsec"] } schemars = "0.8.16" secrecy = "0.8.0" -semver = { version = "1.0.22", features = ["std", "serde"] } +semver = { version = "1.0.23", features = ["std", "serde"] } serde = { version = "1.0", default-features = false, features = [ "derive", "rc" ] } serde_human_bytes = { git = "http://github.com/oxidecomputer/serde_human_bytes", branch = "main" } serde_json = "1.0.117" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index f23ea3f6eb6..9bbbc28e3f1 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -91,7 +91,7 @@ reqwest = { version = "0.11.27", features = ["blocking", "cookies", "json", "rus ring = { version = "0.17.8", features = ["std"] } schemars = { version = "0.8.17", features = ["bytes", "chrono", "uuid1"] } scopeguard = { version = "1.2.0" } -semver = { version = "1.0.22", features = ["serde"] } +semver = { version = "1.0.23", features = ["serde"] } serde = { version = "1.0.201", features = ["alloc", "derive", "rc"] } serde_json = { version = "1.0.117", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } @@ -195,7 +195,7 @@ reqwest = { version = "0.11.27", features = ["blocking", "cookies", "json", "rus ring = { version = "0.17.8", features = ["std"] } schemars = { version = "0.8.17", features = ["bytes", "chrono", "uuid1"] } scopeguard = { version = "1.2.0" } -semver = { version = "1.0.22", features = ["serde"] } +semver = { version = "1.0.23", features = ["serde"] } serde = { version = "1.0.201", features = ["alloc", "derive", "rc"] } serde_json = { version = "1.0.117", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } From 14dbab759961c1243874b60f759e37810012438c Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 15 May 2024 11:55:40 -0400 Subject: [PATCH 10/37] Automatic bump of permslip manifest to gimlet-v1.0.18 (#5766) Automated bump --- tools/permslip_staging | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/permslip_staging b/tools/permslip_staging index 9ddec55a210..683cfc0ec90 100644 --- a/tools/permslip_staging +++ b/tools/permslip_staging @@ -1,4 +1,4 @@ -2138737caec2c771692389c7cc6f45110aefc7d86c49ea872d5090549d6e59c7 manifest-gimlet-v1.0.17.toml +a90127c6098a99100413f0f0d4eb97c96f825cff11a92c41b8b1c2927977b17a manifest-gimlet-v1.0.18.toml e34b2f363ed0e1399e175bfae9e5e50217255c7984154697180d8a2d4611f65d manifest-oxide-rot-1-v1.0.10.toml b56e35fae0f4ed9e84e4e4d40f6cc576ceb52e4fba400b83841eb47d35cbbf8b manifest-psc-v1.0.16.toml 9bd043382ad5c7cdb8f00a66e401a6c4b88e8d588915f304d2c261ea7df4d1b5 manifest-sidecar-v1.0.16.toml From 26aba4500c33de41552e6d99befc8babb315f21f Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Wed, 15 May 2024 12:08:43 -0400 Subject: [PATCH 11/37] Enable the oximeter producer garbage collection RPW (#5764) Closes #5284. 
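The newly-enabled task prunes metric producers whose lease has expired, where the cutoff is the current time minus the lease duration. A minimal sketch of that cutoff computation, using the same `chrono` calls as the activation path in the diff below (the free function and its name are illustrative, not actual Nexus code):

```rust
use std::time::Duration;

use chrono::{DateTime, TimeDelta, Utc};

/// Illustrative helper: the instant before which a producer lease is
/// considered expired, or `None` if the duration is out of chrono's range.
fn lease_cutoff(lease_duration: Duration) -> Option<DateTime<Utc>> {
    // `from_std` fails for durations too large for `TimeDelta`;
    // `checked_sub_signed` fails if the subtraction would underflow.
    let delta = TimeDelta::from_std(lease_duration).ok()?;
    Utc::now().checked_sub_signed(delta)
}
```

Producers that have not renewed their lease since that cutoff are the ones the task prunes.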
--- dev-tools/omdb/tests/successes.out | 2 +- .../src/app/background/metrics_producer_gc.rs | 41 +++---------------- test-utils/src/dev/test_cmds.rs | 25 +++++++++-- 3 files changed, 27 insertions(+), 41 deletions(-) diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 0aa47f27126..8c68b0f4315 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -426,7 +426,7 @@ task: "metrics_producer_gc" currently executing: no last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms - last completion reported error: metric producer gc disabled (omicron#5284) +warning: unknown background task: "metrics_producer_gc" (don't know how to interpret details: Object {"expiration": String(""), "pruned": Array []}) task: "phantom_disks" configured period: every 30s diff --git a/nexus/src/app/background/metrics_producer_gc.rs b/nexus/src/app/background/metrics_producer_gc.rs index 1e3b0702496..a3e5aaab32d 100644 --- a/nexus/src/app/background/metrics_producer_gc.rs +++ b/nexus/src/app/background/metrics_producer_gc.rs @@ -22,33 +22,14 @@ use std::time::Duration; pub struct MetricProducerGc { datastore: Arc, lease_duration: Duration, - disabled: bool, } impl MetricProducerGc { pub fn new(datastore: Arc, lease_duration: Duration) -> Self { - Self { - datastore, - lease_duration, - // TODO We should turn this task on as a part of landing the rest of - // the move to metric producer leases. For now, we leave it disabled - // to avoid pruning producers that don't know to renew leases, but - // make this a boolean so our unit test can enable it. - disabled: true, - } + Self { datastore, lease_duration } } async fn activate(&mut self, opctx: &OpContext) -> serde_json::Value { - if self.disabled { - warn!( - opctx.log, - "Metric producer GC: statically disabled pending omicron#5284" - ); - return json!({ - "error": "metric producer gc disabled (omicron#5284)", - }); - } - let Some(expiration) = TimeDelta::from_std(self.lease_duration) .ok() .and_then(|delta| Utc::now().checked_sub_signed(delta)) @@ -219,25 +200,13 @@ mod tests { .await .expect("failed to insert producer"); - // Activate the task. It should immediately return because our GC is - // currently statically disabled (remove this check once that is no - // longer true!). + // Create the task and activate it. Technically this is racy in that it + // could prune the producer we just added, but if it's been an hour + // since then, we have bigger problems. This should _not_ prune the + // producer, since it's been active within the last hour. let mut gc = MetricProducerGc::new(datastore.clone(), Duration::from_secs(3600)); let value = gc.activate(&opctx).await; - assert_eq!( - value, - json!({ - "error": "metric producer gc disabled (omicron#5284)", - }) - ); - - // Enable the task and activate it. Technically this is racy, but if - // it's been an hour since we inserted the producer in the previous - // statement, we have bigger problems. This should _not_ prune the - // producer, since it's been active within the last hour. 
- gc.disabled = false; - let value = gc.activate(&opctx).await; let value = value.as_object().expect("non-object"); assert!(!value.contains_key("failures")); assert!(value.contains_key("expiration")); diff --git a/test-utils/src/dev/test_cmds.rs b/test-utils/src/dev/test_cmds.rs index 51ade208f85..3c675ddfd99 100644 --- a/test-utils/src/dev/test_cmds.rs +++ b/test-utils/src/dev/test_cmds.rs @@ -160,10 +160,27 @@ pub fn redact_variable(input: &str) -> String { .replace_all(&s, "") .to_string(); - let s = regex::Regex::new(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z") - .unwrap() - .replace_all(&s, "") - .to_string(); + let s = { + let mut new_s = String::with_capacity(s.len()); + let mut last_match = 0; + for m in regex::Regex::new(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z") + .unwrap() + .find_iter(&s) + { + new_s.push_str(&s[last_match..m.start()]); + new_s.push_str("", so this subtraction can't + // underflow. Insert spaces to match widths. + for _ in 0..(m.len() - "".len()) { + new_s.push(' '); + } + new_s.push_str("TIMESTAMP>"); + last_match = m.end(); + } + new_s.push_str(&s[last_match..]); + new_s + }; // Replace formatted durations. These are pretty specific to the background // task output. From 30f56eda4c41d3841f583e5cbdc8a7fab9006a1a Mon Sep 17 00:00:00 2001 From: bnaecker Date: Wed, 15 May 2024 11:36:19 -0700 Subject: [PATCH 12/37] Remove unused `base_route` field from oximeter producer info (#5769) - Remove from DB schema, add migration to drop the column - Remove all references, except in the timeseries schema reporting oximeter self-statistics. That needs to wait for functionality to update timeseries schema. - Fixes #5658 --- clients/nexus-client/src/lib.rs | 2 -- clients/oximeter-client/src/lib.rs | 1 - common/src/api/internal/nexus.rs | 10 ---------- dev-tools/omdb/src/bin/omdb/oximeter.rs | 2 -- nexus/db-model/src/producer_endpoint.rs | 8 -------- nexus/db-model/src/schema.rs | 1 - nexus/db-model/src/schema_versions.rs | 3 ++- nexus/db-queries/src/db/datastore/oximeter.rs | 2 -- nexus/metrics-producer-gc/src/lib.rs | 2 -- .../src/app/background/metrics_producer_gc.rs | 1 - nexus/src/lib.rs | 2 -- nexus/test-utils/src/lib.rs | 1 - openapi/nexus-internal.json | 5 ----- openapi/oximeter.json | 5 ----- oximeter/collector/src/agent.rs | 19 +++---------------- oximeter/collector/src/self_stats.rs | 14 +++++++++++--- oximeter/producer/examples/producer.rs | 1 - oximeter/producer/src/lib.rs | 10 ---------- schema/crdb/dbinit.sql | 4 +--- .../remove-producer-base-route-column/up.sql | 1 + sled-agent/src/metrics.rs | 1 - sled-agent/src/sim/disk.rs | 1 - 22 files changed, 18 insertions(+), 78 deletions(-) create mode 100644 schema/crdb/remove-producer-base-route-column/up.sql diff --git a/clients/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs index 92cc3ff27e7..ae8f0c93db7 100644 --- a/clients/nexus-client/src/lib.rs +++ b/clients/nexus-client/src/lib.rs @@ -232,7 +232,6 @@ impl From<&omicron_common::api::internal::nexus::ProducerEndpoint> ) -> Self { Self { address: s.address.to_string(), - base_route: s.base_route.clone(), id: s.id, kind: s.kind.into(), interval: s.interval.into(), @@ -415,7 +414,6 @@ impl TryFrom id: ep.id, kind: ep.kind.into(), address, - base_route: ep.base_route, interval: ep.interval.into(), }) } diff --git a/clients/oximeter-client/src/lib.rs b/clients/oximeter-client/src/lib.rs index 11aa1452f82..74fc6968e89 100644 --- a/clients/oximeter-client/src/lib.rs +++ b/clients/oximeter-client/src/lib.rs @@ -41,7 +41,6 @@ impl 
From<&omicron_common::api::internal::nexus::ProducerEndpoint> ) -> Self { Self { address: s.address.to_string(), - base_route: s.base_route.clone(), id: s.id, kind: s.kind.into(), interval: s.interval.into(), diff --git a/common/src/api/internal/nexus.rs b/common/src/api/internal/nexus.rs index 5a44921c26a..20516e702ba 100644 --- a/common/src/api/internal/nexus.rs +++ b/common/src/api/internal/nexus.rs @@ -110,20 +110,10 @@ pub struct ProducerEndpoint { /// The IP address and port at which `oximeter` can collect metrics from the /// producer. pub address: SocketAddr, - /// NOTE: This field is deprecated, and will be ignored. It will be removed - /// in future releases. - pub base_route: String, /// The interval on which `oximeter` should collect metrics. pub interval: Duration, } -impl ProducerEndpoint { - /// Return the route that can be used to request metric data. - pub fn collection_route(&self) -> String { - format!("{}/{}", &self.base_route, &self.id) - } -} - /// Response to a successful producer registration. #[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] pub struct ProducerRegistrationResponse { diff --git a/dev-tools/omdb/src/bin/omdb/oximeter.rs b/dev-tools/omdb/src/bin/omdb/oximeter.rs index 573a91a0935..a6dc2ce0115 100644 --- a/dev-tools/omdb/src/bin/omdb/oximeter.rs +++ b/dev-tools/omdb/src/bin/omdb/oximeter.rs @@ -90,7 +90,6 @@ impl OximeterArgs { struct Producer { id: Uuid, address: SocketAddr, - base_route: String, interval: String, } @@ -100,7 +99,6 @@ impl From for Producer { Self { id: p.id, address: p.address.parse().unwrap(), - base_route: p.base_route, interval: humantime::format_duration(interval).to_string(), } } diff --git a/nexus/db-model/src/producer_endpoint.rs b/nexus/db-model/src/producer_endpoint.rs index aea087360bc..74a7356adbd 100644 --- a/nexus/db-model/src/producer_endpoint.rs +++ b/nexus/db-model/src/producer_endpoint.rs @@ -53,7 +53,6 @@ impl From for internal::nexus::ProducerEndpoint { id: ep.id(), kind: ep.kind.into(), address: SocketAddr::new(ep.ip.ip(), *ep.port), - base_route: ep.base_route.clone(), interval: Duration::from_secs_f64(ep.interval), } } @@ -71,7 +70,6 @@ pub struct ProducerEndpoint { pub ip: ipnetwork::IpNetwork, pub port: SqlU16, pub interval: f64, - pub base_route: String, pub oximeter_id: Uuid, } @@ -87,14 +85,8 @@ impl ProducerEndpoint { kind: endpoint.kind.into(), ip: endpoint.address.ip().into(), port: endpoint.address.port().into(), - base_route: endpoint.base_route.clone(), interval: endpoint.interval.as_secs_f64(), oximeter_id, } } - - /// Return the route that can be used to request metric data. - pub fn collection_route(&self) -> String { - format!("{}/{}", &self.base_route, self.id()) - } } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 3d16b978f62..224c461da0a 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -463,7 +463,6 @@ table! { ip -> Inet, port -> Int4, interval -> Float8, - base_route -> Text, oximeter_id -> Uuid, } } diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index afdf91074e5..cb229274fea 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -17,7 +17,7 @@ use std::collections::BTreeMap; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. 
-pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(62, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(63, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -29,6 +29,7 @@ static KNOWN_VERSIONS: Lazy> = Lazy::new(|| { // | leaving the first copy as an example for the next person. // v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(63, "remove-producer-base-route-column"), KnownVersion::new(62, "allocate-subnet-decommissioned-sleds"), KnownVersion::new(61, "blueprint-add-sled-state"), KnownVersion::new(60, "add-lookup-vmm-by-sled-id-index"), diff --git a/nexus/db-queries/src/db/datastore/oximeter.rs b/nexus/db-queries/src/db/datastore/oximeter.rs index 9ac16eafac6..1aa3435cb6c 100644 --- a/nexus/db-queries/src/db/datastore/oximeter.rs +++ b/nexus/db-queries/src/db/datastore/oximeter.rs @@ -109,7 +109,6 @@ impl DataStore { dsl::ip.eq(producer.ip), dsl::port.eq(producer.port), dsl::interval.eq(producer.interval), - dsl::base_route.eq(producer.base_route.clone()), )) .execute_async(&*self.pool_connection_authorized(opctx).await?) .await @@ -297,7 +296,6 @@ mod tests { id: Uuid::new_v4(), kind: nexus::ProducerKind::Service, address: "[::1]:0".parse().unwrap(), // unused - base_route: "/".to_string(), // unused interval: Duration::from_secs(0), // unused }, collector_info.id, diff --git a/nexus/metrics-producer-gc/src/lib.rs b/nexus/metrics-producer-gc/src/lib.rs index ba2cd0460bc..4ed8f1bbb53 100644 --- a/nexus/metrics-producer-gc/src/lib.rs +++ b/nexus/metrics-producer-gc/src/lib.rs @@ -244,7 +244,6 @@ mod tests { id: Uuid::new_v4(), kind: nexus::ProducerKind::Service, address: "[::1]:0".parse().unwrap(), // unused - base_route: "/".to_string(), // unused interval: Duration::from_secs(0), // unused }, collector_info.id, @@ -327,7 +326,6 @@ mod tests { id: Uuid::new_v4(), kind: nexus::ProducerKind::Service, address: "[::1]:0".parse().unwrap(), // unused - base_route: "/".to_string(), // unused interval: Duration::from_secs(0), // unused }, collector_info.id, diff --git a/nexus/src/app/background/metrics_producer_gc.rs b/nexus/src/app/background/metrics_producer_gc.rs index a3e5aaab32d..2a8464b80f5 100644 --- a/nexus/src/app/background/metrics_producer_gc.rs +++ b/nexus/src/app/background/metrics_producer_gc.rs @@ -190,7 +190,6 @@ mod tests { id: Uuid::new_v4(), kind: nexus::ProducerKind::Service, address: "[::1]:0".parse().unwrap(), // unused - base_route: "/".to_string(), // unused interval: Duration::from_secs(0), // unused }, collector_info.id, diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index e1b327de913..6a23048693b 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -468,8 +468,6 @@ fn start_producer_server( id: registry.producer_id(), kind: ProducerKind::Service, address, - // NOTE: This is now unused, and will be removed in the future. - base_route: String::new(), interval: std::time::Duration::from_secs(10), }, // Some(_) here prevents DNS resolution, using our own address to diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 8bbb6ef38c7..a078ce2a61f 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -1468,7 +1468,6 @@ pub fn start_producer_server( id, kind: ProducerKind::Service, address: producer_address, - base_route: String::new(), // Unused, will be removed. 
interval: Duration::from_secs(1), }; let config = oximeter_producer::Config { diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index d694e5ee0de..5ec8e584175 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -3894,10 +3894,6 @@ "description": "The IP address and port at which `oximeter` can collect metrics from the producer.", "type": "string" }, - "base_route": { - "description": "NOTE: This field is deprecated, and will be ignored. It will be removed in future releases.", - "type": "string" - }, "id": { "description": "A unique ID for this producer.", "type": "string", @@ -3922,7 +3918,6 @@ }, "required": [ "address", - "base_route", "id", "interval", "kind" diff --git a/openapi/oximeter.json b/openapi/oximeter.json index 3fa58426c3f..c567e9421d9 100644 --- a/openapi/oximeter.json +++ b/openapi/oximeter.json @@ -200,10 +200,6 @@ "description": "The IP address and port at which `oximeter` can collect metrics from the producer.", "type": "string" }, - "base_route": { - "description": "NOTE: This field is deprecated, and will be ignored. It will be removed in future releases.", - "type": "string" - }, "id": { "description": "A unique ID for this producer.", "type": "string", @@ -228,7 +224,6 @@ }, "required": [ "address", - "base_route", "id", "interval", "kind" diff --git a/oximeter/collector/src/agent.rs b/oximeter/collector/src/agent.rs index a86b528cd53..5da9a1dfa8a 100644 --- a/oximeter/collector/src/agent.rs +++ b/oximeter/collector/src/agent.rs @@ -80,11 +80,7 @@ async fn perform_collection( ) { debug!(log, "collecting from producer"); let res = client - .get(format!( - "http://{}{}", - producer.address, - producer.collection_route() - )) + .get(format!("http://{}/{}", producer.address, producer.id,)) .send() .await; match res { @@ -155,10 +151,7 @@ async fn collection_task( mut inbox: mpsc::Receiver, outbox: mpsc::Sender<(Option, ProducerResults)>, ) { - let mut log = orig_log.new(o!( - "route" => producer.collection_route(), - "address" => producer.address, - )); + let mut log = orig_log.new(o!("address" => producer.address)); let client = reqwest::Client::new(); let mut collection_timer = interval(producer.interval); collection_timer.tick().await; // completes immediately @@ -199,10 +192,7 @@ async fn collection_task( ); // Update the logger with the new information as well. - log = orig_log.new(o!( - "route" => producer.collection_route(), - "address" => producer.address, - )); + log = orig_log.new(o!("address" => producer.address)); collection_timer = interval(producer.interval); collection_timer.tick().await; // completes immediately } @@ -940,7 +930,6 @@ mod tests { id: Uuid::new_v4(), kind: ProducerKind::Service, address, - base_route: String::from("/"), interval: COLLECTION_INTERVAL, }; collector @@ -1009,7 +998,6 @@ mod tests { 0, 0, )), - base_route: String::from("/"), interval: COLLECTION_INTERVAL, }; collector @@ -1089,7 +1077,6 @@ mod tests { id: Uuid::new_v4(), kind: ProducerKind::Service, address, - base_route: String::from("/"), interval: COLLECTION_INTERVAL, }; collector diff --git a/oximeter/collector/src/self_stats.rs b/oximeter/collector/src/self_stats.rs index b72c21d0e37..ab9e5bedf4b 100644 --- a/oximeter/collector/src/self_stats.rs +++ b/oximeter/collector/src/self_stats.rs @@ -46,6 +46,10 @@ pub struct Collections { /// The base route in the producer server used to collect metrics. /// /// The full route is `{base_route}/{producer_id}`. 
+ /// + // TODO-cleanup: This is no longer relevant, but removing it entirely + // relies on nonexistent functionality for updating timeseries schema. When + // that lands, we should remove this. pub base_route: String, pub datum: Cumulative, } @@ -98,6 +102,10 @@ pub struct FailedCollections { /// The base route in the producer server used to collect metrics. /// /// The full route is `{base_route}/{producer_id}`. + /// + // TODO-cleanup: This is no longer relevant, but removing it entirely + // relies on nonexistent functionality for updating timeseries schema. When + // that lands, we should remove this. pub base_route: String, /// The reason we could not collect. // @@ -125,7 +133,7 @@ impl CollectionTaskStats { producer_id: producer.id, producer_ip: producer.address.ip(), producer_port: producer.address.port(), - base_route: producer.base_route.clone(), + base_route: String::new(), datum: Cumulative::new(0), }, failed_collections: BTreeMap::new(), @@ -203,7 +211,7 @@ mod tests { producer_id: uuid::uuid!("718452ab-7cca-42f6-b8b1-1aaaa1b09104"), producer_ip: IpAddr::V6(Ipv6Addr::LOCALHOST), producer_port: 12345, - base_route: String::from("/"), + base_route: String::new(), datum: Cumulative::new(0), } } @@ -213,7 +221,7 @@ mod tests { producer_id: uuid::uuid!("718452ab-7cca-42f6-b8b1-1aaaa1b09104"), producer_ip: IpAddr::V6(Ipv6Addr::LOCALHOST), producer_port: 12345, - base_route: String::from("/"), + base_route: String::new(), reason: FailureReason::Unreachable.as_string(), datum: Cumulative::new(0), } diff --git a/oximeter/producer/examples/producer.rs b/oximeter/producer/examples/producer.rs index f5b00526b90..87748dd12d8 100644 --- a/oximeter/producer/examples/producer.rs +++ b/oximeter/producer/examples/producer.rs @@ -120,7 +120,6 @@ async fn main() -> anyhow::Result<()> { id: registry.producer_id(), kind: ProducerKind::Service, address: args.address, - base_route: "/collect".to_string(), interval: Duration::from_secs(10), }; let config = Config { diff --git a/oximeter/producer/src/lib.rs b/oximeter/producer/src/lib.rs index 5e9e576b12b..6bf8954ae06 100644 --- a/oximeter/producer/src/lib.rs +++ b/oximeter/producer/src/lib.rs @@ -210,14 +210,6 @@ impl Server { return Err(Error::UuidMismatch); } - // Overwrite any provided base_route. - // - // This will be removed in future releases, as users no longer have or - // need any control over the route the producer server exposes. - // - // TODO-cleanup: Remove this field entirely. - server_info.base_route = String::from("/"); - // Build the logger / server. let log = Self::build_logger(log)?; let dropshot = ConfigDropshot { @@ -284,7 +276,6 @@ impl Server { info!( log, "starting oximeter metric producer server"; - "route" => server_info.collection_route(), "producer_id" => ?registry.producer_id(), "address" => server.local_addr(), "interval" => ?server_info.interval, @@ -542,7 +533,6 @@ mod tests { id: Uuid::new_v4(), kind: ProducerKind::Service, address, - base_route: String::new(), interval: Duration::from_secs(10), }, registration_address: Some(fake_nexus.local_addr()), diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index fa0c74aac28..cc298e4565a 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1294,8 +1294,6 @@ CREATE TABLE IF NOT EXISTS omicron.public.metric_producer ( ip INET NOT NULL, port INT4 CHECK (port BETWEEN 0 AND 65535) NOT NULL, interval FLOAT NOT NULL, - /* TODO: Is this length appropriate? 
*/ - base_route STRING(512) NOT NULL, /* Oximeter collector instance to which this metric producer is assigned. */ oximeter_id UUID NOT NULL ); @@ -3861,7 +3859,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '62.0.0', NULL) + (TRUE, NOW(), NOW(), '63.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/remove-producer-base-route-column/up.sql b/schema/crdb/remove-producer-base-route-column/up.sql new file mode 100644 index 00000000000..90798f031ab --- /dev/null +++ b/schema/crdb/remove-producer-base-route-column/up.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.metric_producer DROP COLUMN IF EXISTS base_route; diff --git a/sled-agent/src/metrics.rs b/sled-agent/src/metrics.rs index 64c5a806b48..62eaaf61545 100644 --- a/sled-agent/src/metrics.rs +++ b/sled-agent/src/metrics.rs @@ -169,7 +169,6 @@ fn start_producer_server( id: registry.producer_id(), kind: ProducerKind::SledAgent, address, - base_route: String::new(), // Unused, will be removed. interval: METRIC_COLLECTION_INTERVAL, }, registration_address, diff --git a/sled-agent/src/sim/disk.rs b/sled-agent/src/sim/disk.rs index 6e51e21311f..284e424ebff 100644 --- a/sled-agent/src/sim/disk.rs +++ b/sled-agent/src/sim/disk.rs @@ -170,7 +170,6 @@ impl SimDisk { id, kind: ProducerKind::SledAgent, address: producer_address, - base_route: String::new(), // Unused, will be removed. interval: Duration::from_millis(200), }; let config = oximeter_producer::Config { From 9ddc97c2fdee1f232e192500efc6f2e3e5711814 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 15 May 2024 15:54:52 -0400 Subject: [PATCH 13/37] Bump RoT, Sidecar, and PSC (#5778) --- tools/permslip_production | 2 +- tools/permslip_staging | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/permslip_production b/tools/permslip_production index 2cf844d9d35..c384c2cd513 100644 --- a/tools/permslip_production +++ b/tools/permslip_production @@ -1 +1 @@ -75bf4467effc6077958c926c19fe83c05a09b02795d4b0b6ad9191ed93a6d5b9 manifest-oxide-rot-1-v1.0.10.toml +3feecf35522ecc07a23fbe934c752ecbf248672ce55c29102de485927edc12e6 manifest-oxide-rot-1-v1.0.11.toml diff --git a/tools/permslip_staging b/tools/permslip_staging index 683cfc0ec90..a3f491210ad 100644 --- a/tools/permslip_staging +++ b/tools/permslip_staging @@ -1,4 +1,4 @@ a90127c6098a99100413f0f0d4eb97c96f825cff11a92c41b8b1c2927977b17a manifest-gimlet-v1.0.18.toml -e34b2f363ed0e1399e175bfae9e5e50217255c7984154697180d8a2d4611f65d manifest-oxide-rot-1-v1.0.10.toml -b56e35fae0f4ed9e84e4e4d40f6cc576ceb52e4fba400b83841eb47d35cbbf8b manifest-psc-v1.0.16.toml -9bd043382ad5c7cdb8f00a66e401a6c4b88e8d588915f304d2c261ea7df4d1b5 manifest-sidecar-v1.0.16.toml +b973cc9feb20f7bba447e7f5291c4070387fa9992deab81301f67f0a3844cd0c manifest-oxide-rot-1-v1.0.11.toml +aae829e02d79ec0fe19019c783b6426c6fcc1fe4427aea70b65afc2884f53db8 manifest-psc-v1.0.17.toml +16992e82dff635eda1e065e0f6e325c795b6e90c879c7442ae062c063940a60a manifest-sidecar-v1.0.17.toml From e0a118496b05a30bf7a5a8a0820e0dda415c485a Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Wed, 15 May 2024 13:21:17 -0700 Subject: [PATCH 14/37] move the verify-libraries CI check into releng (#5779) In the context of the build-and-test job, `cargo xtask verify-libraries` is a bit of an unrelated operation, because it's building all of the binaries in between building all of the test binaries and then running all of the test binaries. 
I had initially considered moving it to its own Buildomat job, but then realized that this is really a release engineering check; we already have built most of the binaries (in particular, Nexus, the one that takes the longest to build) in order to package them up, so we can manage to build the handful of extra utility binaries and perform the checks while the host OS image builds. This should improve the **build-and-test (helios)** job by about 10 minutes while having negligible impact on the **tuf-repo** job, and will also eliminate a new and exciting source of test flake we've seen where `timeout 10m` is getting hit, presumably because we continue to write software. --- .github/buildomat/build-and-test.sh | 13 ------- .github/buildomat/jobs/a4x2-deploy.sh | 1 - .github/buildomat/jobs/a4x2-prepare.sh | 2 +- .../buildomat/jobs/build-and-test-helios.sh | 2 +- .../buildomat/jobs/build-and-test-linux.sh | 2 +- .github/buildomat/jobs/clippy.sh | 2 +- dev-tools/releng/src/main.rs | 13 ++++++- dev-tools/xtask/src/main.rs | 34 ++++++++++++------- dev-tools/xtask/src/verify_libraries.rs | 27 ++++++++++++--- dev-tools/xtask/src/virtual_hardware_stub.rs | 13 ------- 10 files changed, 59 insertions(+), 50 deletions(-) delete mode 100644 dev-tools/xtask/src/virtual_hardware_stub.rs diff --git a/.github/buildomat/build-and-test.sh b/.github/buildomat/build-and-test.sh index f9798b4ddd3..cc344522db3 100755 --- a/.github/buildomat/build-and-test.sh +++ b/.github/buildomat/build-and-test.sh @@ -76,19 +76,6 @@ export RUSTC_BOOTSTRAP=1 # We report build progress to stderr, and the "--timings=json" output goes to stdout. ptime -m cargo build -Z unstable-options --timings=json --workspace --tests --locked --verbose 1> "$OUTPUT_DIR/crate-build-timings.json" -# If we are running on illumos we want to verify that we are not requiring -# system libraries outside of specific binaries. If we encounter this situation -# we bail. -# NB: `cargo xtask verify-libraries` runs `cargo build --bins` to ensure it can -# check the final executables. -if [[ $target_os == "illumos" ]]; then - banner verify-libraries - # This has a separate timeout from `cargo nextest` since `timeout` expects - # to run an external command and therefore we cannot run bash functions or - # subshells. - ptime -m timeout 10m cargo xtask verify-libraries -fi - # # We apply our own timeout to ensure that we get a normal failure on timeout # rather than a buildomat timeout. See oxidecomputer/buildomat#8. 
diff --git a/.github/buildomat/jobs/a4x2-deploy.sh b/.github/buildomat/jobs/a4x2-deploy.sh index 8f0f24c8d16..323b3e2e28a 100755 --- a/.github/buildomat/jobs/a4x2-deploy.sh +++ b/.github/buildomat/jobs/a4x2-deploy.sh @@ -3,7 +3,6 @@ #: name = "a4x2-deploy" #: variety = "basic" #: target = "lab-2.0-opte-0.27" -#: rust_toolchain = "stable" #: output_rules = [ #: "/out/falcon/*.log", #: "/out/falcon/*.err", diff --git a/.github/buildomat/jobs/a4x2-prepare.sh b/.github/buildomat/jobs/a4x2-prepare.sh index bc88ddd4c04..1e603fc7d9b 100755 --- a/.github/buildomat/jobs/a4x2-prepare.sh +++ b/.github/buildomat/jobs/a4x2-prepare.sh @@ -3,7 +3,7 @@ #: name = "a4x2-prepare" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "stable" +#: rust_toolchain = "1.77.2" #: output_rules = [ #: "=/out/cargo-bay-ce.tgz", #: "=/out/cargo-bay-cr1.tgz", diff --git a/.github/buildomat/jobs/build-and-test-helios.sh b/.github/buildomat/jobs/build-and-test-helios.sh index cfcbb61475f..a4cbd978a99 100755 --- a/.github/buildomat/jobs/build-and-test-helios.sh +++ b/.github/buildomat/jobs/build-and-test-helios.sh @@ -3,7 +3,7 @@ #: name = "build-and-test (helios)" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.72.1" +#: rust_toolchain = "1.77.2" #: output_rules = [ #: "%/work/*", #: "%/var/tmp/omicron_tmp/*", diff --git a/.github/buildomat/jobs/build-and-test-linux.sh b/.github/buildomat/jobs/build-and-test-linux.sh index 22332ce65c8..f10f07ff7ad 100755 --- a/.github/buildomat/jobs/build-and-test-linux.sh +++ b/.github/buildomat/jobs/build-and-test-linux.sh @@ -3,7 +3,7 @@ #: name = "build-and-test (ubuntu-22.04)" #: variety = "basic" #: target = "ubuntu-22.04" -#: rust_toolchain = "1.72.1" +#: rust_toolchain = "1.77.2" #: output_rules = [ #: "%/work/*", #: "%/var/tmp/omicron_tmp/*", diff --git a/.github/buildomat/jobs/clippy.sh b/.github/buildomat/jobs/clippy.sh index abbcda21507..a5007694ab6 100755 --- a/.github/buildomat/jobs/clippy.sh +++ b/.github/buildomat/jobs/clippy.sh @@ -3,7 +3,7 @@ #: name = "clippy (helios)" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.72.1" +#: rust_toolchain = "1.77.2" #: output_rules = [] # Run clippy on illumos (not just other systems) because a bunch of our code diff --git a/dev-tools/releng/src/main.rs b/dev-tools/releng/src/main.rs index 0fa43829313..f382f5f222e 100644 --- a/dev-tools/releng/src/main.rs +++ b/dev-tools/releng/src/main.rs @@ -544,7 +544,18 @@ async fn main() -> Result<()> { jobs.select("host-image").after("host-profile"); stamp_packages!("tuf-stamp", Target::Host, TUF_PACKAGES) - .after("host-stamp"); + .after("host-stamp") + .after("recovery-stamp"); + + // Run `cargo xtask verify-libraries --release`. (This was formerly run in + // the build-and-test Buildomat job, but this fits better here where we've + // already built most of the binaries.) 
+ jobs.push_command( + "verify-libraries", + Command::new(&cargo).args(["xtask", "verify-libraries", "--release"]), + ) + .after("host-package") + .after("recovery-package"); for (name, base_url) in [ ("staging", "https://permslip-staging.corp.oxide.computer"), diff --git a/dev-tools/xtask/src/main.rs b/dev-tools/xtask/src/main.rs index dd090943a23..56d01d0ff0e 100644 --- a/dev-tools/xtask/src/main.rs +++ b/dev-tools/xtask/src/main.rs @@ -12,14 +12,11 @@ use clap::{Parser, Subcommand}; mod check_workspace_deps; mod clippy; + #[cfg(target_os = "illumos")] mod verify_libraries; - #[cfg(target_os = "illumos")] mod virtual_hardware; -#[cfg(not(target_os = "illumos"))] -#[path = "virtual_hardware_stub.rs"] -mod virtual_hardware; #[derive(Parser)] #[command(name = "cargo xtask", about = "Workspace-related developer tools")] @@ -35,11 +32,21 @@ enum Cmds { CheckWorkspaceDeps, /// Run configured clippy checks Clippy(clippy::ClippyArgs), + /// Verify we are not leaking library bindings outside of intended /// crates - VerifyLibraries, + #[cfg(target_os = "illumos")] + VerifyLibraries(verify_libraries::Args), /// Manage virtual hardware + #[cfg(target_os = "illumos")] VirtualHardware(virtual_hardware::Args), + + /// (this command is only available on illumos) + #[cfg(not(target_os = "illumos"))] + VerifyLibraries, + /// (this command is only available on illumos) + #[cfg(not(target_os = "illumos"))] + VirtualHardware, } fn main() -> Result<()> { @@ -47,15 +54,16 @@ fn main() -> Result<()> { match args.cmd { Cmds::Clippy(args) => clippy::run_cmd(args), Cmds::CheckWorkspaceDeps => check_workspace_deps::run_cmd(), - Cmds::VerifyLibraries => { - #[cfg(target_os = "illumos")] - return verify_libraries::run_cmd(); - #[cfg(not(target_os = "illumos"))] - unimplemented!( - "Library verification is only available on illumos!" 
- ); - } + + #[cfg(target_os = "illumos")] + Cmds::VerifyLibraries(args) => verify_libraries::run_cmd(args), + #[cfg(target_os = "illumos")] Cmds::VirtualHardware(args) => virtual_hardware::run_cmd(args), + + #[cfg(not(target_os = "illumos"))] + Cmds::VerifyLibraries | Cmds::VirtualHardware => { + anyhow::bail!("this command is only available on illumos"); + } } } diff --git a/dev-tools/xtask/src/verify_libraries.rs b/dev-tools/xtask/src/verify_libraries.rs index 72aa622a07f..af2b87daf17 100644 --- a/dev-tools/xtask/src/verify_libraries.rs +++ b/dev-tools/xtask/src/verify_libraries.rs @@ -5,6 +5,7 @@ use anyhow::{bail, Context, Result}; use camino::Utf8Path; use cargo_metadata::Message; +use clap::Parser; use fs_err as fs; use serde::Deserialize; use std::{ @@ -16,6 +17,13 @@ use swrite::{swriteln, SWrite}; use crate::load_workspace; +#[derive(Parser)] +pub struct Args { + /// Build in release mode + #[clap(long)] + release: bool, +} + #[derive(Deserialize, Debug)] struct LibraryConfig { binary_allow_list: Option>, @@ -83,20 +91,29 @@ fn verify_executable( Ok(()) } -pub fn run_cmd() -> Result<()> { + +pub fn run_cmd(args: Args) -> Result<()> { let metadata = load_workspace()?; let mut config_path = metadata.workspace_root; config_path.push(".cargo/xtask.toml"); let config = read_xtask_toml(&config_path)?; let cargo = std::env::var("CARGO").unwrap_or_else(|_| "cargo".to_string()); - let mut command = Command::new(cargo) - .args(["build", "--bins", "--message-format=json-render-diagnostics"]) + let mut command = Command::new(cargo); + command.args([ + "build", + "--bins", + "--message-format=json-render-diagnostics", + ]); + if args.release { + command.arg("--release"); + } + let mut child = command .stdout(Stdio::piped()) .spawn() .context("failed to spawn cargo build")?; - let reader = BufReader::new(command.stdout.take().context("take stdout")?); + let reader = BufReader::new(child.stdout.take().context("take stdout")?); let mut errors = Default::default(); for message in cargo_metadata::Message::parse_stream(reader) { @@ -108,7 +125,7 @@ pub fn run_cmd() -> Result<()> { } } - let status = command.wait()?; + let status = child.wait()?; if !status.success() { bail!("Failed to execute cargo build successfully {}", status); } diff --git a/dev-tools/xtask/src/virtual_hardware_stub.rs b/dev-tools/xtask/src/virtual_hardware_stub.rs deleted file mode 100644 index 62c0d0b0300..00000000000 --- a/dev-tools/xtask/src/virtual_hardware_stub.rs +++ /dev/null @@ -1,13 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
- -use anyhow::{bail, Result}; -use clap::Parser; - -#[derive(Parser)] -pub struct Args {} - -pub fn run_cmd(_args: Args) -> Result<()> { - bail!("Virtual hardware only available on illumos") -} From 509bf0727e8bb98d88ed7346ce6469d70bd3c848 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 15 May 2024 21:36:27 -0400 Subject: [PATCH 15/37] Bump gimlet and sidecar versions (#5781) This picks up a new FPGA update --- tools/permslip_staging | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/permslip_staging b/tools/permslip_staging index a3f491210ad..33b8b8c0271 100644 --- a/tools/permslip_staging +++ b/tools/permslip_staging @@ -1,4 +1,4 @@ -a90127c6098a99100413f0f0d4eb97c96f825cff11a92c41b8b1c2927977b17a manifest-gimlet-v1.0.18.toml +03df89d44ad8b653abbeb7fbb83821869f008733e9da946457e72a13cb11d6cc manifest-gimlet-v1.0.19.toml b973cc9feb20f7bba447e7f5291c4070387fa9992deab81301f67f0a3844cd0c manifest-oxide-rot-1-v1.0.11.toml aae829e02d79ec0fe19019c783b6426c6fcc1fe4427aea70b65afc2884f53db8 manifest-psc-v1.0.17.toml -16992e82dff635eda1e065e0f6e325c795b6e90c879c7442ae062c063940a60a manifest-sidecar-v1.0.17.toml +5fc2aca37c2165c57cef4321fbaa4fe03ff38dcd992b6d6a076f54c167e0ad9f manifest-sidecar-v1.0.18.toml From c3f85318b6be6cab2bee233d6f0a90dfb4b14253 Mon Sep 17 00:00:00 2001 From: Ryan Goodfellow Date: Thu, 16 May 2024 10:10:36 -0700 Subject: [PATCH 16/37] tests: fix mgd and dpd tcp port regex (#5782) --- test-utils/src/dev/dendrite.rs | 9 ++++-- test-utils/src/dev/maghemite.rs | 50 +++++++++++++++++++++++++++++++-- 2 files changed, 54 insertions(+), 5 deletions(-) diff --git a/test-utils/src/dev/dendrite.rs b/test-utils/src/dev/dendrite.rs index 8938595aa23..22f3f696011 100644 --- a/test-utils/src/dev/dendrite.rs +++ b/test-utils/src/dev/dendrite.rs @@ -139,8 +139,8 @@ async fn discover_port(logfile: String) -> Result { async fn find_dendrite_port_in_log( logfile: String, ) -> Result { - let re = regex::Regex::new(r#""local_addr":"\[::1\]:?([0-9]+)""#).unwrap(); - let reader = BufReader::new(File::open(logfile).await?); + let re = regex::Regex::new(r#""local_addr":"\[::1\]:([0-9]+)""#).unwrap(); + let mut reader = BufReader::new(File::open(&logfile).await?); let mut lines = reader.lines(); loop { match lines.next_line().await? { @@ -155,6 +155,11 @@ async fn find_dendrite_port_in_log( } None => { sleep(Duration::from_millis(10)).await; + + // We might have gotten a partial line; close the file, reopen + // it, and start reading again from the beginning. + reader = BufReader::new(File::open(&logfile).await?); + lines = reader.lines(); } } } diff --git a/test-utils/src/dev/maghemite.rs b/test-utils/src/dev/maghemite.rs index 7cddb13cb2b..7e2d8953299 100644 --- a/test-utils/src/dev/maghemite.rs +++ b/test-utils/src/dev/maghemite.rs @@ -137,9 +137,8 @@ async fn discover_port(logfile: String) -> Result { } async fn find_mgd_port_in_log(logfile: String) -> Result { - let re = - regex::Regex::new(r#""local_addr":"\[::?(1)\]:?([0-9]+)""#).unwrap(); - let reader = BufReader::new(File::open(logfile).await?); + let re = regex::Regex::new(r#""local_addr":"\[::1?\]:([0-9]+)""#).unwrap(); + let mut reader = BufReader::new(File::open(&logfile).await?); let mut lines = reader.lines(); loop { match lines.next_line().await? { @@ -154,7 +153,52 @@ async fn find_mgd_port_in_log(logfile: String) -> Result { } None => { sleep(Duration::from_millis(10)).await; + + // We might have gotten a partial line; close the file, reopen + // it, and start reading again from the beginning. 
+ reader = BufReader::new(File::open(&logfile).await?); + lines = reader.lines(); } } } } + +#[cfg(test)] +mod tests { + use super::find_mgd_port_in_log; + use std::io::Write; + use std::process::Stdio; + use tempfile::NamedTempFile; + + const EXPECTED_PORT: u16 = 4676; + + #[tokio::test] + async fn test_mgd_in_path() { + // With no arguments, we expect to see the default help message. + tokio::process::Command::new("mgd") + .stdin(Stdio::null()) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .spawn() + .expect("Cannot find 'mgd' on PATH. Refer to README.md for installation instructions"); + } + + #[tokio::test] + async fn test_discover_local_listening_port() { + // Write some data to a fake log file + // This line is representative of the kind of output that mgd currently logs + let line = r#"msg":"registered endpoint","v":0,"name":"slog-rs","level":20,"time":"2024-05-15T19:45:52.620737793-07:00","hostname":"masaka","pid":6854,"local_addr":"[::]:4676","unit":"api-server","path":"/bfd/peers","method":"GET"}"#; + let mut file = NamedTempFile::new().unwrap(); + writeln!(file, "A garbage line").unwrap(); + writeln!(file, "{}", line).unwrap(); + writeln!(file, "Another garbage line").unwrap(); + file.flush().unwrap(); + + assert_eq!( + find_mgd_port_in_log(file.path().display().to_string()) + .await + .unwrap(), + EXPECTED_PORT + ); + } +} From a583f7519a87b297c829ee05e45bc920da2546a4 Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Thu, 16 May 2024 14:13:36 -0400 Subject: [PATCH 17/37] [omdb] Show physical disks in blueprints and inventory (#5745) This PR changes the construction and display of blueprint diffs significantly. The need for the changes arose out of the tight coupling between zones and sleds. When adding disks to the mix we needed to also take them into account when determininig when a sled was modified. We also want to display physical disks as tables along with the zone tables under the sleds. This turned out to be somewhat trickier than necesary, and so I changed how the tables were constructed and rendered. Hopefully this will also make it easier to add new tables in the future. One possibly controversial change is that I changed the way zone modifications are rendered. They are no longer three lines long. Currently only `disposition` is allowed to change, and so I used an arrow from "old" to "new" versions. Additionally, I removed the (added/ modified/removed) suffixes as they seem redundant to me, make the lines longer, and make things harder to read IMO. I'm open to discussion about both of these changes. My guess is that eventually, we'll want to be able to do row filtering, and I plan to also add some colored output, also in this PR most likely. It should be noted that the output mechanism is decoupled from the representation of the tables, `BpSledSubtable`. This allows output in other formats if desired in the future. As for the inventory collection output, I added a requirement when filtering on sled-id, that we also filter on collection-id, because filtering on sled-id does a full table scan and requires a new index. I can add the index instead if we feel it's important. 
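To make the new shape concrete: the diff is now exposed as plain data (sets of sled IDs plus per-sled zone and disk tables) rather than iterator-returning accessors. A short sketch of how a caller might consume it; the field names (`sleds_added`, `sleds_removed`, `sleds_modified`, `zones.added`) follow the updated tests later in this patch, while the wrapper function itself is illustrative:

```rust
use nexus_types::deployment::Blueprint;

/// Sketch (not code from this patch): summarize what changed between two
/// blueprints using the field-based diff representation described above.
fn summarize_diff(b1: &Blueprint, b2: &Blueprint) {
    let diff = b2.diff_since_blueprint(b1);

    // Render the table-based display, including per-sled disk and zone tables.
    println!("{}", diff.display());

    println!(
        "sleds added: {}, removed: {}, modified: {}",
        diff.sleds_added.len(),
        diff.sleds_removed.len(),
        diff.sleds_modified.len(),
    );

    // Zone additions are grouped per sled.
    for sled_id in &diff.sleds_added {
        if let Some(added) = diff.zones.added.get(sled_id) {
            println!("  sled {sled_id}: {} new zones", added.zones.len());
        }
    }
}
```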
Fixes #5624 --- Cargo.lock | 2 +- clients/sled-agent-client/src/lib.rs | 2 +- common/src/disk.rs | 2 +- dev-tools/omdb/src/bin/omdb/db.rs | 73 + dev-tools/omdb/src/bin/omdb/nexus.rs | 2 +- dev-tools/omdb/tests/successes.out | 125 +- dev-tools/reconfigurator-cli/src/main.rs | 8 +- .../tests/output/cmd-stdout | 20 +- .../db-queries/src/db/datastore/deployment.rs | 6 +- .../planning/src/blueprint_builder/builder.rs | 89 +- .../planning/src/blueprint_builder/zones.rs | 78 +- nexus/reconfigurator/planning/src/example.rs | 1 + nexus/reconfigurator/planning/src/planner.rs | 245 ++-- nexus/reconfigurator/planning/src/system.rs | 5 +- .../output/blueprint_builder_initial_diff.txt | 164 ++- .../output/planner_basic_add_sled_2_3.txt | 197 ++- .../output/planner_basic_add_sled_3_5.txt | 217 ++- .../planner_decommissions_sleds_1_2.txt | 195 +-- .../planner_decommissions_sleds_bp2.txt | 156 ++- .../output/planner_nonprovisionable_1_2.txt | 305 ++-- .../output/planner_nonprovisionable_2_2a.txt | 315 +++-- .../output/planner_nonprovisionable_bp2.txt | 253 ++-- nexus/types/Cargo.toml | 2 +- nexus/types/src/deployment.rs | 1244 +++-------------- nexus/types/src/deployment/blueprint_diff.rs | 854 +++++++++++ .../types/src/deployment/blueprint_display.rs | 331 +++++ nexus/types/src/lib.rs | 1 - nexus/types/src/sectioned_table.rs | 357 ----- .../tests/output/self-stat-schema.json | 6 +- sled-storage/src/disk.rs | 11 +- 30 files changed, 2992 insertions(+), 2274 deletions(-) create mode 100644 nexus/types/src/deployment/blueprint_diff.rs create mode 100644 nexus/types/src/deployment/blueprint_display.rs delete mode 100644 nexus/types/src/sectioned_table.rs diff --git a/Cargo.lock b/Cargo.lock index 3bdac722380..9ead6007ede 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4892,6 +4892,7 @@ dependencies = [ "chrono", "clap", "derive-where", + "derive_more", "dns-service-client", "futures", "gateway-client", @@ -4914,7 +4915,6 @@ dependencies = [ "slog-error-chain", "steno", "strum", - "tabled", "test-strategy", "thiserror", "uuid", diff --git a/clients/sled-agent-client/src/lib.rs b/clients/sled-agent-client/src/lib.rs index bfb97ec9cd9..a0145af9106 100644 --- a/clients/sled-agent-client/src/lib.rs +++ b/clients/sled-agent-client/src/lib.rs @@ -35,6 +35,7 @@ progenitor::generate_api!( PortConfigV1 = { derives = [PartialEq, Eq, Hash, Serialize, Deserialize] }, RouteConfig = { derives = [PartialEq, Eq, Hash, Serialize, Deserialize] }, IpNet = { derives = [PartialEq, Eq, Hash, Serialize, Deserialize] }, + OmicronPhysicalDiskConfig = { derives = [Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash, PartialOrd, Ord] } }, //TODO trade the manual transformations later in this file for the // replace directives below? @@ -62,7 +63,6 @@ progenitor::generate_api!( // We cannot easily configure progenitor to derive `Eq` on all the client- // generated types because some have floats and other types that can't impl // `Eq`. We impl it explicitly for a few types on which we need it. 
-impl Eq for types::OmicronPhysicalDiskConfig {} impl Eq for types::OmicronPhysicalDisksConfig {} impl Eq for types::OmicronZonesConfig {} impl Eq for types::OmicronZoneConfig {} diff --git a/common/src/disk.rs b/common/src/disk.rs index 0cf9b6e073f..c6d60c5140a 100644 --- a/common/src/disk.rs +++ b/common/src/disk.rs @@ -22,6 +22,6 @@ use serde::{Deserialize, Serialize}; )] pub struct DiskIdentity { pub vendor: String, - pub serial: String, pub model: String, + pub serial: String, } diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 0bbb232d554..5b029b09082 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -51,6 +51,7 @@ use nexus_db_model::ExternalIp; use nexus_db_model::HwBaseboardId; use nexus_db_model::Instance; use nexus_db_model::InvCollection; +use nexus_db_model::InvPhysicalDisk; use nexus_db_model::IpAttachState; use nexus_db_model::IpKind; use nexus_db_model::NetworkInterface; @@ -98,6 +99,7 @@ use omicron_common::api::external::InstanceState; use omicron_common::api::external::MacAddr; use omicron_uuid_kinds::CollectionUuid; use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; use sled_agent_client::types::VolumeConstructionRequest; use std::borrow::Cow; use std::cmp::Ordering; @@ -381,6 +383,8 @@ enum InventoryCommands { Cabooses, /// list and show details from particular collections Collections(CollectionsArgs), + /// show all physical disks every found + PhysicalDisks(PhysicalDisksArgs), /// list all root of trust pages ever found RotPages, } @@ -408,6 +412,15 @@ struct CollectionsShowArgs { show_long_strings: bool, } +#[derive(Debug, Args, Clone, Copy)] +struct PhysicalDisksArgs { + #[clap(long)] + collection_id: Option, + + #[clap(long, requires("collection_id"))] + sled_id: Option, +} + #[derive(Debug, Args)] struct ReconfiguratorSaveArgs { /// where to save the output @@ -2652,6 +2665,9 @@ async fn cmd_db_inventory( ) .await } + InventoryCommands::PhysicalDisks(args) => { + cmd_db_inventory_physical_disks(&conn, limit, args).await + } InventoryCommands::RotPages => { cmd_db_inventory_rot_pages(&conn, limit).await } @@ -2736,6 +2752,63 @@ async fn cmd_db_inventory_cabooses( Ok(()) } +async fn cmd_db_inventory_physical_disks( + conn: &DataStoreConnection<'_>, + limit: NonZeroU32, + args: PhysicalDisksArgs, +) -> Result<(), anyhow::Error> { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct DiskRow { + inv_collection_id: Uuid, + sled_id: Uuid, + slot: i64, + vendor: String, + model: String, + serial: String, + variant: String, + } + + use db::schema::inv_physical_disk::dsl; + let mut query = dsl::inv_physical_disk.into_boxed(); + query = query.limit(i64::from(u32::from(limit))); + + if let Some(collection_id) = args.collection_id { + query = query.filter( + dsl::inv_collection_id.eq(collection_id.into_untyped_uuid()), + ); + } + + if let Some(sled_id) = args.sled_id { + query = query.filter(dsl::sled_id.eq(sled_id.into_untyped_uuid())); + } + + let disks = query + .select(InvPhysicalDisk::as_select()) + .load_async(&**conn) + .await + .context("loading physical disks")?; + + let rows = disks.into_iter().map(|disk| DiskRow { + inv_collection_id: disk.inv_collection_id.into_untyped_uuid(), + sled_id: disk.sled_id.into_untyped_uuid(), + slot: disk.slot, + vendor: disk.vendor, + model: disk.model.clone(), + serial: disk.model.clone(), + variant: format!("{:?}", disk.variant), + }); + + let table = tabled::Table::new(rows) + 
.with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("{}", table); + + Ok(()) +} + async fn cmd_db_inventory_rot_pages( conn: &DataStoreConnection<'_>, limit: NonZeroU32, diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs index 1a12cbdf35a..22fe1894cfd 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus.rs @@ -1171,7 +1171,7 @@ async fn cmd_nexus_blueprints_diff( args.blueprint2_id.resolve_to_blueprint(client), ) .await?; - let diff = b2.diff_since_blueprint(&b1).context("diffing blueprints")?; + let diff = b2.diff_since_blueprint(&b1); println!("{}", diff.display()); Ok(()) } diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 8c68b0f4315..c4c28460b88 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -501,27 +501,28 @@ stdout: blueprint ............. parent: - ----------------------------------------------------------------------------------------- - zone type zone ID disposition underlay IP - ----------------------------------------------------------------------------------------- - - sled .....................: blueprint zones at generation 2 - (no zones) - - sled .....................: blueprint zones at generation 2 - clickhouse ..................... in service ::1 - cockroach_db ..................... in service ::1 - crucible_pantry ..................... in service ::1 - external_dns ..................... in service ::1 - internal_dns ..................... in service ::1 - nexus ..................... in service ::ffff:127.0.0.1 - -METADATA: - created by: nexus-test-utils - created at: - comment: initial test blueprint - internal DNS version: 1 - external DNS version: 2 +!..................... +WARNING: Zones exist without physical disks! + omicron zones at generation 2: + --------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + --------------------------------------------------------------------------------------- + clickhouse ..................... in service ::1 + cockroach_db ..................... in service ::1 + crucible_pantry ..................... in service ::1 + external_dns ..................... in service ::1 + internal_dns ..................... in service ::1 + nexus ..................... in service ::ffff:127.0.0.1 + + + + METADATA: + created by::::::::::: nexus-test-utils + created at::::::::::: + comment:::::::::::::: initial test blueprint + internal DNS version: 1 + external DNS version: 2 + --------------------------------------------- stderr: @@ -534,27 +535,28 @@ stdout: blueprint ............. parent: - ----------------------------------------------------------------------------------------- - zone type zone ID disposition underlay IP - ----------------------------------------------------------------------------------------- - - sled .....................: blueprint zones at generation 2 - (no zones) - - sled .....................: blueprint zones at generation 2 - clickhouse ..................... in service ::1 - cockroach_db ..................... in service ::1 - crucible_pantry ..................... in service ::1 - external_dns ..................... in service ::1 - internal_dns ..................... in service ::1 - nexus ..................... 
in service ::ffff:127.0.0.1 - -METADATA: - created by: nexus-test-utils - created at: - comment: initial test blueprint - internal DNS version: 1 - external DNS version: 2 +!..................... +WARNING: Zones exist without physical disks! + omicron zones at generation 2: + --------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + --------------------------------------------------------------------------------------- + clickhouse ..................... in service ::1 + cockroach_db ..................... in service ::1 + crucible_pantry ..................... in service ::1 + external_dns ..................... in service ::1 + internal_dns ..................... in service ::1 + nexus ..................... in service ::ffff:127.0.0.1 + + + + METADATA: + created by::::::::::: nexus-test-utils + created at::::::::::: + comment:::::::::::::: initial test blueprint + internal DNS version: 1 + external DNS version: 2 + --------------------------------------------- stderr: @@ -567,23 +569,28 @@ stdout: from: blueprint ............. to: blueprint ............. - --------------------------------------------------------------------------------------------------- - zone type zone ID disposition underlay IP status - --------------------------------------------------------------------------------------------------- - - UNCHANGED SLEDS: - - sled .....................: blueprint zones at generation 2 - clickhouse ..................... in service ::1 - cockroach_db ..................... in service ::1 - crucible_pantry ..................... in service ::1 - external_dns ..................... in service ::1 - internal_dns ..................... in service ::1 - nexus ..................... in service ::ffff:127.0.0.1 - - METADATA: - internal DNS version: 1 (unchanged) - external DNS version: 2 (unchanged) + UNCHANGED SLEDS: + + sled .....................: + + sled .....................: + + omicron zones at generation 2: + --------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + --------------------------------------------------------------------------------------- + clickhouse ..................... in service ::1 + cockroach_db ..................... in service ::1 + crucible_pantry ..................... in service ::1 + external_dns ..................... in service ::1 + internal_dns ..................... in service ::1 + nexus ..................... in service ::ffff:127.0.0.1 + + + METADATA: + internal DNS version: 1 (unchanged) + external DNS version: 2 (unchanged) + --------------------------------------------- stderr: diff --git a/dev-tools/reconfigurator-cli/src/main.rs b/dev-tools/reconfigurator-cli/src/main.rs index f088c9d97d7..1c9d9866a8f 100644 --- a/dev-tools/reconfigurator-cli/src/main.rs +++ b/dev-tools/reconfigurator-cli/src/main.rs @@ -780,9 +780,7 @@ fn cmd_blueprint_diff( let blueprint1 = sim.blueprint_lookup(blueprint1_id)?; let blueprint2 = sim.blueprint_lookup(blueprint2_id)?; - let sled_diff = blueprint2 - .diff_since_blueprint(&blueprint1) - .context("failed to diff blueprints")?; + let sled_diff = blueprint2.diff_since_blueprint(&blueprint1); swriteln!(rv, "{}", sled_diff.display()); // Diff'ing DNS is a little trickier. 
First, compute what DNS should be for @@ -897,9 +895,7 @@ fn cmd_blueprint_diff_inventory( anyhow!("no such inventory collection: {}", collection_id) })?; let blueprint = sim.blueprint_lookup(blueprint_id)?; - let diff = blueprint - .diff_since_collection(&collection) - .context("failed to diff blueprint from inventory collection")?; + let diff = blueprint.diff_since_collection(&collection); Ok(Some(diff.display().to_string())) } diff --git a/dev-tools/reconfigurator-cli/tests/output/cmd-stdout b/dev-tools/reconfigurator-cli/tests/output/cmd-stdout index 273e847a86a..a2d6d3d17bd 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmd-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmd-stdout @@ -24,25 +24,25 @@ sled ..................... subnet fd00:1122:3344:101::/64 zpools (10): ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", serial: "serial-.....................", model: "fake-model" }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", serial: "serial-.....................", model: "fake-model" }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", serial: "serial-.....................", model: "fake-model" }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", serial: "serial-.....................", model: "fake-model" }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", serial: "serial-.....................", model: "fake-model" }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", serial: "serial-.....................", model: "fake-model" }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... 
(physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", serial: "serial-.....................", model: "fake-model" }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", serial: "serial-.....................", model: "fake-model" }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", serial: "serial-.....................", model: "fake-model" }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } ..................... (zpool) - ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", serial: "serial-.....................", model: "fake-model" }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } > sled-add ..................... 
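The expected-output churn above follows from the `DiskIdentity` field reorder earlier in this patch (`model` now declared before `serial`): derived trait output follows declaration order, so every rendered `DiskIdentity { .. }` moves `model` ahead of `serial`. A tiny self-contained illustration, using a local copy of the struct and placeholder values (illustrative only, not code from the patch):

```rust
// Local copy of the reordered struct with only `Debug` derived, to show why
// the `SledDisk { disk_identity: DiskIdentity { .. }, .. }` lines above
// changed. Values are placeholders in the style of the test output.
#[derive(Debug)]
struct DiskIdentity {
    vendor: String,
    model: String,
    serial: String,
}

fn main() {
    let id = DiskIdentity {
        vendor: "fake-vendor".to_string(),
        model: "fake-model".to_string(),
        serial: "serial-0".to_string(),
    };
    // Derived `Debug` prints fields in declaration order:
    // DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-0" }
    println!("{id:?}");
}
```

Any derived ordering on the real type would likewise now compare `model` before `serial`.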
diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 09bc2eef0f8..003b64fd78e 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -1658,7 +1658,7 @@ mod tests { let blueprint2 = builder.build(); let authz_blueprint2 = authz_blueprint_from_id(blueprint2.id); - let diff = blueprint2.diff_since_blueprint(&blueprint1).unwrap(); + let diff = blueprint2.diff_since_blueprint(&blueprint1); println!("b1 -> b2: {}", diff.display()); println!("b1 disks: {:?}", blueprint1.blueprint_disks); println!("b2 disks: {:?}", blueprint2.blueprint_disks); @@ -1699,9 +1699,7 @@ mod tests { .blueprint_read(&opctx, &authz_blueprint2) .await .expect("failed to read collection back"); - let diff = blueprint_read - .diff_since_blueprint(&blueprint2) - .expect("failed to diff blueprints"); + let diff = blueprint_read.diff_since_blueprint(&blueprint2); println!("diff: {}", diff.display()); assert_eq!(blueprint2, blueprint_read); assert_eq!(blueprint2.internal_dns_version, new_internal_dns_version); diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 7e34bf96917..8c8f4f3e29c 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -45,7 +45,7 @@ use omicron_common::api::external::MacAddr; use omicron_common::api::external::Vni; use omicron_common::api::internal::shared::NetworkInterface; use omicron_common::api::internal::shared::NetworkInterfaceKind; -use omicron_uuid_kinds::ExternalIpUuid; +use omicron_uuid_kinds::ExternalIpKind; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneKind; use omicron_uuid_kinds::OmicronZoneUuid; @@ -807,7 +807,7 @@ impl<'a> BlueprintBuilder<'a> { for _ in 0..num_nexus_to_add { let nexus_id = self.rng.zone_rng.next(); let external_ip = OmicronZoneExternalFloatingIp { - id: ExternalIpUuid::new_v4(), + id: self.rng.external_ip_rng.next(), ip: self .available_external_ips .next() @@ -992,6 +992,7 @@ struct BlueprintBuilderRng { blueprint_rng: UuidRng, zone_rng: TypedUuidRng, network_interface_rng: UuidRng, + external_ip_rng: TypedUuidRng, } impl BlueprintBuilderRng { @@ -1004,8 +1005,15 @@ impl BlueprintBuilderRng { let zone_rng = TypedUuidRng::from_parent_rng(&mut parent, "zone"); let network_interface_rng = UuidRng::from_parent_rng(&mut parent, "network_interface"); - - BlueprintBuilderRng { blueprint_rng, zone_rng, network_interface_rng } + let external_ip_rng = + TypedUuidRng::from_parent_rng(&mut parent, "external_ip"); + + BlueprintBuilderRng { + blueprint_rng, + zone_rng, + network_interface_rng, + external_ip_rng, + } } fn set_seed(&mut self, seed: H) { @@ -1220,6 +1228,7 @@ pub mod test { use crate::example::ExampleSystem; use crate::system::SledBuilder; use expectorate::assert_contents; + use nexus_types::deployment::BlueprintOrCollectionZoneConfig; use nexus_types::deployment::BlueprintZoneFilter; use nexus_types::external_api::views::SledPolicy; use omicron_common::address::IpRange; @@ -1260,8 +1269,7 @@ pub mod test { example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); verify_blueprint(&blueprint_initial); - let diff = - blueprint_initial.diff_since_collection(&collection).unwrap(); + let diff = blueprint_initial.diff_since_collection(&collection); // There are some differences with even a no-op diff between a // collection and a blueprint, such as 
new data being added to // blueprints like DNS generation numbers. @@ -1274,9 +1282,9 @@ pub mod test { "tests/output/blueprint_builder_initial_diff.txt", &diff.display().to_string(), ); - assert_eq!(diff.sleds_added().len(), 0); - assert_eq!(diff.sleds_removed().len(), 0); - assert_eq!(diff.sleds_modified().count(), 0); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); // Test a no-op blueprint. let builder = BlueprintBuilder::new_based_on( @@ -1288,14 +1296,14 @@ pub mod test { .expect("failed to create builder"); let blueprint = builder.build(); verify_blueprint(&blueprint); - let diff = blueprint.diff_since_blueprint(&blueprint_initial).unwrap(); + let diff = blueprint.diff_since_blueprint(&blueprint_initial); println!( "initial blueprint -> next blueprint (expected no changes):\n{}", diff.display() ); - assert_eq!(diff.sleds_added().len(), 0); - assert_eq!(diff.sleds_removed().len(), 0); - assert_eq!(diff.sleds_modified().count(), 0); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); logctx.cleanup_successful(); } @@ -1331,14 +1339,14 @@ pub mod test { let blueprint2 = builder.build(); verify_blueprint(&blueprint2); - let diff = blueprint2.diff_since_blueprint(&blueprint1).unwrap(); + let diff = blueprint2.diff_since_blueprint(&blueprint1); println!( "initial blueprint -> next blueprint (expected no changes):\n{}", diff.display() ); - assert_eq!(diff.sleds_added().len(), 0); - assert_eq!(diff.sleds_removed().len(), 0); - assert_eq!(diff.sleds_modified().count(), 0); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); // The next step is adding these zones to a new sled. let new_sled_id = example.sled_rng.next(); @@ -1361,35 +1369,43 @@ pub mod test { let blueprint3 = builder.build(); verify_blueprint(&blueprint3); - let diff = blueprint3.diff_since_blueprint(&blueprint2).unwrap(); + let diff = blueprint3.diff_since_blueprint(&blueprint2); println!("expecting new NTP and Crucible zones:\n{}", diff.display()); // No sleds were changed or removed. - assert_eq!(diff.sleds_modified().count(), 0); - assert_eq!(diff.sleds_removed().len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); // One sled was added. - let sleds: Vec<_> = diff.sleds_added().collect(); - assert_eq!(sleds.len(), 1); - let (sled_id, new_sled_zones) = sleds[0]; - assert_eq!(sled_id, new_sled_id); + assert_eq!(diff.sleds_added.len(), 1); + let sled_id = diff.sleds_added.first().unwrap(); + let new_sled_zones = diff.zones.added.get(sled_id).unwrap(); + assert_eq!(*sled_id, new_sled_id); // The generation number should be newer than the initial default. - assert!(new_sled_zones.generation > Generation::new()); + assert!(new_sled_zones.generation_after.unwrap() > Generation::new()); // All zones' underlay addresses ought to be on the sled's subnet. for z in &new_sled_zones.zones { assert!(new_sled_resources .subnet .net() - .contains(z.underlay_address)); + .contains(z.underlay_address())); } // Check for an NTP zone. Its sockaddr's IP should also be on the // sled's subnet. assert!(new_sled_zones.zones.iter().any(|z| { - if let BlueprintZoneType::InternalNtp( - blueprint_zone_type::InternalNtp { address, .. 
}, - ) = &z.zone_type + if let BlueprintOrCollectionZoneConfig::Blueprint( + BlueprintZoneConfig { + zone_type: + BlueprintZoneType::InternalNtp( + blueprint_zone_type::InternalNtp { + address, .. + }, + ), + .. + }, + ) = &z { assert!(new_sled_resources .subnet @@ -1404,9 +1420,18 @@ pub mod test { .zones .iter() .filter_map(|z| { - if let BlueprintZoneType::Crucible( - blueprint_zone_type::Crucible { address, dataset }, - ) = &z.zone_type + if let BlueprintOrCollectionZoneConfig::Blueprint( + BlueprintZoneConfig { + zone_type: + BlueprintZoneType::Crucible( + blueprint_zone_type::Crucible { + address, + dataset, + }, + ), + .. + }, + ) = &z { let ip = address.ip(); assert!(new_sled_resources.subnet.net().contains(*ip)); diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs b/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs index a2e577f80c5..68e2b9c2a21 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs @@ -346,16 +346,6 @@ mod tests { .expect("new zone ID should be present"); } - // Also call change_sled_zones without making any changes. This - // currently bumps the generation number, but in the future might - // become smarter and not do so (in which case this test will break). - let control_sled_id = example - .input - .all_sled_ids(SledFilter::Commissioned) - .nth(2) - .expect("at least 2 sleds present"); - _ = builder.zones.change_sled_zones(control_sled_id); - // Attempt to expunge the newly added Oximeter zone. This should fail // because we only support expunging zones that are unchanged from the // parent blueprint. @@ -376,52 +366,44 @@ mod tests { // above are present. let blueprint = builder.build(); verify_blueprint(&blueprint); - let diff = blueprint.diff_since_blueprint(&blueprint_initial).unwrap(); + let diff = blueprint.diff_since_blueprint(&blueprint_initial); println!("expecting new NTP and Oximeter zones:\n{}", diff.display()); // No sleds were removed. - assert_eq!(diff.sleds_removed().len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); // One sled was added. - let sleds: Vec<_> = diff.sleds_added().collect(); - assert_eq!(sleds.len(), 1); - let (sled_id, new_sled_zones) = sleds[0]; - assert_eq!(sled_id, new_sled_id); + assert_eq!(diff.sleds_added.len(), 1); + let sled_id = diff.sleds_added.first().unwrap(); + assert_eq!(*sled_id, new_sled_id); + let new_sled_zones = diff.zones.added.get(sled_id).unwrap(); // The generation number should be newer than the initial default. - assert_eq!(new_sled_zones.generation, Generation::new().next()); + assert_eq!( + new_sled_zones.generation_after.unwrap(), + Generation::new().next() + ); assert_eq!(new_sled_zones.zones.len(), 1); - // Two sleds were modified: existing_sled_id and control_sled_id. 
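// Illustrative sketch (not from this patch): with the collection-based diff
// used in these tests, per-sled zone additions are looked up through
// `diff.zones.added` instead of being iterated via `sleds_added()`. Field
// names follow the assertions in this series; the helper itself, its
// signature, and the exact container types are assumptions.
#[cfg(test)]
fn zones_added_on_sled(
    diff: &nexus_types::deployment::BlueprintDiff,
    sled_id: &omicron_uuid_kinds::SledUuid,
) -> usize {
    // A sled with no added zones simply has no entry in the map.
    diff.zones.added.get(sled_id).map(|added| added.zones.len()).unwrap_or(0)
}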
- let sleds = diff.sleds_modified(); - assert_eq!(sleds.len(), 2, "2 sleds modified"); - for (sled_id, sled_modified) in sleds { - if sled_id == existing_sled_id { - assert_eq!( - sled_modified.generation_after, - sled_modified.generation_before.next() - ); - assert_eq!(sled_modified.zones_added().len(), 1); - let added_zone = sled_modified.zones_added().next().unwrap(); - assert_eq!(added_zone.id, new_zone_id); - - assert_eq!(sled_modified.zones_removed().len(), 0); - assert_eq!(sled_modified.zones_modified().count(), 1); - let modified_zone = - sled_modified.zones_modified().next().unwrap(); - assert_eq!(modified_zone.zone_before.id(), existing_zone_id); - } else { - assert_eq!(sled_id, control_sled_id); - - // The generation number is bumped, but nothing else. - assert_eq!( - sled_modified.generation_after, - sled_modified.generation_before.next(), - "control sled has generation number bumped" - ); - assert_eq!(sled_modified.zones_added().len(), 0); - assert_eq!(sled_modified.zones_removed().len(), 0); - assert_eq!(sled_modified.zones_modified().count(), 0); - } + // TODO: AJS - See comment above - we don't actually use the control sled anymore + // so the comparison was changed. + // One sled was modified: existing_sled_id + assert_eq!(diff.sleds_modified.len(), 1, "1 sled modified"); + for sled_id in &diff.sleds_modified { + assert_eq!(*sled_id, existing_sled_id); + let added = diff.zones.added.get(sled_id).unwrap(); + assert_eq!( + added.generation_after.unwrap(), + added.generation_before.unwrap().next() + ); + assert_eq!(added.zones.len(), 1); + let added_zone = &added.zones[0]; + assert_eq!(added_zone.id(), new_zone_id); + + assert!(!diff.zones.removed.contains_key(sled_id)); + let modified = diff.zones.modified.get(sled_id).unwrap(); + assert_eq!(modified.zones.len(), 1); + let modified_zone = &modified.zones[0]; + assert_eq!(modified_zone.zone.id(), existing_zone_id); } logctx.cleanup_successful(); diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs index f8748be7582..e52fe3fc4bf 100644 --- a/nexus/reconfigurator/planning/src/example.rs +++ b/nexus/reconfigurator/planning/src/example.rs @@ -79,6 +79,7 @@ impl ExampleSystem { vec![], ) .unwrap(); + let _ = builder.sled_ensure_disks(sled_id, sled_resources).unwrap(); for pool_name in sled_resources.zpools.keys() { let _ = builder .sled_ensure_zone_crucible(sled_id, *pool_name) diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 5535b28910d..0c7ee8f5cb6 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -500,10 +500,10 @@ mod test { use expectorate::assert_contents; use nexus_inventory::now_db_precision; use nexus_types::deployment::blueprint_zone_type; + use nexus_types::deployment::BlueprintDiff; use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZoneFilter; use nexus_types::deployment::BlueprintZoneType; - use nexus_types::deployment::DiffSledModified; use nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledProvisionPolicy; use nexus_types::external_api::views::SledState; @@ -512,8 +512,10 @@ mod test { use omicron_common::disk::DiskIdentity; use omicron_test_utils::dev::test_setup_log; use omicron_uuid_kinds::PhysicalDiskUuid; + use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; - use std::collections::HashMap; + use sled_agent_client::ZoneKind; + use 
typed_rng::TypedUuidRng; /// Runs through a basic sequence of blueprints for adding a sled #[test] @@ -527,6 +529,8 @@ mod test { let blueprint1 = &example.blueprint; verify_blueprint(blueprint1); + println!("{}", blueprint1.display()); + // Now run the planner. It should do nothing because our initial // system didn't have any issues that the planner currently knows how to // fix. @@ -542,11 +546,17 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint2.diff_since_blueprint(blueprint1).unwrap(); + let diff = blueprint2.diff_since_blueprint(blueprint1); println!("1 -> 2 (expected no changes):\n{}", diff.display()); - assert_eq!(diff.sleds_added().len(), 0); - assert_eq!(diff.sleds_removed().len(), 0); - assert_eq!(diff.sleds_modified().count(), 0); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); + assert_eq!(diff.zones.added.len(), 0); + assert_eq!(diff.zones.removed.len(), 0); + assert_eq!(diff.zones.modified.len(), 0); + assert_eq!(diff.zones.errors.len(), 0); + assert_eq!(diff.physical_disks.added.len(), 0); + assert_eq!(diff.physical_disks.removed.len(), 0); verify_blueprint(&blueprint2); // Now add a new sled. @@ -568,7 +578,7 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint3.diff_since_blueprint(&blueprint2).unwrap(); + let diff = blueprint3.diff_since_blueprint(&blueprint2); println!( "2 -> 3 (expect new NTP zone on new sled):\n{}", diff.display() @@ -577,20 +587,18 @@ mod test { "tests/output/planner_basic_add_sled_2_3.txt", &diff.display().to_string(), ); - let sleds = diff.sleds_added().collect::>(); - let (sled_id, sled_zones) = sleds[0]; + assert_eq!(diff.sleds_added.len(), 1); + let sled_id = *diff.sleds_added.first().unwrap(); + let sled_zones = diff.zones.added.get(&sled_id).unwrap(); // We have defined elsewhere that the first generation contains no // zones. So the first one with zones must be newer. See // OmicronZonesConfig::INITIAL_GENERATION. - assert!(sled_zones.generation > Generation::new()); + assert!(sled_zones.generation_after.unwrap() > Generation::new()); assert_eq!(sled_id, new_sled_id); assert_eq!(sled_zones.zones.len(), 1); - assert!(matches!( - sled_zones.zones[0].zone_type, - BlueprintZoneType::InternalNtp(_), - )); - assert_eq!(diff.sleds_removed().len(), 0); - assert_eq!(diff.sleds_modified().count(), 0); + assert!(matches!(sled_zones.zones[0].kind(), ZoneKind::InternalNtp)); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); verify_blueprint(&blueprint3); // Check that with no change in inventory, the planner makes no changes. @@ -607,11 +615,11 @@ mod test { .with_rng_seed((TEST_NAME, "bp4")) .plan() .expect("failed to plan"); - let diff = blueprint4.diff_since_blueprint(&blueprint3).unwrap(); + let diff = blueprint4.diff_since_blueprint(&blueprint3); println!("3 -> 4 (expected no changes):\n{}", diff.display()); - assert_eq!(diff.sleds_added().len(), 0); - assert_eq!(diff.sleds_removed().len(), 0); - assert_eq!(diff.sleds_modified().count(), 0); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); verify_blueprint(&blueprint4); // Now update the inventory to have the requested NTP zone. 
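// Illustrative sketch (not from this patch): the "expected no changes" checks
// repeated throughout these planner tests, collected into one helper against
// the field-based `BlueprintDiff`. Every expression below is copied from the
// assertions in this series; only the helper itself is hypothetical.
#[cfg(test)]
fn assert_diff_is_noop(diff: &nexus_types::deployment::BlueprintDiff) {
    assert_eq!(diff.sleds_added.len(), 0);
    assert_eq!(diff.sleds_removed.len(), 0);
    assert_eq!(diff.sleds_modified.len(), 0);
    assert_eq!(diff.zones.added.len(), 0);
    assert_eq!(diff.zones.removed.len(), 0);
    assert_eq!(diff.zones.modified.len(), 0);
    assert_eq!(diff.zones.errors.len(), 0);
    assert_eq!(diff.physical_disks.added.len(), 0);
    assert_eq!(diff.physical_disks.removed.len(), 0);
}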
@@ -648,28 +656,30 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint5.diff_since_blueprint(&blueprint3).unwrap(); + let diff = blueprint5.diff_since_blueprint(&blueprint3); println!("3 -> 5 (expect Crucible zones):\n{}", diff.display()); assert_contents( "tests/output/planner_basic_add_sled_3_5.txt", &diff.display().to_string(), ); - assert_eq!(diff.sleds_added().len(), 0); - assert_eq!(diff.sleds_removed().len(), 0); - let sleds = diff.sleds_modified().collect::<Vec<_>>(); - assert_eq!(sleds.len(), 1); - let (sled_id, sled_changes) = &sleds[0]; + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 1); + let sled_id = diff.sleds_modified.first().unwrap(); + assert_eq!(*sled_id, new_sled_id); + // No removed or modified zones on this sled + assert!(!diff.zones.removed.contains_key(sled_id)); + assert!(!diff.zones.modified.contains_key(sled_id)); + // 10 crucible zones added + let zones_added = diff.zones.added.get(sled_id).unwrap(); assert_eq!( - sled_changes.generation_after, - sled_changes.generation_before.next() + zones_added.generation_after.unwrap(), + zones_added.generation_before.unwrap().next() ); - assert_eq!(*sled_id, new_sled_id); - assert_eq!(sled_changes.zones_removed().len(), 0); - assert_eq!(sled_changes.zones_modified().count(), 0); - let zones = sled_changes.zones_added().collect::<Vec<_>>(); - assert_eq!(zones.len(), 10); - for zone in &zones { - if !zone.zone_type.is_crucible() { + + assert_eq!(zones_added.zones.len(), 10); + for zone in &zones_added.zones { + if zone.kind() != ZoneKind::Crucible { panic!("unexpectedly added a non-Crucible zone: {zone:?}"); } } @@ -688,11 +698,11 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint6.diff_since_blueprint(&blueprint5).unwrap(); + let diff = blueprint6.diff_since_blueprint(&blueprint5); println!("5 -> 6 (expect no changes):\n{}", diff.display()); - assert_eq!(diff.sleds_added().len(), 0); - assert_eq!(diff.sleds_removed().len(), 0); - assert_eq!(diff.sleds_modified().count(), 0); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); verify_blueprint(&blueprint6); logctx.cleanup_successful(); @@ -722,6 +732,7 @@ mod test { assert_eq!(collection.sled_agents.len(), 1); assert_eq!(collection.omicron_zones.len(), 1); blueprint.blueprint_zones.retain(|k, _v| keep_sled_id == *k); + blueprint.blueprint_disks.retain(|k, _v| keep_sled_id == *k); (keep_sled_id, blueprint, collection, builder.build()) }; @@ -758,21 +769,24 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint2.diff_since_blueprint(&blueprint1).unwrap(); + let diff = blueprint2.diff_since_blueprint(&blueprint1); println!("1 -> 2 (added additional Nexus zones):\n{}", diff.display()); - assert_eq!(diff.sleds_added().len(), 0); - assert_eq!(diff.sleds_removed().len(), 0); - let mut sleds = diff.sleds_modified().collect::<Vec<_>>(); - assert_eq!(sleds.len(), 1); - let (changed_sled_id, sled_changes) = sleds.pop().unwrap(); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 1); + let changed_sled_id = diff.sleds_modified.first().unwrap(); + // TODO-cleanup use `TypedUuid` everywhere - assert_eq!(changed_sled_id, sled_id); - assert_eq!(sled_changes.zones_removed().len(), 0); - assert_eq!(sled_changes.zones_modified().count(), 0); - let zones = sled_changes.zones_added().collect::<Vec<_>>(); - assert_eq!(zones.len(), 
input.target_nexus_zone_count() - 1); - for zone in &zones { - if !zone.zone_type.is_nexus() { + assert_eq!(*changed_sled_id, sled_id); + assert_eq!(diff.zones.removed.len(), 0); + assert_eq!(diff.zones.modified.len(), 0); + let zones_added = diff.zones.added.get(changed_sled_id).unwrap(); + assert_eq!( + zones_added.zones.len(), + input.target_nexus_zone_count() - 1 + ); + for zone in &zones_added.zones { + if zone.kind() != ZoneKind::Nexus { panic!("unexpectedly added a non-Nexus zone: {zone:?}"); } } @@ -821,22 +835,21 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint2.diff_since_blueprint(&blueprint1).unwrap(); + let diff = blueprint2.diff_since_blueprint(&blueprint1); println!("1 -> 2 (added additional Nexus zones):\n{}", diff.display()); - assert_eq!(diff.sleds_added().len(), 0); - assert_eq!(diff.sleds_removed().len(), 0); - let sleds = diff.sleds_modified().collect::>(); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 3); // All 3 sleds should get additional Nexus zones. We expect a total of // 11 new Nexus zones, which should be spread evenly across the three // sleds (two should get 4 and one should get 3). - assert_eq!(sleds.len(), 3); let mut total_new_nexus_zones = 0; - for (sled_id, sled_changes) in sleds { - assert_eq!(sled_changes.zones_removed().len(), 0); - assert_eq!(sled_changes.zones_modified().count(), 0); - let zones = sled_changes.zones_added().collect::>(); - match zones.len() { + for sled_id in diff.sleds_modified { + assert!(!diff.zones.removed.contains_key(&sled_id)); + assert!(!diff.zones.modified.contains_key(&sled_id)); + let zones_added = &diff.zones.added.get(&sled_id).unwrap().zones; + match zones_added.len() { n @ (3 | 4) => { total_new_nexus_zones += n; } @@ -844,8 +857,8 @@ mod test { panic!("unexpected number of zones added to {sled_id}: {n}") } } - for zone in &zones { - if !zone.zone_type.is_nexus() { + for zone in zones_added { + if zone.kind() != ZoneKind::Nexus { panic!("unexpectedly added a non-Nexus zone: {zone:?}"); } } @@ -871,13 +884,16 @@ mod test { // one. 
builder.policy_mut().target_nexus_zone_count = 1; - let new_sled_disk = |policy| nexus_types::deployment::SledDisk { + // Make generated disk ids deterministic + let mut disk_rng = + TypedUuidRng::from_seed(TEST_NAME, "NewPhysicalDisks"); + let mut new_sled_disk = |policy| nexus_types::deployment::SledDisk { disk_identity: DiskIdentity { vendor: "test-vendor".to_string(), serial: "test-serial".to_string(), model: "test-model".to_string(), }, - disk_id: PhysicalDiskUuid::new_v4(), + disk_id: PhysicalDiskUuid::from(disk_rng.next()), policy, state: nexus_types::external_api::views::PhysicalDiskState::Active, }; @@ -892,15 +908,16 @@ mod test { const NEW_IN_SERVICE_DISKS: usize = 2; const NEW_EXPUNGED_DISKS: usize = 1; + let mut zpool_rng = TypedUuidRng::from_seed(TEST_NAME, "NewZpools"); for _ in 0..NEW_IN_SERVICE_DISKS { sled_details.resources.zpools.insert( - ZpoolUuid::new_v4(), + ZpoolUuid::from(zpool_rng.next()), new_sled_disk(nexus_types::external_api::views::PhysicalDiskPolicy::InService), ); } for _ in 0..NEW_EXPUNGED_DISKS { sled_details.resources.zpools.insert( - ZpoolUuid::new_v4(), + ZpoolUuid::from(zpool_rng.next()), new_sled_disk(nexus_types::external_api::views::PhysicalDiskPolicy::Expunged), ); } @@ -919,15 +936,17 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint2.diff_since_blueprint(&blueprint1).unwrap(); + let diff = blueprint2.diff_since_blueprint(&blueprint1); println!("1 -> 2 (some new disks, one expunged):\n{}", diff.display()); - let mut modified_sleds = diff.sleds_modified(); - assert_eq!(modified_sleds.len(), 1); - let (_, diff_modified) = modified_sleds.next().unwrap(); + assert_eq!(diff.sleds_modified.len(), 1); + let sled_id = diff.sleds_modified.first().unwrap(); // We should be adding a Crucible zone for each new in-service disk. - assert_eq!(diff_modified.zones_added().count(), NEW_IN_SERVICE_DISKS); - assert_eq!(diff_modified.zones_removed().len(), 0); + assert_eq!( + diff.zones.added.get(sled_id).unwrap().zones.len(), + NEW_IN_SERVICE_DISKS + ); + assert!(!diff.zones.removed.contains_key(sled_id)); logctx.cleanup_successful(); } @@ -1031,7 +1050,7 @@ mod test { &blueprint2.display().to_string(), ); - let diff = blueprint2.diff_since_blueprint(&blueprint1).unwrap(); + let diff = blueprint2.diff_since_blueprint(&blueprint1); println!( "1 -> 2 (added additional Nexus zones, take 2 sleds out of service):\n{}", diff.display() @@ -1049,28 +1068,30 @@ mod test { // cleanup, and we aren't performing garbage collection on zones or // sleds at the moment. - assert_eq!(diff.sleds_added().len(), 0); - assert_eq!(diff.sleds_removed().len(), 0); - let mut sleds = diff.sleds_modified().collect::>(); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); - let expunged_modified = sleds.remove(&expunged_sled_id).unwrap(); - assert_all_zones_expunged(&expunged_modified, "expunged sled"); + assert_all_zones_expunged(&diff, expunged_sled_id, "expunged sled"); // Only 2 of the 3 remaining sleds (not the non-provisionable sled) // should get additional Nexus zones. We expect a total of 6 new Nexus // zones, which should be split evenly between the two sleds, while the // non-provisionable sled should be unchanged. 
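// Illustrative sketch (not from this patch): the seeded-RNG pattern used above
// to make test UUIDs deterministic. `TypedUuidRng::from_seed` and `next()` are
// used exactly as in this diff; the seed strings and the test function wrapper
// are made up for the example.
#[test]
fn example_deterministic_ids() {
    use omicron_uuid_kinds::PhysicalDiskUuid;
    use typed_rng::TypedUuidRng;

    // Two RNGs built from the same seed pair yield the same UUID sequence, so
    // generated IDs stay stable across test runs (and in expectorate output).
    let mut rng_a = TypedUuidRng::from_seed("example-test", "ExampleDisks");
    let mut rng_b = TypedUuidRng::from_seed("example-test", "ExampleDisks");
    let disk_a = PhysicalDiskUuid::from(rng_a.next());
    let disk_b = PhysicalDiskUuid::from(rng_b.next());
    assert_eq!(disk_a, disk_b);
}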
- assert_eq!(sleds.len(), 2); + let mut remaining_modified_sleds = diff.sleds_modified.clone(); + remaining_modified_sleds.remove(&expunged_sled_id); + remaining_modified_sleds.remove(&decommissioned_sled_id); + + assert_eq!(remaining_modified_sleds.len(), 2); let mut total_new_nexus_zones = 0; - for (sled_id, sled_changes) in sleds { + for sled_id in remaining_modified_sleds { assert!(sled_id != nonprovisionable_sled_id); assert!(sled_id != expunged_sled_id); assert!(sled_id != decommissioned_sled_id); - assert_eq!(sled_changes.zones_removed().len(), 0); - assert_eq!(sled_changes.zones_modified().count(), 0); - let zones = sled_changes.zones_added().collect::>(); - for zone in &zones { - let BlueprintZoneType::Nexus(_) = zone.zone_type else { + assert!(!diff.zones.removed.contains_key(&sled_id)); + assert!(!diff.zones.modified.contains_key(&sled_id)); + let zones = &diff.zones.added.get(&sled_id).unwrap().zones; + for zone in zones { + if ZoneKind::Nexus != zone.kind() { panic!("unexpectedly added a non-Crucible zone: {zone:?}"); }; } @@ -1161,7 +1182,7 @@ mod test { blueprint2a.external_dns_version = blueprint2a.external_dns_version.next(); - let diff = blueprint2a.diff_since_blueprint(&blueprint2).unwrap(); + let diff = blueprint2a.diff_since_blueprint(&blueprint2); println!("2 -> 2a (manually modified zones):\n{}", diff.display()); assert_contents( "tests/output/planner_nonprovisionable_2_2a.txt", @@ -1173,16 +1194,13 @@ mod test { logctx.cleanup_successful(); } - fn assert_all_zones_expunged(modified: &DiffSledModified, desc: &str) { - assert_eq!( - modified.generation_before.next(), - modified.generation_after, - "for {desc}, generation should have been bumped" - ); - - assert_eq!( - modified.zones_added().count(), - 0, + fn assert_all_zones_expunged( + diff: &BlueprintDiff, + expunged_sled_id: SledUuid, + desc: &str, + ) { + assert!( + !diff.zones.added.contains_key(&expunged_sled_id), "for {desc}, no zones should have been added to blueprint" ); @@ -1191,20 +1209,27 @@ mod test { // zone removal will be part of some future garbage collection // process that isn't currently defined. - assert_eq!( - modified.zones_removed().len(), - 0, + assert!( + !diff.zones.removed.contains_key(&expunged_sled_id), "for {desc}, no zones should have been removed from blueprint" ); // Run through all the common zones and ensure that all of them // have been marked expunged. - for zone in modified.zones_modified() { + let modified_zones = + diff.zones.modified.get(&expunged_sled_id).unwrap(); + assert_eq!( + modified_zones.generation_before.next(), + modified_zones.generation_after, + "for {desc}, generation should have been bumped" + ); + + for modified_zone in &modified_zones.zones { assert_eq!( - zone.zone_after.disposition, + modified_zone.zone.disposition(), BlueprintZoneDisposition::Expunged, "for {desc}, zone {} should have been marked expunged", - zone.zone_after.id + modified_zone.zone.id() ); } } @@ -1248,7 +1273,7 @@ mod test { "tests/output/planner_decommissions_sleds_bp2.txt", &blueprint2.display().to_string(), ); - let diff = blueprint2.diff_since_blueprint(&blueprint1).unwrap(); + let diff = blueprint2.diff_since_blueprint(&blueprint1); println!("1 -> 2 (expunged {expunged_sled_id}):\n{}", diff.display()); assert_contents( "tests/output/planner_decommissions_sleds_1_2.txt", @@ -1285,15 +1310,15 @@ mod test { // collect zones, so we should still have the sled's expunged zones // (even though the sled itself is no longer present in the list of // commissioned sleds). 
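// Illustrative sketch (not from this patch): tallying expunged zones across
// all modified sleds with the diff structure that `assert_all_zones_expunged`
// above inspects. Field and method names follow this diff; iterating the map
// with `values()` and the exact element types are assumptions.
#[cfg(test)]
fn count_expunged_zones(diff: &nexus_types::deployment::BlueprintDiff) -> usize {
    diff.zones
        .modified
        .values()
        .flat_map(|modified| modified.zones.iter())
        .filter(|z| {
            z.zone.disposition()
                == nexus_types::deployment::BlueprintZoneDisposition::Expunged
        })
        .count()
}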
- let diff = blueprint3.diff_since_blueprint(&blueprint2).unwrap(); + let diff = blueprint3.diff_since_blueprint(&blueprint2); println!( "2 -> 3 (decommissioned {expunged_sled_id}):\n{}", diff.display() ); - assert_eq!(diff.sleds_added().count(), 0); - assert_eq!(diff.sleds_removed().count(), 0); - assert_eq!(diff.sleds_modified().count(), 0); - assert_eq!(diff.sleds_unchanged().count(), DEFAULT_N_SLEDS); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); + assert_eq!(diff.sleds_unchanged.len(), DEFAULT_N_SLEDS); logctx.cleanup_successful(); } diff --git a/nexus/reconfigurator/planning/src/system.rs b/nexus/reconfigurator/planning/src/system.rs index 15aefb7344f..e28b96dda50 100644 --- a/nexus/reconfigurator/planning/src/system.rs +++ b/nexus/reconfigurator/planning/src/system.rs @@ -440,13 +440,16 @@ impl Sled { hardware_slot: u16, nzpools: u8, ) -> Sled { + use typed_rng::TypedUuidRng; let unique = unique.unwrap_or_else(|| hardware_slot.to_string()); let model = format!("model{}", unique); let serial = format!("serial{}", unique); let revision = 0; + let mut zpool_rng = + TypedUuidRng::from_seed("SystemSimultatedSled", "ZpoolUuid"); let zpools: BTreeMap<_, _> = (0..nzpools) .map(|_| { - let zpool = ZpoolUuid::new_v4(); + let zpool = ZpoolUuid::from(zpool_rng.next()); let disk = SledDisk { disk_identity: DiskIdentity { vendor: String::from("fake-vendor"), diff --git a/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt b/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt index b421b8f383a..8bce7cec989 100644 --- a/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt +++ b/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt @@ -1,54 +1,116 @@ from: collection 094d362b-7d79-49e7-a244-134276cca8fe to: blueprint e4aeb3b3-272f-4967-be34-2d34daa46aa1 + UNCHANGED SLEDS: + + sled 08c7046b-c9c4-4368-881f-19a72df22143: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 44afce85-3377-4b20-a398-517c1579df4d in service fd00:1122:3344:103::23 + crucible 4644ea0c-0ec3-41be-a356-660308e1c3fc in service fd00:1122:3344:103::2c + crucible 55f4d117-0b9d-4256-a2c0-f46d3ed5fff9 in service fd00:1122:3344:103::25 + crucible 5c6a4628-8831-483b-995f-79b9126c4d04 in service fd00:1122:3344:103::28 + crucible 
6a01210c-45ed-41a5-9230-8e05ecf5dd8f in service fd00:1122:3344:103::29 + crucible 7004cab9-dfc0-43ba-92d3-58d4ced66025 in service fd00:1122:3344:103::24 + crucible 79552859-fbd3-43bb-a9d3-6baba25558f8 in service fd00:1122:3344:103::26 + crucible 90696819-9b53-485a-9c65-ca63602e843e in service fd00:1122:3344:103::27 + crucible c99525b3-3680-4df6-9214-2ee3e1020e8b in service fd00:1122:3344:103::2a + crucible f42959d3-9eef-4e3b-b404-6177ce3ec7a1 in service fd00:1122:3344:103::2b + internal_ntp c81c9d4a-36d7-4796-9151-f564d3735152 in service fd00:1122:3344:103::21 + nexus b2573120-9c91-4ed7-8b4f-a7bfe8dbc807 in service fd00:1122:3344:103::22 + + + sled 84ac367e-9b03-4e9d-a846-df1a08deee6c: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 0faa9350-2c02-47c7-a0a6-9f4afd69152c in service fd00:1122:3344:101::2c + crucible 5b44003e-1a3d-4152-b606-872c72efce0e in service fd00:1122:3344:101::25 + crucible 943fea7a-9458-4935-9dc7-01ee5cfe5a02 in service fd00:1122:3344:101::29 + crucible 95c3b6d1-2592-4252-b5c1-5d0faf3ce9c9 in service fd00:1122:3344:101::24 + crucible a5a0b7a9-37c9-4dbd-8393-ec7748ada3b0 in service fd00:1122:3344:101::2b + crucible a9a6a974-8953-4783-b815-da46884f2c02 in service fd00:1122:3344:101::23 + crucible aa25add8-60b0-4ace-ac60-15adcdd32d50 in service fd00:1122:3344:101::2a + crucible b6f2dd1e-7f98-4a68-9df2-b33c69d1f7ea in service fd00:1122:3344:101::27 + crucible dc22d470-dc46-436b-9750-25c8d7d369e2 in service fd00:1122:3344:101::26 + crucible f7e434f9-6d4a-476b-a9e2-48d6ee28a08e in service fd00:1122:3344:101::28 + internal_ntp 38b047ea-e3de-4859-b8e0-70cac5871446 in service fd00:1122:3344:101::21 + nexus fb36b9dc-273a-4bc3-aaa9-19ee4d0ef552 in service fd00:1122:3344:101::22 + + + sled be7f4375-2a6b-457f-b1a4-3074a715e5fe: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model 
serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 248db330-56e6-4c7e-b5ff-9cd6cbcb210a in service fd00:1122:3344:102::2c + crucible 353b0aff-4c71-4fae-a6bd-adcb1d2a1a1d in service fd00:1122:3344:102::29 + crucible 4330134c-41b9-4097-aa0b-3eaefa06d473 in service fd00:1122:3344:102::24 + crucible 65d03287-e43f-45f4-902e-0a5e4638f31a in service fd00:1122:3344:102::25 + crucible 6a5901b1-f9d7-425c-8ecb-a786c900f217 in service fd00:1122:3344:102::27 + crucible 9b722fea-a186-4bc3-bc37-ce7f6de6a796 in service fd00:1122:3344:102::23 + crucible b3583b5f-4a62-4471-9be7-41e61578de4c in service fd00:1122:3344:102::2a + crucible bac92034-b9e6-4e8b-9ffb-dbba9caec88d in service fd00:1122:3344:102::28 + crucible d9653001-f671-4905-a410-6a7abc358318 in service fd00:1122:3344:102::2b + crucible edaca77e-5806-446a-b00c-125962cd551d in service fd00:1122:3344:102::26 + internal_ntp aac3ab51-9e2b-4605-9bf6-e3eb3681c2b5 in service fd00:1122:3344:102::21 + nexus 29278a22-1ba1-4117-bfdb-39fcb9ae7fd1 in service fd00:1122:3344:102::22 + + + METADATA: ++ internal DNS version: (not present in collection) -> 1 ++ external DNS version: (not present in collection) -> 1 - ------------------------------------------------------------------------------------------------------ - zone type zone ID disposition underlay IP status - ------------------------------------------------------------------------------------------------------ - - UNCHANGED SLEDS: - - sled 08c7046b-c9c4-4368-881f-19a72df22143: blueprint zones at generation 2 - crucible 44afce85-3377-4b20-a398-517c1579df4d in service fd00:1122:3344:103::23 - crucible 4644ea0c-0ec3-41be-a356-660308e1c3fc in service fd00:1122:3344:103::2c - crucible 55f4d117-0b9d-4256-a2c0-f46d3ed5fff9 in service fd00:1122:3344:103::25 - crucible 5c6a4628-8831-483b-995f-79b9126c4d04 in service fd00:1122:3344:103::28 - crucible 6a01210c-45ed-41a5-9230-8e05ecf5dd8f in service fd00:1122:3344:103::29 - crucible 7004cab9-dfc0-43ba-92d3-58d4ced66025 in service fd00:1122:3344:103::24 - crucible 79552859-fbd3-43bb-a9d3-6baba25558f8 in service fd00:1122:3344:103::26 - crucible 90696819-9b53-485a-9c65-ca63602e843e in service fd00:1122:3344:103::27 - crucible c99525b3-3680-4df6-9214-2ee3e1020e8b in service fd00:1122:3344:103::2a - crucible f42959d3-9eef-4e3b-b404-6177ce3ec7a1 in service fd00:1122:3344:103::2b - internal_ntp c81c9d4a-36d7-4796-9151-f564d3735152 in service fd00:1122:3344:103::21 - nexus b2573120-9c91-4ed7-8b4f-a7bfe8dbc807 in service fd00:1122:3344:103::22 - - sled 84ac367e-9b03-4e9d-a846-df1a08deee6c: blueprint zones at generation 2 - crucible 0faa9350-2c02-47c7-a0a6-9f4afd69152c in service fd00:1122:3344:101::2c - crucible 5b44003e-1a3d-4152-b606-872c72efce0e in service fd00:1122:3344:101::25 - crucible 943fea7a-9458-4935-9dc7-01ee5cfe5a02 in service fd00:1122:3344:101::29 - crucible 95c3b6d1-2592-4252-b5c1-5d0faf3ce9c9 in service fd00:1122:3344:101::24 - crucible a5a0b7a9-37c9-4dbd-8393-ec7748ada3b0 in service fd00:1122:3344:101::2b - crucible a9a6a974-8953-4783-b815-da46884f2c02 in service fd00:1122:3344:101::23 - 
crucible aa25add8-60b0-4ace-ac60-15adcdd32d50 in service fd00:1122:3344:101::2a - crucible b6f2dd1e-7f98-4a68-9df2-b33c69d1f7ea in service fd00:1122:3344:101::27 - crucible dc22d470-dc46-436b-9750-25c8d7d369e2 in service fd00:1122:3344:101::26 - crucible f7e434f9-6d4a-476b-a9e2-48d6ee28a08e in service fd00:1122:3344:101::28 - internal_ntp 38b047ea-e3de-4859-b8e0-70cac5871446 in service fd00:1122:3344:101::21 - nexus fb36b9dc-273a-4bc3-aaa9-19ee4d0ef552 in service fd00:1122:3344:101::22 - - sled be7f4375-2a6b-457f-b1a4-3074a715e5fe: blueprint zones at generation 2 - crucible 248db330-56e6-4c7e-b5ff-9cd6cbcb210a in service fd00:1122:3344:102::2c - crucible 353b0aff-4c71-4fae-a6bd-adcb1d2a1a1d in service fd00:1122:3344:102::29 - crucible 4330134c-41b9-4097-aa0b-3eaefa06d473 in service fd00:1122:3344:102::24 - crucible 65d03287-e43f-45f4-902e-0a5e4638f31a in service fd00:1122:3344:102::25 - crucible 6a5901b1-f9d7-425c-8ecb-a786c900f217 in service fd00:1122:3344:102::27 - crucible 9b722fea-a186-4bc3-bc37-ce7f6de6a796 in service fd00:1122:3344:102::23 - crucible b3583b5f-4a62-4471-9be7-41e61578de4c in service fd00:1122:3344:102::2a - crucible bac92034-b9e6-4e8b-9ffb-dbba9caec88d in service fd00:1122:3344:102::28 - crucible d9653001-f671-4905-a410-6a7abc358318 in service fd00:1122:3344:102::2b - crucible edaca77e-5806-446a-b00c-125962cd551d in service fd00:1122:3344:102::26 - internal_ntp aac3ab51-9e2b-4605-9bf6-e3eb3681c2b5 in service fd00:1122:3344:102::21 - nexus 29278a22-1ba1-4117-bfdb-39fcb9ae7fd1 in service fd00:1122:3344:102::22 - - METADATA: -+ internal DNS version: (not present in collection) -> 1 -+ external DNS version: (not present in collection) -> 1 diff --git a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt index b135303ead1..5b72615bd74 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt @@ -1,59 +1,144 @@ from: blueprint 979ef428-0bdd-4622-8a72-0719e942b415 to: blueprint 4171ad05-89dd-474b-846b-b007e4346366 - ------------------------------------------------------------------------------------------------------ - zone type zone ID disposition underlay IP status - ------------------------------------------------------------------------------------------------------ - - UNCHANGED SLEDS: - - sled 41f45d9f-766e-4ca6-a881-61ee45c80f57: blueprint zones at generation 2 - crucible 322ee9f1-8903-4542-a0a8-a54cefabdeca in service fd00:1122:3344:103::24 - crucible 4ab1650f-32c5-447f-939d-64b8103a7645 in service fd00:1122:3344:103::2a - crucible 64aa65f8-1ccb-4cd6-9953-027aebdac8ff in service fd00:1122:3344:103::27 - crucible 6e811d86-8aa7-4660-935b-84b4b7721b10 in service fd00:1122:3344:103::2b - crucible 747d2426-68bf-4c22-8806-41d290b5d5f5 in service fd00:1122:3344:103::25 - crucible 7fbd2c38-5dc3-48c4-b061-558a2041d70f in service fd00:1122:3344:103::2c - crucible 8e9e923e-62b1-4cbc-9f59-d6397e338b6b in service fd00:1122:3344:103::29 - crucible b14d5478-1a0e-4b90-b526-36b06339dfc4 in service fd00:1122:3344:103::28 - crucible b40f7c7b-526c-46c8-ae33-67280c280eb7 in service fd00:1122:3344:103::23 - crucible be97b92b-38d6-422a-8c76-d37060f75bd2 in service fd00:1122:3344:103::26 - internal_ntp 267ed614-92af-4b9d-bdba-c2881c2e43a2 in service fd00:1122:3344:103::21 - nexus cc816cfe-3869-4dde-b596-397d41198628 in service fd00:1122:3344:103::22 - - sled 
43677374-8d2f-4deb-8a41-eeea506db8e0: blueprint zones at generation 2 - crucible 02acbe6a-1c88-47e3-94c3-94084cbde098 in service fd00:1122:3344:101::27 - crucible 07c3c805-8888-4fe5-9543-3d2479dbe6f3 in service fd00:1122:3344:101::26 - crucible 10d98a73-ec88-4aff-a7e8-7db6a87880e6 in service fd00:1122:3344:101::24 - crucible 2a455c35-eb3c-4c73-ab6c-d0a706e25316 in service fd00:1122:3344:101::29 - crucible 3eda924f-22a9-4f3e-9a1b-91d1c47601ab in service fd00:1122:3344:101::23 - crucible 587be699-a320-4c79-b320-128d9ecddc0b in service fd00:1122:3344:101::2b - crucible 6fa06115-4959-4913-8e7b-dd70d7651f07 in service fd00:1122:3344:101::2c - crucible 8f3a1cc5-9195-4a30-ad02-b804278fe639 in service fd00:1122:3344:101::28 - crucible a1696cd4-588c-484a-b95b-66e824c0ce05 in service fd00:1122:3344:101::25 - crucible a2079cbc-a69e-41a1-b1e0-fbcb972d03f6 in service fd00:1122:3344:101::2a - internal_ntp 08c7f8aa-1ea9-469b-8cac-2fdbfc11ebcb in service fd00:1122:3344:101::21 - nexus c66ab6d5-ff7a-46d1-9fd0-70cefa352d25 in service fd00:1122:3344:101::22 - - sled 590e3034-d946-4166-b0e5-2d0034197a07: blueprint zones at generation 2 - crucible 18f8fe40-646e-4962-b17a-20e201f3a6e5 in service fd00:1122:3344:102::2a - crucible 56d5d7cf-db2c-40a3-a775-003241ad4820 in service fd00:1122:3344:102::29 - crucible 6af7f4d6-33b6-4eb3-a146-d8e9e4ae9d66 in service fd00:1122:3344:102::2b - crucible 7a9f60d3-2b66-4547-9b63-7d4f7a8b6382 in service fd00:1122:3344:102::26 - crucible 93f2f40c-5616-4d8d-8519-ec6debdcede0 in service fd00:1122:3344:102::2c - crucible ab7ba6df-d401-40bd-940e-faf57c57aa2a in service fd00:1122:3344:102::28 - crucible af322036-371f-437c-8c08-7f40f3f1403b in service fd00:1122:3344:102::23 - crucible d637264f-6f40-44c2-8b7e-a179430210d2 in service fd00:1122:3344:102::25 - crucible dce226c9-7373-4bfa-8a94-79dc472857a6 in service fd00:1122:3344:102::27 - crucible edabedf3-839c-488d-ad6f-508ffa864674 in service fd00:1122:3344:102::24 - internal_ntp 47199d48-534c-4267-a654-d2d90e64b498 in service fd00:1122:3344:102::21 - nexus 704e1fed-f8d6-4cfa-a470-bad27fdc06d1 in service fd00:1122:3344:102::22 - - ADDED SLEDS: - -+ sled b59ec570-2abb-4017-80ce-129d94e7a025: blueprint zones at generation 2 -+ internal_ntp 2d73d30e-ca47-46a8-9c12-917d4ab824b6 in service fd00:1122:3344:104::21 added - - METADATA: - internal DNS version: 1 (unchanged) - external DNS version: 1 (unchanged) + UNCHANGED SLEDS: + + sled 41f45d9f-766e-4ca6-a881-61ee45c80f57: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + 
------------------------------------------------------------------------------------------ + crucible 322ee9f1-8903-4542-a0a8-a54cefabdeca in service fd00:1122:3344:103::24 + crucible 4ab1650f-32c5-447f-939d-64b8103a7645 in service fd00:1122:3344:103::2a + crucible 64aa65f8-1ccb-4cd6-9953-027aebdac8ff in service fd00:1122:3344:103::27 + crucible 6e811d86-8aa7-4660-935b-84b4b7721b10 in service fd00:1122:3344:103::2b + crucible 747d2426-68bf-4c22-8806-41d290b5d5f5 in service fd00:1122:3344:103::25 + crucible 7fbd2c38-5dc3-48c4-b061-558a2041d70f in service fd00:1122:3344:103::2c + crucible 8e9e923e-62b1-4cbc-9f59-d6397e338b6b in service fd00:1122:3344:103::29 + crucible b14d5478-1a0e-4b90-b526-36b06339dfc4 in service fd00:1122:3344:103::28 + crucible b40f7c7b-526c-46c8-ae33-67280c280eb7 in service fd00:1122:3344:103::23 + crucible be97b92b-38d6-422a-8c76-d37060f75bd2 in service fd00:1122:3344:103::26 + internal_ntp 267ed614-92af-4b9d-bdba-c2881c2e43a2 in service fd00:1122:3344:103::21 + nexus cc816cfe-3869-4dde-b596-397d41198628 in service fd00:1122:3344:103::22 + + + sled 43677374-8d2f-4deb-8a41-eeea506db8e0: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 02acbe6a-1c88-47e3-94c3-94084cbde098 in service fd00:1122:3344:101::27 + crucible 07c3c805-8888-4fe5-9543-3d2479dbe6f3 in service fd00:1122:3344:101::26 + crucible 10d98a73-ec88-4aff-a7e8-7db6a87880e6 in service fd00:1122:3344:101::24 + crucible 2a455c35-eb3c-4c73-ab6c-d0a706e25316 in service fd00:1122:3344:101::29 + crucible 3eda924f-22a9-4f3e-9a1b-91d1c47601ab in service fd00:1122:3344:101::23 + crucible 587be699-a320-4c79-b320-128d9ecddc0b in service fd00:1122:3344:101::2b + crucible 6fa06115-4959-4913-8e7b-dd70d7651f07 in service fd00:1122:3344:101::2c + crucible 8f3a1cc5-9195-4a30-ad02-b804278fe639 in service fd00:1122:3344:101::28 + crucible a1696cd4-588c-484a-b95b-66e824c0ce05 in service fd00:1122:3344:101::25 + crucible a2079cbc-a69e-41a1-b1e0-fbcb972d03f6 in service fd00:1122:3344:101::2a + internal_ntp 08c7f8aa-1ea9-469b-8cac-2fdbfc11ebcb in service fd00:1122:3344:101::21 + nexus c66ab6d5-ff7a-46d1-9fd0-70cefa352d25 in service fd00:1122:3344:101::22 + + + sled 590e3034-d946-4166-b0e5-2d0034197a07: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model 
serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 18f8fe40-646e-4962-b17a-20e201f3a6e5 in service fd00:1122:3344:102::2a + crucible 56d5d7cf-db2c-40a3-a775-003241ad4820 in service fd00:1122:3344:102::29 + crucible 6af7f4d6-33b6-4eb3-a146-d8e9e4ae9d66 in service fd00:1122:3344:102::2b + crucible 7a9f60d3-2b66-4547-9b63-7d4f7a8b6382 in service fd00:1122:3344:102::26 + crucible 93f2f40c-5616-4d8d-8519-ec6debdcede0 in service fd00:1122:3344:102::2c + crucible ab7ba6df-d401-40bd-940e-faf57c57aa2a in service fd00:1122:3344:102::28 + crucible af322036-371f-437c-8c08-7f40f3f1403b in service fd00:1122:3344:102::23 + crucible d637264f-6f40-44c2-8b7e-a179430210d2 in service fd00:1122:3344:102::25 + crucible dce226c9-7373-4bfa-8a94-79dc472857a6 in service fd00:1122:3344:102::27 + crucible edabedf3-839c-488d-ad6f-508ffa864674 in service fd00:1122:3344:102::24 + internal_ntp 47199d48-534c-4267-a654-d2d90e64b498 in service fd00:1122:3344:102::21 + nexus 704e1fed-f8d6-4cfa-a470-bad27fdc06d1 in service fd00:1122:3344:102::22 + + + ADDED SLEDS: + + sled b59ec570-2abb-4017-80ce-129d94e7a025: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- ++ fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 ++ fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 ++ fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 ++ fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 ++ fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 ++ fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 ++ fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 ++ fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c ++ fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b ++ fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ ++ internal_ntp 2d73d30e-ca47-46a8-9c12-917d4ab824b6 in service fd00:1122:3344:104::21 + + + METADATA: + internal DNS version: 1 (unchanged) + external DNS version: 1 (unchanged) + diff --git a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt index 89120cf3778..468303a56a5 100644 --- 
a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt @@ -1,69 +1,154 @@ from: blueprint 4171ad05-89dd-474b-846b-b007e4346366 to: blueprint f432fcd5-1284-4058-8b4a-9286a3de6163 - ------------------------------------------------------------------------------------------------------ - zone type zone ID disposition underlay IP status - ------------------------------------------------------------------------------------------------------ - - UNCHANGED SLEDS: - - sled 41f45d9f-766e-4ca6-a881-61ee45c80f57: blueprint zones at generation 2 - crucible 322ee9f1-8903-4542-a0a8-a54cefabdeca in service fd00:1122:3344:103::24 - crucible 4ab1650f-32c5-447f-939d-64b8103a7645 in service fd00:1122:3344:103::2a - crucible 64aa65f8-1ccb-4cd6-9953-027aebdac8ff in service fd00:1122:3344:103::27 - crucible 6e811d86-8aa7-4660-935b-84b4b7721b10 in service fd00:1122:3344:103::2b - crucible 747d2426-68bf-4c22-8806-41d290b5d5f5 in service fd00:1122:3344:103::25 - crucible 7fbd2c38-5dc3-48c4-b061-558a2041d70f in service fd00:1122:3344:103::2c - crucible 8e9e923e-62b1-4cbc-9f59-d6397e338b6b in service fd00:1122:3344:103::29 - crucible b14d5478-1a0e-4b90-b526-36b06339dfc4 in service fd00:1122:3344:103::28 - crucible b40f7c7b-526c-46c8-ae33-67280c280eb7 in service fd00:1122:3344:103::23 - crucible be97b92b-38d6-422a-8c76-d37060f75bd2 in service fd00:1122:3344:103::26 - internal_ntp 267ed614-92af-4b9d-bdba-c2881c2e43a2 in service fd00:1122:3344:103::21 - nexus cc816cfe-3869-4dde-b596-397d41198628 in service fd00:1122:3344:103::22 - - sled 43677374-8d2f-4deb-8a41-eeea506db8e0: blueprint zones at generation 2 - crucible 02acbe6a-1c88-47e3-94c3-94084cbde098 in service fd00:1122:3344:101::27 - crucible 07c3c805-8888-4fe5-9543-3d2479dbe6f3 in service fd00:1122:3344:101::26 - crucible 10d98a73-ec88-4aff-a7e8-7db6a87880e6 in service fd00:1122:3344:101::24 - crucible 2a455c35-eb3c-4c73-ab6c-d0a706e25316 in service fd00:1122:3344:101::29 - crucible 3eda924f-22a9-4f3e-9a1b-91d1c47601ab in service fd00:1122:3344:101::23 - crucible 587be699-a320-4c79-b320-128d9ecddc0b in service fd00:1122:3344:101::2b - crucible 6fa06115-4959-4913-8e7b-dd70d7651f07 in service fd00:1122:3344:101::2c - crucible 8f3a1cc5-9195-4a30-ad02-b804278fe639 in service fd00:1122:3344:101::28 - crucible a1696cd4-588c-484a-b95b-66e824c0ce05 in service fd00:1122:3344:101::25 - crucible a2079cbc-a69e-41a1-b1e0-fbcb972d03f6 in service fd00:1122:3344:101::2a - internal_ntp 08c7f8aa-1ea9-469b-8cac-2fdbfc11ebcb in service fd00:1122:3344:101::21 - nexus c66ab6d5-ff7a-46d1-9fd0-70cefa352d25 in service fd00:1122:3344:101::22 - - sled 590e3034-d946-4166-b0e5-2d0034197a07: blueprint zones at generation 2 - crucible 18f8fe40-646e-4962-b17a-20e201f3a6e5 in service fd00:1122:3344:102::2a - crucible 56d5d7cf-db2c-40a3-a775-003241ad4820 in service fd00:1122:3344:102::29 - crucible 6af7f4d6-33b6-4eb3-a146-d8e9e4ae9d66 in service fd00:1122:3344:102::2b - crucible 7a9f60d3-2b66-4547-9b63-7d4f7a8b6382 in service fd00:1122:3344:102::26 - crucible 93f2f40c-5616-4d8d-8519-ec6debdcede0 in service fd00:1122:3344:102::2c - crucible ab7ba6df-d401-40bd-940e-faf57c57aa2a in service fd00:1122:3344:102::28 - crucible af322036-371f-437c-8c08-7f40f3f1403b in service fd00:1122:3344:102::23 - crucible d637264f-6f40-44c2-8b7e-a179430210d2 in service fd00:1122:3344:102::25 - crucible dce226c9-7373-4bfa-8a94-79dc472857a6 in service fd00:1122:3344:102::27 - crucible edabedf3-839c-488d-ad6f-508ffa864674 
in service fd00:1122:3344:102::24 - internal_ntp 47199d48-534c-4267-a654-d2d90e64b498 in service fd00:1122:3344:102::21 - nexus 704e1fed-f8d6-4cfa-a470-bad27fdc06d1 in service fd00:1122:3344:102::22 - - MODIFIED SLEDS: - -* sled b59ec570-2abb-4017-80ce-129d94e7a025: blueprint zones at generation: 2 -> 3 - internal_ntp 2d73d30e-ca47-46a8-9c12-917d4ab824b6 in service fd00:1122:3344:104::21 -+ crucible 1a20ee3c-f66e-4fca-ab85-2a248aa3d79d in service fd00:1122:3344:104::2b added -+ crucible 28852beb-d0e5-4cba-9adb-e7f0cd4bb864 in service fd00:1122:3344:104::29 added -+ crucible 45556184-7092-4a3d-873f-637976bb133b in service fd00:1122:3344:104::22 added -+ crucible 8215bf7a-10d6-4f40-aeb7-27a196307c37 in service fd00:1122:3344:104::25 added -+ crucible 9d75abfe-47ab-434a-93dd-af50dc0dddde in service fd00:1122:3344:104::23 added -+ crucible a36d291c-7f68-462f-830e-bc29e5841ce2 in service fd00:1122:3344:104::27 added -+ crucible b3a4d434-aaee-4752-8c99-69d88fbcb8c5 in service fd00:1122:3344:104::2a added -+ crucible cf5b636b-a505-4db6-bc32-baf9f53f4371 in service fd00:1122:3344:104::28 added -+ crucible f6125d45-b9cc-4721-ba60-ed4dbb177e41 in service fd00:1122:3344:104::26 added -+ crucible f86e19d2-9145-41cf-be89-6aaa34a73873 in service fd00:1122:3344:104::24 added - - METADATA: - internal DNS version: 1 (unchanged) - external DNS version: 1 (unchanged) + UNCHANGED SLEDS: + + sled 41f45d9f-766e-4ca6-a881-61ee45c80f57: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 322ee9f1-8903-4542-a0a8-a54cefabdeca in service fd00:1122:3344:103::24 + crucible 4ab1650f-32c5-447f-939d-64b8103a7645 in service fd00:1122:3344:103::2a + crucible 64aa65f8-1ccb-4cd6-9953-027aebdac8ff in service fd00:1122:3344:103::27 + crucible 6e811d86-8aa7-4660-935b-84b4b7721b10 in service fd00:1122:3344:103::2b + crucible 747d2426-68bf-4c22-8806-41d290b5d5f5 in service fd00:1122:3344:103::25 + crucible 7fbd2c38-5dc3-48c4-b061-558a2041d70f in service fd00:1122:3344:103::2c + crucible 8e9e923e-62b1-4cbc-9f59-d6397e338b6b in service fd00:1122:3344:103::29 + crucible b14d5478-1a0e-4b90-b526-36b06339dfc4 in service fd00:1122:3344:103::28 + crucible b40f7c7b-526c-46c8-ae33-67280c280eb7 in service fd00:1122:3344:103::23 + crucible be97b92b-38d6-422a-8c76-d37060f75bd2 in service fd00:1122:3344:103::26 + internal_ntp 267ed614-92af-4b9d-bdba-c2881c2e43a2 in service fd00:1122:3344:103::21 + nexus cc816cfe-3869-4dde-b596-397d41198628 in 
service fd00:1122:3344:103::22 + + + sled 43677374-8d2f-4deb-8a41-eeea506db8e0: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 02acbe6a-1c88-47e3-94c3-94084cbde098 in service fd00:1122:3344:101::27 + crucible 07c3c805-8888-4fe5-9543-3d2479dbe6f3 in service fd00:1122:3344:101::26 + crucible 10d98a73-ec88-4aff-a7e8-7db6a87880e6 in service fd00:1122:3344:101::24 + crucible 2a455c35-eb3c-4c73-ab6c-d0a706e25316 in service fd00:1122:3344:101::29 + crucible 3eda924f-22a9-4f3e-9a1b-91d1c47601ab in service fd00:1122:3344:101::23 + crucible 587be699-a320-4c79-b320-128d9ecddc0b in service fd00:1122:3344:101::2b + crucible 6fa06115-4959-4913-8e7b-dd70d7651f07 in service fd00:1122:3344:101::2c + crucible 8f3a1cc5-9195-4a30-ad02-b804278fe639 in service fd00:1122:3344:101::28 + crucible a1696cd4-588c-484a-b95b-66e824c0ce05 in service fd00:1122:3344:101::25 + crucible a2079cbc-a69e-41a1-b1e0-fbcb972d03f6 in service fd00:1122:3344:101::2a + internal_ntp 08c7f8aa-1ea9-469b-8cac-2fdbfc11ebcb in service fd00:1122:3344:101::21 + nexus c66ab6d5-ff7a-46d1-9fd0-70cefa352d25 in service fd00:1122:3344:101::22 + + + sled 590e3034-d946-4166-b0e5-2d0034197a07: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 18f8fe40-646e-4962-b17a-20e201f3a6e5 in service fd00:1122:3344:102::2a + crucible 
56d5d7cf-db2c-40a3-a775-003241ad4820 in service fd00:1122:3344:102::29 + crucible 6af7f4d6-33b6-4eb3-a146-d8e9e4ae9d66 in service fd00:1122:3344:102::2b + crucible 7a9f60d3-2b66-4547-9b63-7d4f7a8b6382 in service fd00:1122:3344:102::26 + crucible 93f2f40c-5616-4d8d-8519-ec6debdcede0 in service fd00:1122:3344:102::2c + crucible ab7ba6df-d401-40bd-940e-faf57c57aa2a in service fd00:1122:3344:102::28 + crucible af322036-371f-437c-8c08-7f40f3f1403b in service fd00:1122:3344:102::23 + crucible d637264f-6f40-44c2-8b7e-a179430210d2 in service fd00:1122:3344:102::25 + crucible dce226c9-7373-4bfa-8a94-79dc472857a6 in service fd00:1122:3344:102::27 + crucible edabedf3-839c-488d-ad6f-508ffa864674 in service fd00:1122:3344:102::24 + internal_ntp 47199d48-534c-4267-a654-d2d90e64b498 in service fd00:1122:3344:102::21 + nexus 704e1fed-f8d6-4cfa-a470-bad27fdc06d1 in service fd00:1122:3344:102::22 + + + MODIFIED SLEDS: + + sled b59ec570-2abb-4017-80ce-129d94e7a025: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones generation 2 -> 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + internal_ntp 2d73d30e-ca47-46a8-9c12-917d4ab824b6 in service fd00:1122:3344:104::21 ++ crucible 1a20ee3c-f66e-4fca-ab85-2a248aa3d79d in service fd00:1122:3344:104::2b ++ crucible 28852beb-d0e5-4cba-9adb-e7f0cd4bb864 in service fd00:1122:3344:104::29 ++ crucible 45556184-7092-4a3d-873f-637976bb133b in service fd00:1122:3344:104::22 ++ crucible 8215bf7a-10d6-4f40-aeb7-27a196307c37 in service fd00:1122:3344:104::25 ++ crucible 9d75abfe-47ab-434a-93dd-af50dc0dddde in service fd00:1122:3344:104::23 ++ crucible a36d291c-7f68-462f-830e-bc29e5841ce2 in service fd00:1122:3344:104::27 ++ crucible b3a4d434-aaee-4752-8c99-69d88fbcb8c5 in service fd00:1122:3344:104::2a ++ crucible cf5b636b-a505-4db6-bc32-baf9f53f4371 in service fd00:1122:3344:104::28 ++ crucible f6125d45-b9cc-4721-ba60-ed4dbb177e41 in service fd00:1122:3344:104::26 ++ crucible f86e19d2-9145-41cf-be89-6aaa34a73873 in service fd00:1122:3344:104::24 + + + METADATA: + internal DNS version: 1 (unchanged) + external DNS version: 1 (unchanged) + diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt index 28f08c9c781..8c94c97188d 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt @@ -1,81 +1,120 @@ from: blueprint 
516e80a3-b362-4fac-bd3c-4559717120dd to: blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e - -------------------------------------------------------------------------------------------------------- - zone type zone ID disposition underlay IP status - -------------------------------------------------------------------------------------------------------- - - UNCHANGED SLEDS: - - sled d67ce8f0-a691-4010-b414-420d82e80527: blueprint zones at generation 2 - crucible 15dbaa30-1539-49d6-970d-ba5962960f33 in service fd00:1122:3344:101::27 - crucible 1ec4cc7b-2f00-4d13-8176-3b9815533ae9 in service fd00:1122:3344:101::24 - crucible 2e65b765-5c41-4519-bf4e-e2a68569afc1 in service fd00:1122:3344:101::23 - crucible 3d4143df-e212-4774-9258-7d9b421fac2e in service fd00:1122:3344:101::25 - crucible 5d9d8fa7-8379-470b-90ba-fe84a3c45512 in service fd00:1122:3344:101::2a - crucible 70232a6d-6c9d-4fa6-a34d-9c73d940db33 in service fd00:1122:3344:101::28 - crucible 8567a616-a709-4c8c-a323-4474675dad5c in service fd00:1122:3344:101::2c - crucible 8b0b8623-930a-41af-9f9b-ca28b1b11139 in service fd00:1122:3344:101::29 - crucible cf87d2a3-d323-44a3-a87e-adc4ef6c75f4 in service fd00:1122:3344:101::2b - crucible eac6c0a0-baa5-4490-9cee-65198b7fbd9c in service fd00:1122:3344:101::26 - internal_ntp ad76d200-5675-444b-b19c-684689ff421f in service fd00:1122:3344:101::21 - nexus e9bf2525-5fa0-4c1b-b52d-481225083845 in service fd00:1122:3344:101::22 - - MODIFIED SLEDS: - -* sled a1b477db-b629-48eb-911d-1ccdafca75b9: blueprint zones at generation: 2 -> 3 -- crucible 1e1ed0cc-1adc-410f-943a-d1a3107de619 in service fd00:1122:3344:103::27 modified -+ ├─ expunged fd00:1122:3344:103::27 -* └─ changed: disposition -- crucible 2307bbed-02ba-493b-89e3-46585c74c8fc in service fd00:1122:3344:103::28 modified -+ ├─ expunged fd00:1122:3344:103::28 -* └─ changed: disposition -- crucible 4e36b7ef-5684-4304-b7c3-3c31aaf83d4f in service fd00:1122:3344:103::23 modified -+ ├─ expunged fd00:1122:3344:103::23 -* └─ changed: disposition -- crucible 603e629d-2599-400e-b879-4134d4cc426e in service fd00:1122:3344:103::2c modified -+ ├─ expunged fd00:1122:3344:103::2c -* └─ changed: disposition -- crucible 9179d6dc-387d-424e-8d62-ed59b2c728f6 in service fd00:1122:3344:103::2a modified -+ ├─ expunged fd00:1122:3344:103::2a -* └─ changed: disposition -- crucible c28d7b4b-a259-45ad-945d-f19ca3c6964c in service fd00:1122:3344:103::29 modified -+ ├─ expunged fd00:1122:3344:103::29 -* └─ changed: disposition -- crucible e29998e7-9ed2-46b6-bb70-4118159fe07f in service fd00:1122:3344:103::26 modified -+ ├─ expunged fd00:1122:3344:103::26 -* └─ changed: disposition -- crucible f06e91a1-0c17-4cca-adbc-1c9b67bdb11d in service fd00:1122:3344:103::2b modified -+ ├─ expunged fd00:1122:3344:103::2b -* └─ changed: disposition -- crucible f11f5c60-1ac7-4630-9a3a-a9bc85c75203 in service fd00:1122:3344:103::25 modified -+ ├─ expunged fd00:1122:3344:103::25 -* └─ changed: disposition -- crucible f231e4eb-3fc9-4964-9d71-2c41644852d9 in service fd00:1122:3344:103::24 modified -+ ├─ expunged fd00:1122:3344:103::24 -* └─ changed: disposition -- internal_ntp c62b87b6-b98d-4d22-ba4f-cee4499e2ba8 in service fd00:1122:3344:103::21 modified -+ ├─ expunged fd00:1122:3344:103::21 -* └─ changed: disposition -- nexus 6a70a233-1900-43c0-9c00-aa9d1f7adfbc in service fd00:1122:3344:103::22 modified -+ ├─ expunged fd00:1122:3344:103::22 -* └─ changed: disposition - -* sled fefcf4cf-f7e7-46b3-b629-058526ce440e: blueprint zones at generation: 2 -> 3 - crucible 
0e2b035e-1de1-48af-8ac0-5316418e3de1 in service fd00:1122:3344:102::2a - crucible 4f8ce495-21dd-48a1-859c-80d34ce394ed in service fd00:1122:3344:102::23 - crucible 5c78756d-6182-4c27-a507-3419e8dbe76b in service fd00:1122:3344:102::28 - crucible a1ae92ac-e1f1-4654-ab54-5b75ba7c44d6 in service fd00:1122:3344:102::24 - crucible a308d3e1-118c-440a-947a-8b6ab7d833ab in service fd00:1122:3344:102::25 - crucible b7402110-d88f-4ca4-8391-4a2fda6ad271 in service fd00:1122:3344:102::29 - crucible b7ae596e-0c85-40b2-bb47-df9f76db3cca in service fd00:1122:3344:102::2b - crucible c552280f-ba02-4f8d-9049-bd269e6b7845 in service fd00:1122:3344:102::26 - crucible cf13b878-47f1-4ba0-b8c2-9f3e15f2ee87 in service fd00:1122:3344:102::2c - crucible e6d0df1f-9f98-4c5a-9540-8444d1185c7d in service fd00:1122:3344:102::27 - internal_ntp f68846ad-4619-4747-8293-a2b4aeeafc5b in service fd00:1122:3344:102::21 - nexus 99c6401d-9796-4ae1-bf0c-9a097cf21c33 in service fd00:1122:3344:102::22 -+ nexus c8851a11-a4f7-4b21-9281-6182fd15dc8d in service fd00:1122:3344:102::2d added - - METADATA: - internal DNS version: 1 (unchanged) - external DNS version: 1 (unchanged) + UNCHANGED SLEDS: + + sled d67ce8f0-a691-4010-b414-420d82e80527: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 15dbaa30-1539-49d6-970d-ba5962960f33 in service fd00:1122:3344:101::27 + crucible 1ec4cc7b-2f00-4d13-8176-3b9815533ae9 in service fd00:1122:3344:101::24 + crucible 2e65b765-5c41-4519-bf4e-e2a68569afc1 in service fd00:1122:3344:101::23 + crucible 3d4143df-e212-4774-9258-7d9b421fac2e in service fd00:1122:3344:101::25 + crucible 5d9d8fa7-8379-470b-90ba-fe84a3c45512 in service fd00:1122:3344:101::2a + crucible 70232a6d-6c9d-4fa6-a34d-9c73d940db33 in service fd00:1122:3344:101::28 + crucible 8567a616-a709-4c8c-a323-4474675dad5c in service fd00:1122:3344:101::2c + crucible 8b0b8623-930a-41af-9f9b-ca28b1b11139 in service fd00:1122:3344:101::29 + crucible cf87d2a3-d323-44a3-a87e-adc4ef6c75f4 in service fd00:1122:3344:101::2b + crucible eac6c0a0-baa5-4490-9cee-65198b7fbd9c in service fd00:1122:3344:101::26 + internal_ntp ad76d200-5675-444b-b19c-684689ff421f in service fd00:1122:3344:101::21 + nexus e9bf2525-5fa0-4c1b-b52d-481225083845 in service fd00:1122:3344:101::22 + + + MODIFIED SLEDS: + + sled a1b477db-b629-48eb-911d-1ccdafca75b9: + + physical disks from generation 1: + ---------------------------------------------------------------------- + vendor 
model serial + ---------------------------------------------------------------------- +- fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 +- fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 +- fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 +- fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 +- fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 +- fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 +- fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 +- fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c +- fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b +- fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones generation 2 -> 3: + ----------------------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + ----------------------------------------------------------------------------------------------------- +* crucible 1e1ed0cc-1adc-410f-943a-d1a3107de619 in service -> expunged fd00:1122:3344:103::27 +* crucible 2307bbed-02ba-493b-89e3-46585c74c8fc in service -> expunged fd00:1122:3344:103::28 +* crucible 4e36b7ef-5684-4304-b7c3-3c31aaf83d4f in service -> expunged fd00:1122:3344:103::23 +* crucible 603e629d-2599-400e-b879-4134d4cc426e in service -> expunged fd00:1122:3344:103::2c +* crucible 9179d6dc-387d-424e-8d62-ed59b2c728f6 in service -> expunged fd00:1122:3344:103::2a +* crucible c28d7b4b-a259-45ad-945d-f19ca3c6964c in service -> expunged fd00:1122:3344:103::29 +* crucible e29998e7-9ed2-46b6-bb70-4118159fe07f in service -> expunged fd00:1122:3344:103::26 +* crucible f06e91a1-0c17-4cca-adbc-1c9b67bdb11d in service -> expunged fd00:1122:3344:103::2b +* crucible f11f5c60-1ac7-4630-9a3a-a9bc85c75203 in service -> expunged fd00:1122:3344:103::25 +* crucible f231e4eb-3fc9-4964-9d71-2c41644852d9 in service -> expunged fd00:1122:3344:103::24 +* internal_ntp c62b87b6-b98d-4d22-ba4f-cee4499e2ba8 in service -> expunged fd00:1122:3344:103::21 +* nexus 6a70a233-1900-43c0-9c00-aa9d1f7adfbc in service -> expunged fd00:1122:3344:103::22 + + + sled fefcf4cf-f7e7-46b3-b629-058526ce440e: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones generation 2 -> 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 0e2b035e-1de1-48af-8ac0-5316418e3de1 in service fd00:1122:3344:102::2a + crucible 
4f8ce495-21dd-48a1-859c-80d34ce394ed in service fd00:1122:3344:102::23 + crucible 5c78756d-6182-4c27-a507-3419e8dbe76b in service fd00:1122:3344:102::28 + crucible a1ae92ac-e1f1-4654-ab54-5b75ba7c44d6 in service fd00:1122:3344:102::24 + crucible a308d3e1-118c-440a-947a-8b6ab7d833ab in service fd00:1122:3344:102::25 + crucible b7402110-d88f-4ca4-8391-4a2fda6ad271 in service fd00:1122:3344:102::29 + crucible b7ae596e-0c85-40b2-bb47-df9f76db3cca in service fd00:1122:3344:102::2b + crucible c552280f-ba02-4f8d-9049-bd269e6b7845 in service fd00:1122:3344:102::26 + crucible cf13b878-47f1-4ba0-b8c2-9f3e15f2ee87 in service fd00:1122:3344:102::2c + crucible e6d0df1f-9f98-4c5a-9540-8444d1185c7d in service fd00:1122:3344:102::27 + internal_ntp f68846ad-4619-4747-8293-a2b4aeeafc5b in service fd00:1122:3344:102::21 + nexus 99c6401d-9796-4ae1-bf0c-9a097cf21c33 in service fd00:1122:3344:102::22 ++ nexus c8851a11-a4f7-4b21-9281-6182fd15dc8d in service fd00:1122:3344:102::2d + + + METADATA: + internal DNS version: 1 (unchanged) + external DNS version: 1 (unchanged) + diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt index ca08bd5c33a..ec94d5d9246 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt @@ -1,56 +1,106 @@ blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e parent: 516e80a3-b362-4fac-bd3c-4559717120dd - -------------------------------------------------------------------------------------------- - zone type zone ID disposition underlay IP - -------------------------------------------------------------------------------------------- - - sled a1b477db-b629-48eb-911d-1ccdafca75b9: blueprint zones at generation 3 - crucible 1e1ed0cc-1adc-410f-943a-d1a3107de619 expunged fd00:1122:3344:103::27 - crucible 2307bbed-02ba-493b-89e3-46585c74c8fc expunged fd00:1122:3344:103::28 - crucible 4e36b7ef-5684-4304-b7c3-3c31aaf83d4f expunged fd00:1122:3344:103::23 - crucible 603e629d-2599-400e-b879-4134d4cc426e expunged fd00:1122:3344:103::2c - crucible 9179d6dc-387d-424e-8d62-ed59b2c728f6 expunged fd00:1122:3344:103::2a - crucible c28d7b4b-a259-45ad-945d-f19ca3c6964c expunged fd00:1122:3344:103::29 - crucible e29998e7-9ed2-46b6-bb70-4118159fe07f expunged fd00:1122:3344:103::26 - crucible f06e91a1-0c17-4cca-adbc-1c9b67bdb11d expunged fd00:1122:3344:103::2b - crucible f11f5c60-1ac7-4630-9a3a-a9bc85c75203 expunged fd00:1122:3344:103::25 - crucible f231e4eb-3fc9-4964-9d71-2c41644852d9 expunged fd00:1122:3344:103::24 - internal_ntp c62b87b6-b98d-4d22-ba4f-cee4499e2ba8 expunged fd00:1122:3344:103::21 - nexus 6a70a233-1900-43c0-9c00-aa9d1f7adfbc expunged fd00:1122:3344:103::22 - - sled d67ce8f0-a691-4010-b414-420d82e80527: blueprint zones at generation 2 - crucible 15dbaa30-1539-49d6-970d-ba5962960f33 in service fd00:1122:3344:101::27 - crucible 1ec4cc7b-2f00-4d13-8176-3b9815533ae9 in service fd00:1122:3344:101::24 - crucible 2e65b765-5c41-4519-bf4e-e2a68569afc1 in service fd00:1122:3344:101::23 - crucible 3d4143df-e212-4774-9258-7d9b421fac2e in service fd00:1122:3344:101::25 - crucible 5d9d8fa7-8379-470b-90ba-fe84a3c45512 in service fd00:1122:3344:101::2a - crucible 70232a6d-6c9d-4fa6-a34d-9c73d940db33 in service fd00:1122:3344:101::28 - crucible 8567a616-a709-4c8c-a323-4474675dad5c in service fd00:1122:3344:101::2c - crucible 8b0b8623-930a-41af-9f9b-ca28b1b11139 in service 
fd00:1122:3344:101::29 - crucible cf87d2a3-d323-44a3-a87e-adc4ef6c75f4 in service fd00:1122:3344:101::2b - crucible eac6c0a0-baa5-4490-9cee-65198b7fbd9c in service fd00:1122:3344:101::26 - internal_ntp ad76d200-5675-444b-b19c-684689ff421f in service fd00:1122:3344:101::21 - nexus e9bf2525-5fa0-4c1b-b52d-481225083845 in service fd00:1122:3344:101::22 - - sled fefcf4cf-f7e7-46b3-b629-058526ce440e: blueprint zones at generation 3 - crucible 0e2b035e-1de1-48af-8ac0-5316418e3de1 in service fd00:1122:3344:102::2a - crucible 4f8ce495-21dd-48a1-859c-80d34ce394ed in service fd00:1122:3344:102::23 - crucible 5c78756d-6182-4c27-a507-3419e8dbe76b in service fd00:1122:3344:102::28 - crucible a1ae92ac-e1f1-4654-ab54-5b75ba7c44d6 in service fd00:1122:3344:102::24 - crucible a308d3e1-118c-440a-947a-8b6ab7d833ab in service fd00:1122:3344:102::25 - crucible b7402110-d88f-4ca4-8391-4a2fda6ad271 in service fd00:1122:3344:102::29 - crucible b7ae596e-0c85-40b2-bb47-df9f76db3cca in service fd00:1122:3344:102::2b - crucible c552280f-ba02-4f8d-9049-bd269e6b7845 in service fd00:1122:3344:102::26 - crucible cf13b878-47f1-4ba0-b8c2-9f3e15f2ee87 in service fd00:1122:3344:102::2c - crucible e6d0df1f-9f98-4c5a-9540-8444d1185c7d in service fd00:1122:3344:102::27 - internal_ntp f68846ad-4619-4747-8293-a2b4aeeafc5b in service fd00:1122:3344:102::21 - nexus 99c6401d-9796-4ae1-bf0c-9a097cf21c33 in service fd00:1122:3344:102::22 - nexus c8851a11-a4f7-4b21-9281-6182fd15dc8d in service fd00:1122:3344:102::2d - -METADATA: - created by: test_blueprint2 - created at: 1970-01-01T00:00:00.000Z - comment: sled a1b477db-b629-48eb-911d-1ccdafca75b9 (sled policy is expunged): 12 zones expunged, sled d67ce8f0-a691-4010-b414-420d82e80527: altered disks, sled fefcf4cf-f7e7-46b3-b629-058526ce440e: altered disks - internal DNS version: 1 - external DNS version: 1 + sled: d67ce8f0-a691-4010-b414-420d82e80527 + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 15dbaa30-1539-49d6-970d-ba5962960f33 in service fd00:1122:3344:101::27 + crucible 1ec4cc7b-2f00-4d13-8176-3b9815533ae9 in service fd00:1122:3344:101::24 + crucible 2e65b765-5c41-4519-bf4e-e2a68569afc1 in service fd00:1122:3344:101::23 + crucible 3d4143df-e212-4774-9258-7d9b421fac2e in service fd00:1122:3344:101::25 + crucible 5d9d8fa7-8379-470b-90ba-fe84a3c45512 in service fd00:1122:3344:101::2a + crucible 70232a6d-6c9d-4fa6-a34d-9c73d940db33 in service fd00:1122:3344:101::28 + crucible 
8567a616-a709-4c8c-a323-4474675dad5c in service fd00:1122:3344:101::2c + crucible 8b0b8623-930a-41af-9f9b-ca28b1b11139 in service fd00:1122:3344:101::29 + crucible cf87d2a3-d323-44a3-a87e-adc4ef6c75f4 in service fd00:1122:3344:101::2b + crucible eac6c0a0-baa5-4490-9cee-65198b7fbd9c in service fd00:1122:3344:101::26 + internal_ntp ad76d200-5675-444b-b19c-684689ff421f in service fd00:1122:3344:101::21 + nexus e9bf2525-5fa0-4c1b-b52d-481225083845 in service fd00:1122:3344:101::22 + + + + sled: fefcf4cf-f7e7-46b3-b629-058526ce440e + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 0e2b035e-1de1-48af-8ac0-5316418e3de1 in service fd00:1122:3344:102::2a + crucible 4f8ce495-21dd-48a1-859c-80d34ce394ed in service fd00:1122:3344:102::23 + crucible 5c78756d-6182-4c27-a507-3419e8dbe76b in service fd00:1122:3344:102::28 + crucible a1ae92ac-e1f1-4654-ab54-5b75ba7c44d6 in service fd00:1122:3344:102::24 + crucible a308d3e1-118c-440a-947a-8b6ab7d833ab in service fd00:1122:3344:102::25 + crucible b7402110-d88f-4ca4-8391-4a2fda6ad271 in service fd00:1122:3344:102::29 + crucible b7ae596e-0c85-40b2-bb47-df9f76db3cca in service fd00:1122:3344:102::2b + crucible c552280f-ba02-4f8d-9049-bd269e6b7845 in service fd00:1122:3344:102::26 + crucible cf13b878-47f1-4ba0-b8c2-9f3e15f2ee87 in service fd00:1122:3344:102::2c + crucible e6d0df1f-9f98-4c5a-9540-8444d1185c7d in service fd00:1122:3344:102::27 + internal_ntp f68846ad-4619-4747-8293-a2b4aeeafc5b in service fd00:1122:3344:102::21 + nexus 99c6401d-9796-4ae1-bf0c-9a097cf21c33 in service fd00:1122:3344:102::22 + nexus c8851a11-a4f7-4b21-9281-6182fd15dc8d in service fd00:1122:3344:102::2d + + + +!a1b477db-b629-48eb-911d-1ccdafca75b9 +WARNING: Zones exist without physical disks! 
+ omicron zones at generation 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 1e1ed0cc-1adc-410f-943a-d1a3107de619 expunged fd00:1122:3344:103::27 + crucible 2307bbed-02ba-493b-89e3-46585c74c8fc expunged fd00:1122:3344:103::28 + crucible 4e36b7ef-5684-4304-b7c3-3c31aaf83d4f expunged fd00:1122:3344:103::23 + crucible 603e629d-2599-400e-b879-4134d4cc426e expunged fd00:1122:3344:103::2c + crucible 9179d6dc-387d-424e-8d62-ed59b2c728f6 expunged fd00:1122:3344:103::2a + crucible c28d7b4b-a259-45ad-945d-f19ca3c6964c expunged fd00:1122:3344:103::29 + crucible e29998e7-9ed2-46b6-bb70-4118159fe07f expunged fd00:1122:3344:103::26 + crucible f06e91a1-0c17-4cca-adbc-1c9b67bdb11d expunged fd00:1122:3344:103::2b + crucible f11f5c60-1ac7-4630-9a3a-a9bc85c75203 expunged fd00:1122:3344:103::25 + crucible f231e4eb-3fc9-4964-9d71-2c41644852d9 expunged fd00:1122:3344:103::24 + internal_ntp c62b87b6-b98d-4d22-ba4f-cee4499e2ba8 expunged fd00:1122:3344:103::21 + nexus 6a70a233-1900-43c0-9c00-aa9d1f7adfbc expunged fd00:1122:3344:103::22 + + + + METADATA: + created by::::::::::: test_blueprint2 + created at::::::::::: 1970-01-01T00:00:00.000Z + comment:::::::::::::: sled a1b477db-b629-48eb-911d-1ccdafca75b9 (sled policy is expunged): 12 zones expunged + internal DNS version: 1 + external DNS version: 1 + diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt index a87243733f3..bc4d2abf710 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt @@ -1,114 +1,197 @@ from: blueprint 4d4e6c38-cd95-4c4e-8f45-6af4d686964b to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 - -------------------------------------------------------------------------------------------------------- - zone type zone ID disposition underlay IP status - -------------------------------------------------------------------------------------------------------- - - UNCHANGED SLEDS: - - sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: blueprint zones at generation 2 - crucible 19fbc4f8-a683-4f22-8f5a-e74782b935be in service fd00:1122:3344:105::26 - crucible 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service fd00:1122:3344:105::2c - crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in service fd00:1122:3344:105::27 - crucible 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in service fd00:1122:3344:105::23 - crucible 9f0abbad-dbd3-4d43-9675-78092217ffd9 in service fd00:1122:3344:105::25 - crucible b0c63f48-01ea-4aae-bb26-fb0dd59d1662 in service fd00:1122:3344:105::28 - crucible c406da50-34b9-4bb4-a460-8f49875d2a6a in service fd00:1122:3344:105::24 - crucible d660d7ed-28c0-45ae-9ace-dc3ecf7e8786 in service fd00:1122:3344:105::2a - crucible e98cc0de-abf6-4da4-a20d-d05c7a9bb1d7 in service fd00:1122:3344:105::2b - crucible f55e6aaf-e8fc-4913-9e3c-8cd1bd4bdad3 in service fd00:1122:3344:105::29 - internal_ntp 7f4e9f9f-08f8-4d14-885d-e977c05525ad in service fd00:1122:3344:105::21 - nexus 6dff7633-66bb-4924-a6ff-2c896e66964b in service fd00:1122:3344:105::22 - - sled 68d24ac5-f341-49ea-a92a-0381b52ab387: blueprint zones at generation 2 - crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 expunged fd00:1122:3344:102::2c - crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc expunged 
fd00:1122:3344:102::23 - crucible 6464d025-4652-4948-919e-740bec5699b1 expunged fd00:1122:3344:102::24 - crucible 6939ce48-b17c-4616-b176-8a419a7697be expunged fd00:1122:3344:102::29 - crucible 878dfddd-3113-4197-a3ea-e0d4dbe9b476 expunged fd00:1122:3344:102::25 - crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 expunged fd00:1122:3344:102::2b - crucible 9fd52961-426f-4e62-a644-b70871103fca expunged fd00:1122:3344:102::26 - crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 expunged fd00:1122:3344:102::27 - crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 expunged fd00:1122:3344:102::28 - crucible c407795c-6c8b-428e-8ab8-b962913c447f expunged fd00:1122:3344:102::2a - internal_ntp f3f2e4f3-0985-4ef6-8336-ce479382d05d expunged fd00:1122:3344:102::21 - nexus 01d58626-e1b0-480f-96be-ac784863c7dc expunged fd00:1122:3344:102::22 - - MODIFIED SLEDS: - -* sled 48d95fef-bc9f-4f50-9a53-1e075836291d: blueprint zones at generation: 2 -> 3 -- crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 in service fd00:1122:3344:103::2c modified -+ ├─ expunged fd00:1122:3344:103::2c -* └─ changed: disposition -- crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea in service fd00:1122:3344:103::25 modified -+ ├─ expunged fd00:1122:3344:103::25 -* └─ changed: disposition -- crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f in service fd00:1122:3344:103::27 modified -+ ├─ expunged fd00:1122:3344:103::27 -* └─ changed: disposition -- crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 in service fd00:1122:3344:103::28 modified -+ ├─ expunged fd00:1122:3344:103::28 -* └─ changed: disposition -- crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb in service fd00:1122:3344:103::24 modified -+ ├─ expunged fd00:1122:3344:103::24 -* └─ changed: disposition -- crucible 67622d61-2df4-414d-aa0e-d1277265f405 in service fd00:1122:3344:103::23 modified -+ ├─ expunged fd00:1122:3344:103::23 -* └─ changed: disposition -- crucible b91b271d-8d80-4f49-99a0-34006ae86063 in service fd00:1122:3344:103::2a modified -+ ├─ expunged fd00:1122:3344:103::2a -* └─ changed: disposition -- crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 in service fd00:1122:3344:103::26 modified -+ ├─ expunged fd00:1122:3344:103::26 -* └─ changed: disposition -- crucible e39d7c9e-182b-48af-af87-58079d723583 in service fd00:1122:3344:103::29 modified -+ ├─ expunged fd00:1122:3344:103::29 -* └─ changed: disposition -- crucible f69f92a1-5007-4bb0-a85b-604dc217154b in service fd00:1122:3344:103::2b modified -+ ├─ expunged fd00:1122:3344:103::2b -* └─ changed: disposition -- internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 in service fd00:1122:3344:103::21 modified -+ ├─ expunged fd00:1122:3344:103::21 -* └─ changed: disposition -- nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb in service fd00:1122:3344:103::22 modified -+ ├─ expunged fd00:1122:3344:103::22 -* └─ changed: disposition - -* sled 75bc286f-2b4b-482c-9431-59272af529da: blueprint zones at generation: 2 -> 3 - crucible 15bb9def-69b8-4d2e-b04f-9fee1143387c in service fd00:1122:3344:104::25 - crucible 23a8fa2b-ef3e-4017-a43f-f7a83953bd7c in service fd00:1122:3344:104::2c - crucible 621509d6-3772-4009-aca1-35eefd1098fb in service fd00:1122:3344:104::28 - crucible 85b8c68a-160d-461d-94dd-1baf175fa75c in service fd00:1122:3344:104::2a - crucible 996d7570-b0df-46d5-aaa4-0c97697cf484 in service fd00:1122:3344:104::26 - crucible a732c489-d29a-4f75-b900-5966385943af in service fd00:1122:3344:104::29 - crucible b1783e95-9598-451d-b6ba-c50b52b428c3 in service fd00:1122:3344:104::24 - crucible c6dd531e-2d1d-423b-acc8-358533dab78c in service 
fd00:1122:3344:104::27 - crucible e4b3e159-3dbe-48cb-8497-e3da92a90e5a in service fd00:1122:3344:104::23 - crucible f0ff59e8-4105-4980-a4bb-a1f4c58de1e3 in service fd00:1122:3344:104::2b - internal_ntp 57b96d5c-b71e-43e4-8869-7d514003d00d in service fd00:1122:3344:104::21 - nexus b4947d31-f70e-4ee0-8817-0ca6cea9b16b in service fd00:1122:3344:104::22 -+ nexus 2ec75441-3d7d-4b4b-9614-af03de5a3666 in service fd00:1122:3344:104::2d added -+ nexus 508abd03-cbfe-4654-9a6d-7f15a1ad32e5 in service fd00:1122:3344:104::2e added -+ nexus 59950bc8-1497-44dd-8cbf-b6502ba921b2 in service fd00:1122:3344:104::2f added - -* sled affab35f-600a-4109-8ea0-34a067a4e0bc: blueprint zones at generation: 2 -> 3 - crucible 0dfbf374-9ef9-430f-b06d-f271bf7f84c4 in service fd00:1122:3344:101::27 - crucible 3aa07966-5899-4789-ace5-f8eeb375c6c3 in service fd00:1122:3344:101::24 - crucible 4ad0e9da-08f8-4d40-b4d3-d17e711b5bbf in service fd00:1122:3344:101::29 - crucible 72c5a909-077d-4ec1-a9d5-ae64ef9d716e in service fd00:1122:3344:101::26 - crucible 95482c25-1e7f-43e8-adf1-e3548a1b3ae0 in service fd00:1122:3344:101::23 - crucible a1c03689-fc62-4ea5-bb72-4d01f5138614 in service fd00:1122:3344:101::2a - crucible a568e92e-4fbd-4b69-acd8-f16277073031 in service fd00:1122:3344:101::2c - crucible bf79a56a-97af-4cc4-94a5-8b20d64c2cda in service fd00:1122:3344:101::28 - crucible c60379ba-4e30-4628-a79a-0ae509aef4c5 in service fd00:1122:3344:101::25 - crucible d47f4996-fac0-4657-bcea-01b1fee6404d in service fd00:1122:3344:101::2b - internal_ntp f1a7b9a7-fc6a-4b23-b829-045ff33117ff in service fd00:1122:3344:101::21 - nexus 15c103f0-ac63-423b-ba5d-1b5fcd563ba3 in service fd00:1122:3344:101::22 -+ nexus 3ca5292f-8a59-4475-bb72-0f43714d0fff in service fd00:1122:3344:101::2e added -+ nexus 99f6d544-8599-4e2b-a55a-82d9e0034662 in service fd00:1122:3344:101::2d added -+ nexus c26b3bda-5561-44a1-a69f-22103fe209a1 in service fd00:1122:3344:101::2f added - - METADATA: - internal DNS version: 1 (unchanged) - external DNS version: 1 (unchanged) + UNCHANGED SLEDS: + + sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 19fbc4f8-a683-4f22-8f5a-e74782b935be in service fd00:1122:3344:105::26 + crucible 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service fd00:1122:3344:105::2c + crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in service fd00:1122:3344:105::27 + crucible 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in 
service fd00:1122:3344:105::23 + crucible 9f0abbad-dbd3-4d43-9675-78092217ffd9 in service fd00:1122:3344:105::25 + crucible b0c63f48-01ea-4aae-bb26-fb0dd59d1662 in service fd00:1122:3344:105::28 + crucible c406da50-34b9-4bb4-a460-8f49875d2a6a in service fd00:1122:3344:105::24 + crucible d660d7ed-28c0-45ae-9ace-dc3ecf7e8786 in service fd00:1122:3344:105::2a + crucible e98cc0de-abf6-4da4-a20d-d05c7a9bb1d7 in service fd00:1122:3344:105::2b + crucible f55e6aaf-e8fc-4913-9e3c-8cd1bd4bdad3 in service fd00:1122:3344:105::29 + internal_ntp 7f4e9f9f-08f8-4d14-885d-e977c05525ad in service fd00:1122:3344:105::21 + nexus 6dff7633-66bb-4924-a6ff-2c896e66964b in service fd00:1122:3344:105::22 + + + MODIFIED SLEDS: + + sled 48d95fef-bc9f-4f50-9a53-1e075836291d: + + physical disks from generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- +- fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 +- fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 +- fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 +- fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 +- fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 +- fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 +- fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 +- fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c +- fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b +- fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones generation 2 -> 3: + ----------------------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + ----------------------------------------------------------------------------------------------------- +* crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 in service -> expunged fd00:1122:3344:103::2c +* crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea in service -> expunged fd00:1122:3344:103::25 +* crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f in service -> expunged fd00:1122:3344:103::27 +* crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 in service -> expunged fd00:1122:3344:103::28 +* crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb in service -> expunged fd00:1122:3344:103::24 +* crucible 67622d61-2df4-414d-aa0e-d1277265f405 in service -> expunged fd00:1122:3344:103::23 +* crucible b91b271d-8d80-4f49-99a0-34006ae86063 in service -> expunged fd00:1122:3344:103::2a +* crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 in service -> expunged fd00:1122:3344:103::26 +* crucible e39d7c9e-182b-48af-af87-58079d723583 in service -> expunged fd00:1122:3344:103::29 +* crucible f69f92a1-5007-4bb0-a85b-604dc217154b in service -> expunged fd00:1122:3344:103::2b +* internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 in service -> expunged fd00:1122:3344:103::21 +* nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb in service -> expunged fd00:1122:3344:103::22 + + + sled 68d24ac5-f341-49ea-a92a-0381b52ab387: + + physical disks from generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- +- fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 +- fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 +- fake-vendor fake-model 
serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 +- fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 +- fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 +- fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 +- fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 +- fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c +- fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b +- fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 expunged fd00:1122:3344:102::2c + crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc expunged fd00:1122:3344:102::23 + crucible 6464d025-4652-4948-919e-740bec5699b1 expunged fd00:1122:3344:102::24 + crucible 6939ce48-b17c-4616-b176-8a419a7697be expunged fd00:1122:3344:102::29 + crucible 878dfddd-3113-4197-a3ea-e0d4dbe9b476 expunged fd00:1122:3344:102::25 + crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 expunged fd00:1122:3344:102::2b + crucible 9fd52961-426f-4e62-a644-b70871103fca expunged fd00:1122:3344:102::26 + crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 expunged fd00:1122:3344:102::27 + crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 expunged fd00:1122:3344:102::28 + crucible c407795c-6c8b-428e-8ab8-b962913c447f expunged fd00:1122:3344:102::2a + internal_ntp f3f2e4f3-0985-4ef6-8336-ce479382d05d expunged fd00:1122:3344:102::21 + nexus 01d58626-e1b0-480f-96be-ac784863c7dc expunged fd00:1122:3344:102::22 + + + sled 75bc286f-2b4b-482c-9431-59272af529da: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones generation 2 -> 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 15bb9def-69b8-4d2e-b04f-9fee1143387c in service fd00:1122:3344:104::25 + crucible 23a8fa2b-ef3e-4017-a43f-f7a83953bd7c in service fd00:1122:3344:104::2c + crucible 621509d6-3772-4009-aca1-35eefd1098fb in service fd00:1122:3344:104::28 + crucible 85b8c68a-160d-461d-94dd-1baf175fa75c in service fd00:1122:3344:104::2a + crucible 996d7570-b0df-46d5-aaa4-0c97697cf484 in service fd00:1122:3344:104::26 + crucible a732c489-d29a-4f75-b900-5966385943af in service fd00:1122:3344:104::29 + crucible b1783e95-9598-451d-b6ba-c50b52b428c3 in service fd00:1122:3344:104::24 + 
crucible c6dd531e-2d1d-423b-acc8-358533dab78c in service fd00:1122:3344:104::27 + crucible e4b3e159-3dbe-48cb-8497-e3da92a90e5a in service fd00:1122:3344:104::23 + crucible f0ff59e8-4105-4980-a4bb-a1f4c58de1e3 in service fd00:1122:3344:104::2b + internal_ntp 57b96d5c-b71e-43e4-8869-7d514003d00d in service fd00:1122:3344:104::21 + nexus b4947d31-f70e-4ee0-8817-0ca6cea9b16b in service fd00:1122:3344:104::22 ++ nexus 2ec75441-3d7d-4b4b-9614-af03de5a3666 in service fd00:1122:3344:104::2d ++ nexus 508abd03-cbfe-4654-9a6d-7f15a1ad32e5 in service fd00:1122:3344:104::2e ++ nexus 59950bc8-1497-44dd-8cbf-b6502ba921b2 in service fd00:1122:3344:104::2f + + + sled affab35f-600a-4109-8ea0-34a067a4e0bc: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones generation 2 -> 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 0dfbf374-9ef9-430f-b06d-f271bf7f84c4 in service fd00:1122:3344:101::27 + crucible 3aa07966-5899-4789-ace5-f8eeb375c6c3 in service fd00:1122:3344:101::24 + crucible 4ad0e9da-08f8-4d40-b4d3-d17e711b5bbf in service fd00:1122:3344:101::29 + crucible 72c5a909-077d-4ec1-a9d5-ae64ef9d716e in service fd00:1122:3344:101::26 + crucible 95482c25-1e7f-43e8-adf1-e3548a1b3ae0 in service fd00:1122:3344:101::23 + crucible a1c03689-fc62-4ea5-bb72-4d01f5138614 in service fd00:1122:3344:101::2a + crucible a568e92e-4fbd-4b69-acd8-f16277073031 in service fd00:1122:3344:101::2c + crucible bf79a56a-97af-4cc4-94a5-8b20d64c2cda in service fd00:1122:3344:101::28 + crucible c60379ba-4e30-4628-a79a-0ae509aef4c5 in service fd00:1122:3344:101::25 + crucible d47f4996-fac0-4657-bcea-01b1fee6404d in service fd00:1122:3344:101::2b + internal_ntp f1a7b9a7-fc6a-4b23-b829-045ff33117ff in service fd00:1122:3344:101::21 + nexus 15c103f0-ac63-423b-ba5d-1b5fcd563ba3 in service fd00:1122:3344:101::22 ++ nexus 3ca5292f-8a59-4475-bb72-0f43714d0fff in service fd00:1122:3344:101::2e ++ nexus 99f6d544-8599-4e2b-a55a-82d9e0034662 in service fd00:1122:3344:101::2d ++ nexus c26b3bda-5561-44a1-a69f-22103fe209a1 in service fd00:1122:3344:101::2f + + + METADATA: + internal DNS version: 1 (unchanged) + external DNS version: 1 (unchanged) + diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt index ec6c505c876..c46b3c488eb 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt 
@@ -1,99 +1,222 @@ from: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 - -------------------------------------------------------------------------------------------------------- - zone type zone ID disposition underlay IP status - -------------------------------------------------------------------------------------------------------- - - UNCHANGED SLEDS: - - sled 75bc286f-2b4b-482c-9431-59272af529da: blueprint zones at generation 3 - crucible 15bb9def-69b8-4d2e-b04f-9fee1143387c in service fd00:1122:3344:104::25 - crucible 23a8fa2b-ef3e-4017-a43f-f7a83953bd7c in service fd00:1122:3344:104::2c - crucible 621509d6-3772-4009-aca1-35eefd1098fb in service fd00:1122:3344:104::28 - crucible 85b8c68a-160d-461d-94dd-1baf175fa75c in service fd00:1122:3344:104::2a - crucible 996d7570-b0df-46d5-aaa4-0c97697cf484 in service fd00:1122:3344:104::26 - crucible a732c489-d29a-4f75-b900-5966385943af in service fd00:1122:3344:104::29 - crucible b1783e95-9598-451d-b6ba-c50b52b428c3 in service fd00:1122:3344:104::24 - crucible c6dd531e-2d1d-423b-acc8-358533dab78c in service fd00:1122:3344:104::27 - crucible e4b3e159-3dbe-48cb-8497-e3da92a90e5a in service fd00:1122:3344:104::23 - crucible f0ff59e8-4105-4980-a4bb-a1f4c58de1e3 in service fd00:1122:3344:104::2b - internal_ntp 57b96d5c-b71e-43e4-8869-7d514003d00d in service fd00:1122:3344:104::21 - nexus 2ec75441-3d7d-4b4b-9614-af03de5a3666 in service fd00:1122:3344:104::2d - nexus 508abd03-cbfe-4654-9a6d-7f15a1ad32e5 in service fd00:1122:3344:104::2e - nexus 59950bc8-1497-44dd-8cbf-b6502ba921b2 in service fd00:1122:3344:104::2f - nexus b4947d31-f70e-4ee0-8817-0ca6cea9b16b in service fd00:1122:3344:104::22 - - sled affab35f-600a-4109-8ea0-34a067a4e0bc: blueprint zones at generation 3 - crucible 0dfbf374-9ef9-430f-b06d-f271bf7f84c4 in service fd00:1122:3344:101::27 - crucible 3aa07966-5899-4789-ace5-f8eeb375c6c3 in service fd00:1122:3344:101::24 - crucible 4ad0e9da-08f8-4d40-b4d3-d17e711b5bbf in service fd00:1122:3344:101::29 - crucible 72c5a909-077d-4ec1-a9d5-ae64ef9d716e in service fd00:1122:3344:101::26 - crucible 95482c25-1e7f-43e8-adf1-e3548a1b3ae0 in service fd00:1122:3344:101::23 - crucible a1c03689-fc62-4ea5-bb72-4d01f5138614 in service fd00:1122:3344:101::2a - crucible a568e92e-4fbd-4b69-acd8-f16277073031 in service fd00:1122:3344:101::2c - crucible bf79a56a-97af-4cc4-94a5-8b20d64c2cda in service fd00:1122:3344:101::28 - crucible c60379ba-4e30-4628-a79a-0ae509aef4c5 in service fd00:1122:3344:101::25 - crucible d47f4996-fac0-4657-bcea-01b1fee6404d in service fd00:1122:3344:101::2b - internal_ntp f1a7b9a7-fc6a-4b23-b829-045ff33117ff in service fd00:1122:3344:101::21 - nexus 15c103f0-ac63-423b-ba5d-1b5fcd563ba3 in service fd00:1122:3344:101::22 - nexus 3ca5292f-8a59-4475-bb72-0f43714d0fff in service fd00:1122:3344:101::2e - nexus 99f6d544-8599-4e2b-a55a-82d9e0034662 in service fd00:1122:3344:101::2d - nexus c26b3bda-5561-44a1-a69f-22103fe209a1 in service fd00:1122:3344:101::2f - - REMOVED SLEDS: - -- sled 68d24ac5-f341-49ea-a92a-0381b52ab387: blueprint zones at generation 2 -- crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 expunged fd00:1122:3344:102::2c removed -- crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc expunged fd00:1122:3344:102::23 removed -- crucible 6464d025-4652-4948-919e-740bec5699b1 expunged fd00:1122:3344:102::24 removed -- crucible 6939ce48-b17c-4616-b176-8a419a7697be expunged fd00:1122:3344:102::29 removed -- crucible 878dfddd-3113-4197-a3ea-e0d4dbe9b476 expunged fd00:1122:3344:102::25 removed 
-- crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 expunged fd00:1122:3344:102::2b removed -- crucible 9fd52961-426f-4e62-a644-b70871103fca expunged fd00:1122:3344:102::26 removed -- crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 expunged fd00:1122:3344:102::27 removed -- crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 expunged fd00:1122:3344:102::28 removed -- crucible c407795c-6c8b-428e-8ab8-b962913c447f expunged fd00:1122:3344:102::2a removed -- internal_ntp f3f2e4f3-0985-4ef6-8336-ce479382d05d expunged fd00:1122:3344:102::21 removed -- nexus 01d58626-e1b0-480f-96be-ac784863c7dc expunged fd00:1122:3344:102::22 removed - - MODIFIED SLEDS: - -* sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: blueprint zones at generation: 2 -! warning: generation should have changed - crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in service fd00:1122:3344:105::27 - crucible 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in service fd00:1122:3344:105::23 - crucible 9f0abbad-dbd3-4d43-9675-78092217ffd9 in service fd00:1122:3344:105::25 - crucible b0c63f48-01ea-4aae-bb26-fb0dd59d1662 in service fd00:1122:3344:105::28 - crucible c406da50-34b9-4bb4-a460-8f49875d2a6a in service fd00:1122:3344:105::24 - crucible d660d7ed-28c0-45ae-9ace-dc3ecf7e8786 in service fd00:1122:3344:105::2a - crucible e98cc0de-abf6-4da4-a20d-d05c7a9bb1d7 in service fd00:1122:3344:105::2b - crucible f55e6aaf-e8fc-4913-9e3c-8cd1bd4bdad3 in service fd00:1122:3344:105::29 -- crucible 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service fd00:1122:3344:105::2c removed -- crucible 19fbc4f8-a683-4f22-8f5a-e74782b935be in service fd00:1122:3344:105::26 modified -+ ├─ quiesced fd00:1122:3344:105::26 -* └─ changed: disposition -- internal_ntp 7f4e9f9f-08f8-4d14-885d-e977c05525ad in service fd00:1122:3344:105::21 modified -+ ├─ in service fd01:1122:3344:105::21 -* └─ changed: underlay IP -- nexus 6dff7633-66bb-4924-a6ff-2c896e66964b in service fd00:1122:3344:105::22 modified -+ ├─ in service fd00:1122:3344:105::22 -* └─ changed: zone type config - -* sled 48d95fef-bc9f-4f50-9a53-1e075836291d: blueprint zones at generation: 3 -> 4 -- crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 expunged fd00:1122:3344:103::2c removed -- crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea expunged fd00:1122:3344:103::25 removed -- crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f expunged fd00:1122:3344:103::27 removed -- crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 expunged fd00:1122:3344:103::28 removed -- crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb expunged fd00:1122:3344:103::24 removed -- crucible 67622d61-2df4-414d-aa0e-d1277265f405 expunged fd00:1122:3344:103::23 removed -- crucible b91b271d-8d80-4f49-99a0-34006ae86063 expunged fd00:1122:3344:103::2a removed -- crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 expunged fd00:1122:3344:103::26 removed -- crucible e39d7c9e-182b-48af-af87-58079d723583 expunged fd00:1122:3344:103::29 removed -- crucible f69f92a1-5007-4bb0-a85b-604dc217154b expunged fd00:1122:3344:103::2b removed -- internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 expunged fd00:1122:3344:103::21 removed -- nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb expunged fd00:1122:3344:103::22 removed - - METADATA: - internal DNS version: 1 (unchanged) -* external DNS version: 1 -> 2 + UNCHANGED SLEDS: + + sled 75bc286f-2b4b-482c-9431-59272af529da: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model 
serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 15bb9def-69b8-4d2e-b04f-9fee1143387c in service fd00:1122:3344:104::25 + crucible 23a8fa2b-ef3e-4017-a43f-f7a83953bd7c in service fd00:1122:3344:104::2c + crucible 621509d6-3772-4009-aca1-35eefd1098fb in service fd00:1122:3344:104::28 + crucible 85b8c68a-160d-461d-94dd-1baf175fa75c in service fd00:1122:3344:104::2a + crucible 996d7570-b0df-46d5-aaa4-0c97697cf484 in service fd00:1122:3344:104::26 + crucible a732c489-d29a-4f75-b900-5966385943af in service fd00:1122:3344:104::29 + crucible b1783e95-9598-451d-b6ba-c50b52b428c3 in service fd00:1122:3344:104::24 + crucible c6dd531e-2d1d-423b-acc8-358533dab78c in service fd00:1122:3344:104::27 + crucible e4b3e159-3dbe-48cb-8497-e3da92a90e5a in service fd00:1122:3344:104::23 + crucible f0ff59e8-4105-4980-a4bb-a1f4c58de1e3 in service fd00:1122:3344:104::2b + internal_ntp 57b96d5c-b71e-43e4-8869-7d514003d00d in service fd00:1122:3344:104::21 + nexus 2ec75441-3d7d-4b4b-9614-af03de5a3666 in service fd00:1122:3344:104::2d + nexus 508abd03-cbfe-4654-9a6d-7f15a1ad32e5 in service fd00:1122:3344:104::2e + nexus 59950bc8-1497-44dd-8cbf-b6502ba921b2 in service fd00:1122:3344:104::2f + nexus b4947d31-f70e-4ee0-8817-0ca6cea9b16b in service fd00:1122:3344:104::22 + + + sled affab35f-600a-4109-8ea0-34a067a4e0bc: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 0dfbf374-9ef9-430f-b06d-f271bf7f84c4 in service fd00:1122:3344:101::27 + crucible 3aa07966-5899-4789-ace5-f8eeb375c6c3 in service fd00:1122:3344:101::24 + crucible 
4ad0e9da-08f8-4d40-b4d3-d17e711b5bbf in service fd00:1122:3344:101::29 + crucible 72c5a909-077d-4ec1-a9d5-ae64ef9d716e in service fd00:1122:3344:101::26 + crucible 95482c25-1e7f-43e8-adf1-e3548a1b3ae0 in service fd00:1122:3344:101::23 + crucible a1c03689-fc62-4ea5-bb72-4d01f5138614 in service fd00:1122:3344:101::2a + crucible a568e92e-4fbd-4b69-acd8-f16277073031 in service fd00:1122:3344:101::2c + crucible bf79a56a-97af-4cc4-94a5-8b20d64c2cda in service fd00:1122:3344:101::28 + crucible c60379ba-4e30-4628-a79a-0ae509aef4c5 in service fd00:1122:3344:101::25 + crucible d47f4996-fac0-4657-bcea-01b1fee6404d in service fd00:1122:3344:101::2b + internal_ntp f1a7b9a7-fc6a-4b23-b829-045ff33117ff in service fd00:1122:3344:101::21 + nexus 15c103f0-ac63-423b-ba5d-1b5fcd563ba3 in service fd00:1122:3344:101::22 + nexus 3ca5292f-8a59-4475-bb72-0f43714d0fff in service fd00:1122:3344:101::2e + nexus 99f6d544-8599-4e2b-a55a-82d9e0034662 in service fd00:1122:3344:101::2d + nexus c26b3bda-5561-44a1-a69f-22103fe209a1 in service fd00:1122:3344:101::2f + + + REMOVED SLEDS: + + sled 68d24ac5-f341-49ea-a92a-0381b52ab387: + + omicron zones from generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ +- crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 expunged fd00:1122:3344:102::2c +- crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc expunged fd00:1122:3344:102::23 +- crucible 6464d025-4652-4948-919e-740bec5699b1 expunged fd00:1122:3344:102::24 +- crucible 6939ce48-b17c-4616-b176-8a419a7697be expunged fd00:1122:3344:102::29 +- crucible 878dfddd-3113-4197-a3ea-e0d4dbe9b476 expunged fd00:1122:3344:102::25 +- crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 expunged fd00:1122:3344:102::2b +- crucible 9fd52961-426f-4e62-a644-b70871103fca expunged fd00:1122:3344:102::26 +- crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 expunged fd00:1122:3344:102::27 +- crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 expunged fd00:1122:3344:102::28 +- crucible c407795c-6c8b-428e-8ab8-b962913c447f expunged fd00:1122:3344:102::2a +- internal_ntp f3f2e4f3-0985-4ef6-8336-ce479382d05d expunged fd00:1122:3344:102::21 +- nexus 01d58626-e1b0-480f-96be-ac784863c7dc expunged fd00:1122:3344:102::22 + + + MODIFIED SLEDS: + + sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + -------------------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + 
-------------------------------------------------------------------------------------------------- + crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in service fd00:1122:3344:105::27 + crucible 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in service fd00:1122:3344:105::23 + crucible 9f0abbad-dbd3-4d43-9675-78092217ffd9 in service fd00:1122:3344:105::25 + crucible b0c63f48-01ea-4aae-bb26-fb0dd59d1662 in service fd00:1122:3344:105::28 + crucible c406da50-34b9-4bb4-a460-8f49875d2a6a in service fd00:1122:3344:105::24 + crucible d660d7ed-28c0-45ae-9ace-dc3ecf7e8786 in service fd00:1122:3344:105::2a + crucible e98cc0de-abf6-4da4-a20d-d05c7a9bb1d7 in service fd00:1122:3344:105::2b + crucible f55e6aaf-e8fc-4913-9e3c-8cd1bd4bdad3 in service fd00:1122:3344:105::29 +- crucible 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service fd00:1122:3344:105::2c +* crucible 19fbc4f8-a683-4f22-8f5a-e74782b935be in service -> quiesced fd00:1122:3344:105::26 + + + sled 48d95fef-bc9f-4f50-9a53-1e075836291d: + + omicron zones generation 3 -> 4: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ +- crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 expunged fd00:1122:3344:103::2c +- crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea expunged fd00:1122:3344:103::25 +- crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f expunged fd00:1122:3344:103::27 +- crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 expunged fd00:1122:3344:103::28 +- crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb expunged fd00:1122:3344:103::24 +- crucible 67622d61-2df4-414d-aa0e-d1277265f405 expunged fd00:1122:3344:103::23 +- crucible b91b271d-8d80-4f49-99a0-34006ae86063 expunged fd00:1122:3344:103::2a +- crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 expunged fd00:1122:3344:103::26 +- crucible e39d7c9e-182b-48af-af87-58079d723583 expunged fd00:1122:3344:103::29 +- crucible f69f92a1-5007-4bb0-a85b-604dc217154b expunged fd00:1122:3344:103::2b +- internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 expunged fd00:1122:3344:103::21 +- nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb expunged fd00:1122:3344:103::22 + + +ERRORS: + + sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9 + + zone diff errors: before gen 2, after gen 2 + + zone id: 6dff7633-66bb-4924-a6ff-2c896e66964b + reason: mismatched zone type: after: Nexus( + Nexus { + internal_address: [fd01:1122:3344:105::22]:12221, + external_ip: OmicronZoneExternalFloatingIp { + id: cd63774a-2e2f-49ce-a3df-33e3b5d02650 (external_ip), + ip: 192.0.2.2, + }, + nic: NetworkInterface { + id: 99402426-92dd-4975-9347-907e130d6b79, + kind: Service { + id: 6dff7633-66bb-4924-a6ff-2c896e66964b, + }, + name: Name( + "nexus-6dff7633-66bb-4924-a6ff-2c896e66964b", + ), + ip: 172.30.2.5, + mac: MacAddr( + MacAddr6( + [ + 168, + 64, + 37, + 255, + 128, + 0, + ], + ), + ), + subnet: V4( + Ipv4Net( + Ipv4Network { + addr: 172.30.2.0, + prefix: 24, + }, + ), + ), + vni: Vni( + 100, + ), + primary: true, + slot: 0, + }, + external_tls: false, + external_dns_servers: [], + }, +) + + zone id: 7f4e9f9f-08f8-4d14-885d-e977c05525ad + reason: mismatched underlay address: before: fd00:1122:3344:105::21, after: fd01:1122:3344:105::21 + + METADATA: + internal DNS version: 1 (unchanged) +* external DNS version: 1 -> 2 + diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt index 
294a12f77a1..454ce6779e3 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt @@ -1,89 +1,170 @@ blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b - -------------------------------------------------------------------------------------------- - zone type zone ID disposition underlay IP - -------------------------------------------------------------------------------------------- - - sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: blueprint zones at generation 2 - crucible 19fbc4f8-a683-4f22-8f5a-e74782b935be in service fd00:1122:3344:105::26 - crucible 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service fd00:1122:3344:105::2c - crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in service fd00:1122:3344:105::27 - crucible 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in service fd00:1122:3344:105::23 - crucible 9f0abbad-dbd3-4d43-9675-78092217ffd9 in service fd00:1122:3344:105::25 - crucible b0c63f48-01ea-4aae-bb26-fb0dd59d1662 in service fd00:1122:3344:105::28 - crucible c406da50-34b9-4bb4-a460-8f49875d2a6a in service fd00:1122:3344:105::24 - crucible d660d7ed-28c0-45ae-9ace-dc3ecf7e8786 in service fd00:1122:3344:105::2a - crucible e98cc0de-abf6-4da4-a20d-d05c7a9bb1d7 in service fd00:1122:3344:105::2b - crucible f55e6aaf-e8fc-4913-9e3c-8cd1bd4bdad3 in service fd00:1122:3344:105::29 - internal_ntp 7f4e9f9f-08f8-4d14-885d-e977c05525ad in service fd00:1122:3344:105::21 - nexus 6dff7633-66bb-4924-a6ff-2c896e66964b in service fd00:1122:3344:105::22 - - sled 48d95fef-bc9f-4f50-9a53-1e075836291d: blueprint zones at generation 3 - crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 expunged fd00:1122:3344:103::2c - crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea expunged fd00:1122:3344:103::25 - crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f expunged fd00:1122:3344:103::27 - crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 expunged fd00:1122:3344:103::28 - crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb expunged fd00:1122:3344:103::24 - crucible 67622d61-2df4-414d-aa0e-d1277265f405 expunged fd00:1122:3344:103::23 - crucible b91b271d-8d80-4f49-99a0-34006ae86063 expunged fd00:1122:3344:103::2a - crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 expunged fd00:1122:3344:103::26 - crucible e39d7c9e-182b-48af-af87-58079d723583 expunged fd00:1122:3344:103::29 - crucible f69f92a1-5007-4bb0-a85b-604dc217154b expunged fd00:1122:3344:103::2b - internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 expunged fd00:1122:3344:103::21 - nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb expunged fd00:1122:3344:103::22 - - sled 68d24ac5-f341-49ea-a92a-0381b52ab387: blueprint zones at generation 2 - crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 expunged fd00:1122:3344:102::2c - crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc expunged fd00:1122:3344:102::23 - crucible 6464d025-4652-4948-919e-740bec5699b1 expunged fd00:1122:3344:102::24 - crucible 6939ce48-b17c-4616-b176-8a419a7697be expunged fd00:1122:3344:102::29 - crucible 878dfddd-3113-4197-a3ea-e0d4dbe9b476 expunged fd00:1122:3344:102::25 - crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 expunged fd00:1122:3344:102::2b - crucible 9fd52961-426f-4e62-a644-b70871103fca expunged fd00:1122:3344:102::26 - crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 expunged fd00:1122:3344:102::27 - crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 expunged fd00:1122:3344:102::28 - crucible c407795c-6c8b-428e-8ab8-b962913c447f expunged fd00:1122:3344:102::2a - internal_ntp 
f3f2e4f3-0985-4ef6-8336-ce479382d05d expunged fd00:1122:3344:102::21 - nexus 01d58626-e1b0-480f-96be-ac784863c7dc expunged fd00:1122:3344:102::22 - - sled 75bc286f-2b4b-482c-9431-59272af529da: blueprint zones at generation 3 - crucible 15bb9def-69b8-4d2e-b04f-9fee1143387c in service fd00:1122:3344:104::25 - crucible 23a8fa2b-ef3e-4017-a43f-f7a83953bd7c in service fd00:1122:3344:104::2c - crucible 621509d6-3772-4009-aca1-35eefd1098fb in service fd00:1122:3344:104::28 - crucible 85b8c68a-160d-461d-94dd-1baf175fa75c in service fd00:1122:3344:104::2a - crucible 996d7570-b0df-46d5-aaa4-0c97697cf484 in service fd00:1122:3344:104::26 - crucible a732c489-d29a-4f75-b900-5966385943af in service fd00:1122:3344:104::29 - crucible b1783e95-9598-451d-b6ba-c50b52b428c3 in service fd00:1122:3344:104::24 - crucible c6dd531e-2d1d-423b-acc8-358533dab78c in service fd00:1122:3344:104::27 - crucible e4b3e159-3dbe-48cb-8497-e3da92a90e5a in service fd00:1122:3344:104::23 - crucible f0ff59e8-4105-4980-a4bb-a1f4c58de1e3 in service fd00:1122:3344:104::2b - internal_ntp 57b96d5c-b71e-43e4-8869-7d514003d00d in service fd00:1122:3344:104::21 - nexus 2ec75441-3d7d-4b4b-9614-af03de5a3666 in service fd00:1122:3344:104::2d - nexus 508abd03-cbfe-4654-9a6d-7f15a1ad32e5 in service fd00:1122:3344:104::2e - nexus 59950bc8-1497-44dd-8cbf-b6502ba921b2 in service fd00:1122:3344:104::2f - nexus b4947d31-f70e-4ee0-8817-0ca6cea9b16b in service fd00:1122:3344:104::22 - - sled affab35f-600a-4109-8ea0-34a067a4e0bc: blueprint zones at generation 3 - crucible 0dfbf374-9ef9-430f-b06d-f271bf7f84c4 in service fd00:1122:3344:101::27 - crucible 3aa07966-5899-4789-ace5-f8eeb375c6c3 in service fd00:1122:3344:101::24 - crucible 4ad0e9da-08f8-4d40-b4d3-d17e711b5bbf in service fd00:1122:3344:101::29 - crucible 72c5a909-077d-4ec1-a9d5-ae64ef9d716e in service fd00:1122:3344:101::26 - crucible 95482c25-1e7f-43e8-adf1-e3548a1b3ae0 in service fd00:1122:3344:101::23 - crucible a1c03689-fc62-4ea5-bb72-4d01f5138614 in service fd00:1122:3344:101::2a - crucible a568e92e-4fbd-4b69-acd8-f16277073031 in service fd00:1122:3344:101::2c - crucible bf79a56a-97af-4cc4-94a5-8b20d64c2cda in service fd00:1122:3344:101::28 - crucible c60379ba-4e30-4628-a79a-0ae509aef4c5 in service fd00:1122:3344:101::25 - crucible d47f4996-fac0-4657-bcea-01b1fee6404d in service fd00:1122:3344:101::2b - internal_ntp f1a7b9a7-fc6a-4b23-b829-045ff33117ff in service fd00:1122:3344:101::21 - nexus 15c103f0-ac63-423b-ba5d-1b5fcd563ba3 in service fd00:1122:3344:101::22 - nexus 3ca5292f-8a59-4475-bb72-0f43714d0fff in service fd00:1122:3344:101::2e - nexus 99f6d544-8599-4e2b-a55a-82d9e0034662 in service fd00:1122:3344:101::2d - nexus c26b3bda-5561-44a1-a69f-22103fe209a1 in service fd00:1122:3344:101::2f - -METADATA: - created by: test_blueprint2 - created at: 1970-01-01T00:00:00.000Z - comment: sled 48d95fef-bc9f-4f50-9a53-1e075836291d (sled policy is expunged): 12 zones expunged, sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: altered disks, sled 75bc286f-2b4b-482c-9431-59272af529da: altered disks, sled affab35f-600a-4109-8ea0-34a067a4e0bc: altered disks - internal DNS version: 1 - external DNS version: 1 + sled: 2d1cb4f2-cf44-40fc-b118-85036eb732a9 + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor 
fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 19fbc4f8-a683-4f22-8f5a-e74782b935be in service fd00:1122:3344:105::26 + crucible 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service fd00:1122:3344:105::2c + crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in service fd00:1122:3344:105::27 + crucible 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in service fd00:1122:3344:105::23 + crucible 9f0abbad-dbd3-4d43-9675-78092217ffd9 in service fd00:1122:3344:105::25 + crucible b0c63f48-01ea-4aae-bb26-fb0dd59d1662 in service fd00:1122:3344:105::28 + crucible c406da50-34b9-4bb4-a460-8f49875d2a6a in service fd00:1122:3344:105::24 + crucible d660d7ed-28c0-45ae-9ace-dc3ecf7e8786 in service fd00:1122:3344:105::2a + crucible e98cc0de-abf6-4da4-a20d-d05c7a9bb1d7 in service fd00:1122:3344:105::2b + crucible f55e6aaf-e8fc-4913-9e3c-8cd1bd4bdad3 in service fd00:1122:3344:105::29 + internal_ntp 7f4e9f9f-08f8-4d14-885d-e977c05525ad in service fd00:1122:3344:105::21 + nexus 6dff7633-66bb-4924-a6ff-2c896e66964b in service fd00:1122:3344:105::22 + + + + sled: 75bc286f-2b4b-482c-9431-59272af529da + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 15bb9def-69b8-4d2e-b04f-9fee1143387c in service fd00:1122:3344:104::25 + crucible 23a8fa2b-ef3e-4017-a43f-f7a83953bd7c in service fd00:1122:3344:104::2c + crucible 621509d6-3772-4009-aca1-35eefd1098fb in service fd00:1122:3344:104::28 + crucible 85b8c68a-160d-461d-94dd-1baf175fa75c in service fd00:1122:3344:104::2a + crucible 996d7570-b0df-46d5-aaa4-0c97697cf484 in service fd00:1122:3344:104::26 + crucible a732c489-d29a-4f75-b900-5966385943af in service fd00:1122:3344:104::29 + crucible b1783e95-9598-451d-b6ba-c50b52b428c3 in service 
fd00:1122:3344:104::24 + crucible c6dd531e-2d1d-423b-acc8-358533dab78c in service fd00:1122:3344:104::27 + crucible e4b3e159-3dbe-48cb-8497-e3da92a90e5a in service fd00:1122:3344:104::23 + crucible f0ff59e8-4105-4980-a4bb-a1f4c58de1e3 in service fd00:1122:3344:104::2b + internal_ntp 57b96d5c-b71e-43e4-8869-7d514003d00d in service fd00:1122:3344:104::21 + nexus 2ec75441-3d7d-4b4b-9614-af03de5a3666 in service fd00:1122:3344:104::2d + nexus 508abd03-cbfe-4654-9a6d-7f15a1ad32e5 in service fd00:1122:3344:104::2e + nexus 59950bc8-1497-44dd-8cbf-b6502ba921b2 in service fd00:1122:3344:104::2f + nexus b4947d31-f70e-4ee0-8817-0ca6cea9b16b in service fd00:1122:3344:104::22 + + + + sled: affab35f-600a-4109-8ea0-34a067a4e0bc + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 0dfbf374-9ef9-430f-b06d-f271bf7f84c4 in service fd00:1122:3344:101::27 + crucible 3aa07966-5899-4789-ace5-f8eeb375c6c3 in service fd00:1122:3344:101::24 + crucible 4ad0e9da-08f8-4d40-b4d3-d17e711b5bbf in service fd00:1122:3344:101::29 + crucible 72c5a909-077d-4ec1-a9d5-ae64ef9d716e in service fd00:1122:3344:101::26 + crucible 95482c25-1e7f-43e8-adf1-e3548a1b3ae0 in service fd00:1122:3344:101::23 + crucible a1c03689-fc62-4ea5-bb72-4d01f5138614 in service fd00:1122:3344:101::2a + crucible a568e92e-4fbd-4b69-acd8-f16277073031 in service fd00:1122:3344:101::2c + crucible bf79a56a-97af-4cc4-94a5-8b20d64c2cda in service fd00:1122:3344:101::28 + crucible c60379ba-4e30-4628-a79a-0ae509aef4c5 in service fd00:1122:3344:101::25 + crucible d47f4996-fac0-4657-bcea-01b1fee6404d in service fd00:1122:3344:101::2b + internal_ntp f1a7b9a7-fc6a-4b23-b829-045ff33117ff in service fd00:1122:3344:101::21 + nexus 15c103f0-ac63-423b-ba5d-1b5fcd563ba3 in service fd00:1122:3344:101::22 + nexus 3ca5292f-8a59-4475-bb72-0f43714d0fff in service fd00:1122:3344:101::2e + nexus 99f6d544-8599-4e2b-a55a-82d9e0034662 in service fd00:1122:3344:101::2d + nexus c26b3bda-5561-44a1-a69f-22103fe209a1 in service fd00:1122:3344:101::2f + + + +!48d95fef-bc9f-4f50-9a53-1e075836291d +WARNING: Zones exist without physical disks! 
+ omicron zones at generation 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 expunged fd00:1122:3344:103::2c + crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea expunged fd00:1122:3344:103::25 + crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f expunged fd00:1122:3344:103::27 + crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 expunged fd00:1122:3344:103::28 + crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb expunged fd00:1122:3344:103::24 + crucible 67622d61-2df4-414d-aa0e-d1277265f405 expunged fd00:1122:3344:103::23 + crucible b91b271d-8d80-4f49-99a0-34006ae86063 expunged fd00:1122:3344:103::2a + crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 expunged fd00:1122:3344:103::26 + crucible e39d7c9e-182b-48af-af87-58079d723583 expunged fd00:1122:3344:103::29 + crucible f69f92a1-5007-4bb0-a85b-604dc217154b expunged fd00:1122:3344:103::2b + internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 expunged fd00:1122:3344:103::21 + nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb expunged fd00:1122:3344:103::22 + + + + +!68d24ac5-f341-49ea-a92a-0381b52ab387 +WARNING: Zones exist without physical disks! + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 expunged fd00:1122:3344:102::2c + crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc expunged fd00:1122:3344:102::23 + crucible 6464d025-4652-4948-919e-740bec5699b1 expunged fd00:1122:3344:102::24 + crucible 6939ce48-b17c-4616-b176-8a419a7697be expunged fd00:1122:3344:102::29 + crucible 878dfddd-3113-4197-a3ea-e0d4dbe9b476 expunged fd00:1122:3344:102::25 + crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 expunged fd00:1122:3344:102::2b + crucible 9fd52961-426f-4e62-a644-b70871103fca expunged fd00:1122:3344:102::26 + crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 expunged fd00:1122:3344:102::27 + crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 expunged fd00:1122:3344:102::28 + crucible c407795c-6c8b-428e-8ab8-b962913c447f expunged fd00:1122:3344:102::2a + internal_ntp f3f2e4f3-0985-4ef6-8336-ce479382d05d expunged fd00:1122:3344:102::21 + nexus 01d58626-e1b0-480f-96be-ac784863c7dc expunged fd00:1122:3344:102::22 + + + + METADATA: + created by::::::::::: test_blueprint2 + created at::::::::::: 1970-01-01T00:00:00.000Z + comment:::::::::::::: sled 48d95fef-bc9f-4f50-9a53-1e075836291d (sled policy is expunged): 12 zones expunged + internal DNS version: 1 + external DNS version: 1 + diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index 8f766334169..802727b1ab9 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -13,6 +13,7 @@ chrono.workspace = true clap.workspace = true base64.workspace = true derive-where.workspace = true +derive_more.workspace = true futures.workspace = true humantime.workspace = true ipnetwork.workspace = true @@ -27,7 +28,6 @@ slog.workspace = true slog-error-chain.workspace = true steno.workspace = true strum.workspace = true -tabled.workspace = true thiserror.workspace = true newtype-uuid.workspace = true uuid.workspace = true diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index a577c4978c4..aafa631320b 100644 --- 
a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -21,8 +21,10 @@ pub use crate::inventory::OmicronZoneType; pub use crate::inventory::OmicronZonesConfig; pub use crate::inventory::SourceNatConfig; pub use crate::inventory::ZpoolName; +use derive_more::From; use newtype_uuid::GenericUuid; use omicron_common::api::external::Generation; +use omicron_common::disk::DiskIdentity; use omicron_uuid_kinds::CollectionUuid; use omicron_uuid_kinds::ExternalIpUuid; use omicron_uuid_kinds::OmicronZoneUuid; @@ -30,10 +32,11 @@ use omicron_uuid_kinds::SledUuid; use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; +use sled_agent_client::types::OmicronPhysicalDisksConfig; use sled_agent_client::ZoneKind; use slog_error_chain::SlogInlineError; use std::collections::BTreeMap; -use std::collections::HashMap; +use std::collections::BTreeSet; use std::fmt; use std::net::AddrParseError; use std::net::Ipv6Addr; @@ -42,6 +45,8 @@ use strum::IntoEnumIterator; use thiserror::Error; use uuid::Uuid; +mod blueprint_diff; +mod blueprint_display; mod network_resources; mod planning_input; mod tri_map; @@ -70,6 +75,14 @@ pub use planning_input::ZpoolFilter; pub use zone_type::blueprint_zone_type; pub use zone_type::BlueprintZoneType; +use blueprint_display::{ + constants::*, BpDiffState, BpGeneration, BpOmicronZonesSubtableSchema, + BpPhysicalDisksSubtableSchema, BpSledSubtable, BpSledSubtableData, + BpSledSubtableRow, KvListWithHeading, +}; + +pub use blueprint_diff::BlueprintDiff; + /// Describes a complete set of software and configuration for the system // Blueprints are a fundamental part of how the system modifies itself. Each // blueprint completely describes all of the software and configuration @@ -206,10 +219,7 @@ impl Blueprint { /// The argument provided is the "before" side, and `self` is the "after" /// side. This matches the order of arguments to /// [`Blueprint::diff_since_collection`]. - pub fn diff_since_blueprint( - &self, - before: &Blueprint, - ) -> Result { + pub fn diff_since_blueprint(&self, before: &Blueprint) -> BlueprintDiff { BlueprintDiff::new( DiffBeforeMetadata::Blueprint(Box::new(before.metadata())), before @@ -219,6 +229,12 @@ impl Blueprint { .collect(), self.metadata(), self.blueprint_zones.clone(), + before + .blueprint_disks + .iter() + .map(|(sled_id, disks)| (*sled_id, disks.clone().into())) + .collect(), + self.blueprint_disks.clone(), ) } @@ -231,10 +247,7 @@ impl Blueprint { /// Note that collections do not include information about zone /// disposition, so it is assumed that all zones in the collection have the /// [`InService`](BlueprintZoneDisposition::InService) disposition. 
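
The hunk above (and the matching change to `diff_since_collection` just below) makes these methods return a `BlueprintDiff` directly rather than a `Result`, and threads the physical-disk configs into the diff as well: diff construction no longer fails, and inconsistencies found while pairing zones are carried inside the diff and rendered in its ERRORS section, as in the expected output earlier in this patch. The following is a standalone sketch of that pattern, not omicron code; the types and the "generation went backwards" check are purely illustrative.

use std::fmt;

// Sketch: construction is infallible; problems found while pairing entries
// are rendered by the Display impl instead of being returned as an error.
struct SketchDiff {
    modified: Vec<String>,
    errors: Vec<String>,
}

impl SketchDiff {
    fn new(before: &[(&str, u32)], after: &[(&str, u32)]) -> Self {
        let mut modified = Vec::new();
        let mut errors = Vec::new();
        for (name, gen_before) in before {
            match after.iter().find(|(n, _)| n == name) {
                Some((_, gen_after)) if gen_after == gen_before => {}
                Some((_, gen_after)) if gen_after > gen_before => {
                    modified.push(format!("{name}: {gen_before} -> {gen_after}"));
                }
                Some((_, gen_after)) => errors.push(format!(
                    "{name}: generation went backwards ({gen_before} -> {gen_after})"
                )),
                None => modified.push(format!("{name}: removed")),
            }
        }
        SketchDiff { modified, errors }
    }
}

impl fmt::Display for SketchDiff {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for m in &self.modified {
            writeln!(f, "* {m}")?;
        }
        if !self.errors.is_empty() {
            writeln!(f, "ERRORS:")?;
            for e in &self.errors {
                writeln!(f, "  {e}")?;
            }
        }
        Ok(())
    }
}

fn main() {
    // Construction never returns Err; errors are part of the rendered diff.
    let diff = SketchDiff::new(&[("sled-a", 3), ("sled-b", 2)], &[("sled-a", 2), ("sled-b", 3)]);
    print!("{diff}");
}
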
- pub fn diff_since_collection( - &self, - before: &Collection, - ) -> Result { + pub fn diff_since_collection(&self, before: &Collection) -> BlueprintDiff { let before_zones = before .omicron_zones .iter() @@ -243,11 +256,31 @@ impl Blueprint { }) .collect(); + let before_disks = before + .sled_agents + .iter() + .map(|(sled_id, sa)| { + ( + *sled_id, + CollectionPhysicalDisksConfig { + disks: sa + .disks + .iter() + .map(|d| d.identity.clone()) + .collect::>(), + } + .into(), + ) + }) + .collect(); + BlueprintDiff::new( DiffBeforeMetadata::Collection { id: before.id }, before_zones, self.metadata(), self.blueprint_zones.clone(), + before_disks, + self.blueprint_disks.clone(), ) } @@ -258,6 +291,47 @@ impl Blueprint { } } +impl BpSledSubtableData for &OmicronPhysicalDisksConfig { + fn bp_generation(&self) -> BpGeneration { + BpGeneration::Value(self.generation) + } + + fn rows( + &self, + state: BpDiffState, + ) -> impl Iterator { + let sorted_disk_ids: BTreeSet = + self.disks.iter().map(|d| d.identity.clone()).collect(); + + sorted_disk_ids.into_iter().map(move |d| { + BpSledSubtableRow::new(state, vec![d.vendor, d.model, d.serial]) + }) + } +} + +impl BpSledSubtableData for BlueprintOrCollectionZonesConfig { + fn bp_generation(&self) -> BpGeneration { + BpGeneration::Value(self.generation()) + } + + fn rows( + &self, + state: BpDiffState, + ) -> impl Iterator { + self.zones().map(move |zone| { + BpSledSubtableRow::new( + state, + vec![ + zone.kind().to_string(), + zone.id().to_string(), + zone.disposition().to_string(), + zone.underlay_address().to_string(), + ], + ) + }) + } +} + /// Wrapper to allow a [`Blueprint`] to be displayed with information. /// /// Returned by [`Blueprint::display()`]. @@ -268,6 +342,39 @@ pub struct BlueprintDisplay<'a> { // TODO: add colorization with a stylesheet } +impl<'a> BlueprintDisplay<'a> { + pub(super) fn make_metadata_table(&self) -> KvListWithHeading { + let comment = if self.blueprint.comment.is_empty() { + NONE_PARENS.to_string() + } else { + self.blueprint.comment.clone() + }; + + KvListWithHeading::new_unchanged( + METADATA_HEADING, + vec![ + (CREATED_BY, self.blueprint.creator.clone()), + ( + CREATED_AT, + humantime::format_rfc3339_millis( + self.blueprint.time_created.into(), + ) + .to_string(), + ), + (COMMENT, comment), + ( + INTERNAL_DNS_VERSION, + self.blueprint.internal_dns_version.to_string(), + ), + ( + EXTERNAL_DNS_VERSION, + self.blueprint.external_dns_version.to_string(), + ), + ], + ) + } +} + impl<'a> fmt::Display for BlueprintDisplay<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let b = self.blueprint; @@ -280,9 +387,62 @@ impl<'a> fmt::Display for BlueprintDisplay<'a> { .unwrap_or_else(|| String::from("")) )?; - writeln!(f, "\n{}", self.make_zone_table())?; + // Keep track of any sled_ids that have been seen in the first loop. + let mut seen_sleds = BTreeSet::new(); + + // Loop through all sleds that have physical disks and print a table of + // those physical disks. + // + // If there are corresponding zones, print those as well. 
+ for (sled_id, disks) in &self.blueprint.blueprint_disks { + // Construct the disks subtable + let disks_table = BpSledSubtable::new( + BpPhysicalDisksSubtableSchema {}, + disks.bp_generation(), + disks.rows(BpDiffState::Unchanged).collect(), + ); + + // Construct the zones subtable + match self.blueprint.blueprint_zones.get(sled_id) { + Some(zones) => { + let zones = + BlueprintOrCollectionZonesConfig::from(zones.clone()); + let zones_tab = BpSledSubtable::new( + BpOmicronZonesSubtableSchema {}, + zones.bp_generation(), + zones.rows(BpDiffState::Unchanged).collect(), + ); + writeln!( + f, + "\n sled: {sled_id}\n\n{disks_table}\n\n{zones_tab}\n" + )?; + } + None => writeln!(f, "\n sled: {sled_id}\n\n{disks_table}\n")?, + } + seen_sleds.insert(sled_id); + } + + // Now create and display a table of zones on sleds that don't + // yet have physical disks. + // + // This should basically be impossible, so we warn if it occurs. + for (sled_id, zones) in &self.blueprint.blueprint_zones { + if !seen_sleds.contains(sled_id) && !zones.zones.is_empty() { + let zones = + BlueprintOrCollectionZonesConfig::from(zones.clone()); + writeln!( + f, + "\n!{sled_id}\n{}\n{}\n\n", + "WARNING: Zones exist without physical disks!", + BpSledSubtable::new( + BpOmicronZonesSubtableSchema {}, + zones.bp_generation(), + zones.rows(BpDiffState::Unchanged).collect() + ) + )?; + } + } - writeln!(f, "\n{}", table_display::metadata_heading())?; writeln!(f, "{}", self.make_metadata_table())?; Ok(()) @@ -872,244 +1032,6 @@ pub struct BlueprintTargetSet { pub enabled: bool, } -/// Summarizes the differences between two blueprints -#[derive(Debug)] -pub struct BlueprintDiff { - before_meta: DiffBeforeMetadata, - after_meta: BlueprintMetadata, - sleds: DiffSleds, -} - -impl BlueprintDiff { - /// Build a diff with the provided contents, verifying that the provided - /// data is valid. - fn new( - before_meta: DiffBeforeMetadata, - before_zones: BTreeMap, - after_meta: BlueprintMetadata, - after_zones: BTreeMap, - ) -> Result { - let mut errors = Vec::new(); - - let sleds = DiffSleds::new(before_zones, after_zones, &mut errors); - - if errors.is_empty() { - Ok(Self { before_meta, after_meta, sleds }) - } else { - Err(BlueprintDiffError { - before_meta, - after_meta: Box::new(after_meta), - errors, - }) - } - } - - /// Returns metadata about the source of the "before" data. - pub fn before_meta(&self) -> &DiffBeforeMetadata { - &self.before_meta - } - - /// Returns metadata about the source of the "after" data. - pub fn after_meta(&self) -> &BlueprintMetadata { - &self.after_meta - } - - /// Iterate over sleds only present in the second blueprint of a diff - pub fn sleds_added( - &self, - ) -> impl ExactSizeIterator + '_ - { - self.sleds.added.iter().map(|(sled_id, zones)| (*sled_id, zones)) - } - - /// Iterate over sleds only present in the first blueprint of a diff - pub fn sleds_removed( - &self, - ) -> impl ExactSizeIterator< - Item = (SledUuid, &BlueprintOrCollectionZonesConfig), - > + '_ { - self.sleds.removed.iter().map(|(sled_id, zones)| (*sled_id, zones)) - } - - /// Iterate over sleds present in both blueprints in a diff that have - /// changes. - pub fn sleds_modified( - &self, - ) -> impl ExactSizeIterator + '_ { - self.sleds.modified.iter().map(|(sled_id, sled)| (*sled_id, sled)) - } - - /// Iterate over sleds present in both blueprints in a diff that have no - /// changes. 
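
The new `Display` implementation above first walks `blueprint_disks`, printing each sled's physical-disk table followed by its zone table, then makes a second pass over `blueprint_zones` to flag sleds that have zones but no disk records; that second pass is what produces the `!<sled-id>` / `WARNING: Zones exist without physical disks!` lines in the expected output earlier in this patch. A compressed, standalone sketch of the same two-pass idea, with illustrative types rather than the omicron ones:

use std::collections::{BTreeMap, BTreeSet};

// First pass: every sled that has disks, with its zone count if any.
// Second pass: warn about sleds that have zones but no disks at all.
fn render(
    disks: &BTreeMap<&str, Vec<&str>>,
    zones: &BTreeMap<&str, Vec<&str>>,
) -> String {
    let mut out = String::new();
    let mut seen = BTreeSet::new();
    for (sled, d) in disks {
        let zone_count = zones.get(sled).map_or(0, |z| z.len());
        out.push_str(&format!("sled {sled}: {} disks, {zone_count} zones\n", d.len()));
        seen.insert(*sled);
    }
    for (sled, z) in zones {
        if !seen.contains(sled) && !z.is_empty() {
            out.push_str(&format!("!{sled} WARNING: {} zones without physical disks\n", z.len()));
        }
    }
    out
}

fn main() {
    let disks = BTreeMap::from([("sled-a", vec!["disk-1"])]);
    let zones = BTreeMap::from([("sled-a", vec!["z1"]), ("sled-b", vec!["z2"])]);
    print!("{}", render(&disks, &zones));
}
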
- pub fn sleds_unchanged( - &self, - ) -> impl Iterator + '_ { - self.sleds.unchanged.iter().map(|(sled_id, zones)| (*sled_id, zones)) - } - - /// Return a struct that can be used to display the diff. - pub fn display(&self) -> BlueprintDiffDisplay<'_> { - BlueprintDiffDisplay::new(self) - } -} - -#[derive(Debug)] -struct DiffSleds { - added: BTreeMap, - removed: BTreeMap, - modified: BTreeMap, - unchanged: BTreeMap, -} - -impl DiffSleds { - /// Builds added, removed and common maps, verifying that the provided data - /// is valid. - /// - /// The return value only contains the sleds that are present in both - /// blueprints. - fn new( - before: BTreeMap, - mut after: BTreeMap, - errors: &mut Vec, - ) -> Self { - let mut removed = BTreeMap::new(); - let mut modified = BTreeMap::new(); - let mut unchanged = BTreeMap::new(); - - for (sled_id, mut before_z) in before { - if let Some(mut after_z) = after.remove(&sled_id) { - // Sort before_z and after_z so they can be compared directly. - before_z.sort(); - after_z.sort(); - - if before_z == after_z { - unchanged.insert(sled_id, after_z); - } else { - let sled_modified = DiffSledModified::new( - sled_id, before_z, after_z, errors, - ); - modified.insert(sled_id, sled_modified); - } - } else { - removed.insert(sled_id, before_z); - } - } - - // We removed everything common from `after` above, so anything left is - // an added sled. - Self { added: after, removed, modified, unchanged } - } -} - -/// Wrapper to allow a [`BlueprintDiff`] to be displayed. -/// -/// Returned by [`BlueprintDiff::display()`]. -#[derive(Clone, Debug)] -#[must_use = "this struct does nothing unless displayed"] -pub struct BlueprintDiffDisplay<'diff> { - diff: &'diff BlueprintDiff, - // TODO: add colorization with a stylesheet -} - -impl<'diff> BlueprintDiffDisplay<'diff> { - #[inline] - fn new(diff: &'diff BlueprintDiff) -> Self { - Self { diff } - } -} - -impl<'diff> fmt::Display for BlueprintDiffDisplay<'diff> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let diff = self.diff; - - // Print things differently based on whether the diff is between a - // collection and a blueprint, or a blueprint and a blueprint. - match &diff.before_meta { - DiffBeforeMetadata::Collection { id } => { - writeln!( - f, - "from: collection {}\n\ - to: blueprint {}", - id, diff.after_meta.id, - )?; - } - DiffBeforeMetadata::Blueprint(before) => { - writeln!( - f, - "from: blueprint {}\n\ - to: blueprint {}", - before.id, diff.after_meta.id - )?; - } - } - - writeln!(f, "\n{}", self.make_zone_diff_table())?; - - writeln!(f, "\n{}", table_display::metadata_diff_heading())?; - writeln!(f, "{}", self.make_metadata_diff_table())?; - - Ok(()) - } -} - -#[derive(Clone, Debug, Error)] -pub struct BlueprintDiffError { - pub before_meta: DiffBeforeMetadata, - pub after_meta: Box, - pub errors: Vec, -} - -impl fmt::Display for BlueprintDiffError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!( - f, - "errors in diff between {} and {}:", - self.before_meta.display_id(), - self.after_meta.display_id() - )?; - for e in &self.errors { - writeln!(f, " - {}", e)?; - } - Ok(()) - } -} - -/// An individual error within a [`BlueprintDiffError`]. -#[derive(Clone, Debug)] -pub enum BlueprintDiffSingleError { - /// The [`OmicronZoneType`] of a particular zone changed between the before - /// and after blueprints. - /// - /// For a particular zone, the type should never change. 
- ZoneTypeChanged { - sled_id: SledUuid, - zone_id: Uuid, - before: ZoneKind, - after: ZoneKind, - }, - InvalidOmicronZoneType(InvalidOmicronZoneType), -} - -impl fmt::Display for BlueprintDiffSingleError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - BlueprintDiffSingleError::ZoneTypeChanged { - sled_id, - zone_id, - before, - after, - } => write!( - f, - "on sled {sled_id}, zone {zone_id} changed type \ - from {before} to {after}", - ), - BlueprintDiffSingleError::InvalidOmicronZoneType(err) => { - write!(f, "invalid OmicronZoneType in collection: {err}") - } - } - } -} - /// Data about the "before" version within a [`BlueprintDiff`]. #[derive(Clone, Debug)] pub enum DiffBeforeMetadata { @@ -1216,6 +1138,15 @@ impl From for BlueprintOrCollectionZoneConfig { } } +impl PartialEq for BlueprintOrCollectionZoneConfig { + fn eq(&self, other: &BlueprintZoneConfig) -> bool { + self.kind() == other.kind() + && self.disposition() == other.disposition + && self.underlay_address() == other.underlay_address + && self.is_zone_type_equal(&other.zone_type) + } +} + impl BlueprintOrCollectionZoneConfig { pub fn id(&self) -> OmicronZoneUuid { match self { @@ -1266,151 +1197,39 @@ impl BlueprintOrCollectionZoneConfig { } } -/// Describes a sled that appeared on both sides of a diff and is changed. -#[derive(Clone, Debug)] -pub struct DiffSledModified { - /// id of the sled - pub sled_id: SledUuid, - /// generation of the "zones" configuration on the left side - pub generation_before: Generation, - /// generation of the "zones" configuration on the right side - pub generation_after: Generation, - zones_added: Vec, - zones_removed: Vec, - zones_common: Vec, +/// Single sled's disks config for "before" version within a [`BlueprintDiff`]. +#[derive(Clone, Debug, From)] +pub enum BlueprintOrCollectionDisksConfig { + /// The diff was made from a collection. + Collection(CollectionPhysicalDisksConfig), + /// The diff was made from a blueprint. + Blueprint(BlueprintPhysicalDisksConfig), } -impl DiffSledModified { - fn new( - sled_id: SledUuid, - before: BlueprintOrCollectionZonesConfig, - after: BlueprintZonesConfig, - errors: &mut Vec, - ) -> Self { - // Assemble separate summaries of the zones, indexed by zone id. - let before_by_id: HashMap<_, _> = - before.zones().map(|zone| (zone.id(), zone)).collect(); - let mut after_by_id: HashMap<_, _> = - after.zones.into_iter().map(|zone| (zone.id, zone)).collect(); - - let mut zones_removed = Vec::new(); - let mut zones_common = Vec::new(); - - // Now go through each zone and compare them. - for (zone_id, zone_before) in before_by_id { - if let Some(zone_after) = after_by_id.remove(&zone_id) { - let before_kind = zone_before.kind(); - let after_kind = zone_after.zone_type.kind(); - - if before_kind != after_kind { - errors.push(BlueprintDiffSingleError::ZoneTypeChanged { - sled_id, - zone_id: zone_id.into_untyped_uuid(), - before: before_kind, - after: after_kind, - }); - } else { - let common = DiffZoneCommon { zone_before, zone_after }; - zones_common.push(common); - } - } else { - zones_removed.push(zone_before); +impl BlueprintOrCollectionDisksConfig { + pub fn generation(&self) -> Option { + match self { + BlueprintOrCollectionDisksConfig::Collection(_) => None, + BlueprintOrCollectionDisksConfig::Blueprint(c) => { + Some(c.generation) } } - - // Since we removed common zones above, anything else exists only in - // before and was therefore added. 
- let mut zones_added: Vec<_> = after_by_id.into_values().collect(); - - // Sort for test reproducibility. - zones_added.sort_unstable_by_key(zone_sort_key); - zones_removed.sort_unstable_by_key(zone_sort_key); - zones_common.sort_unstable_by_key(|common| { - // The ID is common by definition, and the zone type was already - // verified to be the same above. So just sort by the sort key for - // the before zone. (In case of errors, the result will be thrown - // away anyway, so this is harmless.) - zone_sort_key(&common.zone_before) - }); - - Self { - sled_id, - generation_before: before.generation(), - generation_after: after.generation, - zones_added, - zones_removed, - zones_common, - } - } - - /// Iterate over zones added between the blueprints - pub fn zones_added( - &self, - ) -> impl ExactSizeIterator + '_ { - self.zones_added.iter() } - /// Iterate over zones removed between the blueprints - pub fn zones_removed( - &self, - ) -> impl ExactSizeIterator + '_ - { - self.zones_removed.iter() - } - - /// Iterate over zones that are common to both blueprints - pub fn zones_in_common( - &self, - ) -> impl ExactSizeIterator + '_ { - self.zones_common.iter() - } - - /// Iterate over zones that changed between the blueprints - pub fn zones_modified(&self) -> impl Iterator + '_ { - self.zones_in_common().filter(|z| z.is_modified()) - } - - /// Iterate over zones that did not change between the blueprints - pub fn zones_unchanged( - &self, - ) -> impl Iterator + '_ { - self.zones_in_common().filter(|z| !z.is_modified()) + pub fn disks(&self) -> BTreeSet { + match self { + BlueprintOrCollectionDisksConfig::Collection(c) => c.disks.clone(), + BlueprintOrCollectionDisksConfig::Blueprint(c) => { + c.disks.iter().map(|d| d.identity.clone()).collect() + } + } } } -/// Describes a zone that was common to both sides of a diff -#[derive(Debug, Clone)] -pub struct DiffZoneCommon { - /// full zone configuration before - pub zone_before: BlueprintOrCollectionZoneConfig, - /// full zone configuration after - pub zone_after: BlueprintZoneConfig, -} - -impl DiffZoneCommon { - /// Returns true if there are any differences between `zone_before` and - /// `zone_after`. - /// - /// This is equivalent to `config_changed() || disposition_changed()`. - #[inline] - pub fn is_modified(&self) -> bool { - // state is smaller and easier to compare than config. - self.disposition_changed() || self.config_changed() - } - - /// Returns true if the zone configuration (excluding the disposition) - /// changed. - #[inline] - pub fn config_changed(&self) -> bool { - self.zone_before.underlay_address() != self.zone_after.underlay_address - || !self.zone_before.is_zone_type_equal(&self.zone_after.zone_type) - } - - /// Returns true if the [`BlueprintZoneDisposition`] for the zone changed. - #[inline] - pub fn disposition_changed(&self) -> bool { - self.zone_before.disposition() != self.zone_after.disposition - } +/// Single sled's disk config for "before" version within a [`BlueprintDiff`]. +#[derive(Clone, Debug, From)] +pub struct CollectionPhysicalDisksConfig { + disks: BTreeSet, } /// Encapsulates Reconfigurator state @@ -1430,658 +1249,3 @@ pub struct UnstableReconfiguratorState { pub silo_names: Vec, pub external_dns_zone_names: Vec, } - -/// Code to generate tables. -/// -/// This is here because `tabled` has a number of generically-named types, and -/// we'd like to avoid name collisions with other types. 
-mod table_display { - use super::*; - use crate::sectioned_table::SectionSpacing; - use crate::sectioned_table::StBuilder; - use crate::sectioned_table::StSectionBuilder; - use tabled::builder::Builder; - use tabled::settings::object::Columns; - use tabled::settings::Modify; - use tabled::settings::Padding; - use tabled::settings::Style; - use tabled::Table; - - impl<'a> super::BlueprintDisplay<'a> { - pub(super) fn make_zone_table(&self) -> Table { - let blueprint_zones = &self.blueprint.blueprint_zones; - let mut builder = StBuilder::new(); - builder.push_header_row(header_row()); - - for (sled_id, sled_zones) in blueprint_zones { - let heading = format!( - "{SLED_INDENT}sled {sled_id}: blueprint zones at generation {}", - sled_zones.generation - ); - builder.make_section( - SectionSpacing::Always, - heading, - |section| { - for zone in &sled_zones.zones { - add_zone_record( - ZONE_INDENT.to_string(), - &zone.clone().into(), - section, - ); - } - - if section.is_empty() { - section.push_nested_heading( - SectionSpacing::IfNotFirst, - format!("{ZONE_HEAD_INDENT}{NO_ZONES_PARENS}"), - ); - } - }, - ); - } - - builder.build() - } - - pub(super) fn make_metadata_table(&self) -> Table { - let mut builder = Builder::new(); - - // Metadata is presented as a linear (top-to-bottom) table with a - // small indent. - - builder.push_record(vec![ - METADATA_INDENT.to_string(), - linear_table_label(&CREATED_BY), - self.blueprint.creator.clone(), - ]); - - builder.push_record(vec![ - METADATA_INDENT.to_string(), - linear_table_label(&CREATED_AT), - humantime::format_rfc3339_millis( - self.blueprint.time_created.into(), - ) - .to_string(), - ]); - - let comment = if self.blueprint.comment.is_empty() { - NONE_PARENS.to_string() - } else { - self.blueprint.comment.clone() - }; - - builder.push_record(vec![ - METADATA_INDENT.to_string(), - linear_table_label(&COMMENT), - comment, - ]); - - builder.push_record(vec![ - METADATA_INDENT.to_string(), - linear_table_label(&INTERNAL_DNS_VERSION), - self.blueprint.internal_dns_version.to_string(), - ]); - - builder.push_record(vec![ - METADATA_INDENT.to_string(), - linear_table_label(&EXTERNAL_DNS_VERSION), - self.blueprint.external_dns_version.to_string(), - ]); - - let mut table = builder.build(); - apply_linear_table_settings(&mut table); - table - } - } - - impl<'diff> BlueprintDiffDisplay<'diff> { - pub(super) fn make_zone_diff_table(&self) -> Table { - let diff = self.diff; - - // Add the unchanged prefix to the zone indent since the first - // column will be used as the prefix. - let mut builder = StBuilder::new(); - builder.push_header_row(diff_header_row()); - - // The order is: - // - // 1. Unchanged - // 2. Removed - // 3. Modified - // 4. Added - // - // The idea behind the order is to (a) group all changes together - // and (b) put changes towards the bottom, so people have to scroll - // back less. - // - // Zones within a modified sled follow the same order. If you're - // changing the order here, make sure to keep that in sync. - - // First, unchanged sleds. - builder.make_section( - SectionSpacing::Always, - unchanged_sleds_heading(), - |section| { - for (sled_id, sled_zones) in diff.sleds_unchanged() { - add_whole_sled_records( - sled_id, - &sled_zones.clone().into(), - WholeSledKind::Unchanged, - section, - ); - } - }, - ); - - // Then, removed sleds. 
- builder.make_section( - SectionSpacing::Always, - removed_sleds_heading(), - |section| { - for (sled_id, sled_zones) in diff.sleds_removed() { - add_whole_sled_records( - sled_id, - sled_zones, - WholeSledKind::Removed, - section, - ); - } - }, - ); - - // Then, modified sleds. - builder.make_section( - SectionSpacing::Always, - modified_sleds_heading(), - |section| { - // For sleds that are in common: - for (sled_id, modified) in diff.sleds_modified() { - add_modified_sled_records(sled_id, modified, section); - } - }, - ); - - // Finally, added sleds. - builder.make_section( - SectionSpacing::Always, - added_sleds_heading(), - |section| { - for (sled_id, sled_zones) in diff.sleds_added() { - add_whole_sled_records( - sled_id, - &sled_zones.clone().into(), - WholeSledKind::Added, - section, - ); - } - }, - ); - - builder.build() - } - - pub(super) fn make_metadata_diff_table(&self) -> Table { - let diff = self.diff; - let mut builder = Builder::new(); - - // Metadata is presented as a linear (top-to-bottom) table with a - // small indent. - - match &diff.before_meta { - DiffBeforeMetadata::Collection { .. } => { - // Collections don't have DNS versions, so this is new. - builder.push_record(vec![ - format!("{ADDED_PREFIX}{METADATA_DIFF_INDENT}"), - metadata_table_internal_dns(), - linear_table_modified( - &NOT_PRESENT_IN_COLLECTION_PARENS, - &diff.after_meta.internal_dns_version, - ), - ]); - - builder.push_record(vec![ - format!("{ADDED_PREFIX}{METADATA_DIFF_INDENT}"), - metadata_table_external_dns(), - linear_table_modified( - &NOT_PRESENT_IN_COLLECTION_PARENS, - &diff.after_meta.external_dns_version, - ), - ]); - } - DiffBeforeMetadata::Blueprint(before) => { - if before.internal_dns_version - != diff.after_meta.internal_dns_version - { - builder.push_record(vec![ - format!("{MODIFIED_PREFIX}{METADATA_DIFF_INDENT}"), - metadata_table_internal_dns(), - linear_table_modified( - &before.internal_dns_version, - &diff.after_meta.internal_dns_version, - ), - ]); - } else { - builder.push_record(vec![ - format!("{UNCHANGED_PREFIX}{METADATA_DIFF_INDENT}"), - metadata_table_internal_dns(), - linear_table_unchanged( - &before.internal_dns_version, - ), - ]); - }; - - if before.external_dns_version - != diff.after_meta.external_dns_version - { - builder.push_record(vec![ - format!("{MODIFIED_PREFIX}{METADATA_DIFF_INDENT}"), - metadata_table_external_dns(), - linear_table_modified( - &before.external_dns_version, - &diff.after_meta.external_dns_version, - ), - ]); - } else { - builder.push_record(vec![ - format!("{UNCHANGED_PREFIX}{METADATA_DIFF_INDENT}"), - metadata_table_external_dns(), - linear_table_unchanged( - &before.external_dns_version, - ), - ]); - }; - } - } - - let mut table = builder.build(); - apply_linear_table_settings(&mut table); - table - } - } - - fn add_whole_sled_records( - sled_id: SledUuid, - sled_zones: &BlueprintOrCollectionZonesConfig, - kind: WholeSledKind, - section: &mut StSectionBuilder, - ) { - let heading = format!( - "{}{SLED_INDENT}sled {sled_id}: blueprint zones at generation {}", - kind.prefix(), - sled_zones.generation(), - ); - let prefix = kind.prefix(); - let status = kind.status(); - section.make_subsection(SectionSpacing::Always, heading, |s2| { - // Also add another section for zones. 
- for zone in sled_zones.zones() { - match status { - Some(status) => { - add_zone_record_with_status( - format!("{prefix}{ZONE_INDENT}"), - &zone, - status, - s2, - ); - } - None => { - add_zone_record( - format!("{prefix}{ZONE_INDENT}"), - &zone, - s2, - ); - } - } - } - }); - } - - fn add_modified_sled_records( - sled_id: SledUuid, - modified: &DiffSledModified, - section: &mut StSectionBuilder, - ) { - let (generation_heading, warning) = - if modified.generation_before != modified.generation_after { - ( - format!( - "blueprint zones at generation: {} -> {}", - modified.generation_before, modified.generation_after, - ), - None, - ) - } else { - // Modified sleds should always see a generation bump. - ( - format!( - "blueprint zones at generation: {}", - modified.generation_before - ), - Some(format!( - "{WARNING_PREFIX}{ZONE_HEAD_INDENT}\ - warning: generation should have changed" - )), - ) - }; - - let sled_heading = - format!("{MODIFIED_PREFIX}{SLED_INDENT}sled {sled_id}: {generation_heading}"); - - section.make_subsection(SectionSpacing::Always, sled_heading, |s2| { - if let Some(warning) = warning { - s2.push_nested_heading(SectionSpacing::Never, warning); - } - - // The order is: - // - // 1. Unchanged - // 2. Removed - // 3. Modified - // 4. Added - // - // The idea behind the order is to (a) group all changes together - // and (b) put changes towards the bottom, so people have to scroll - // back less. - // - // Sleds follow the same order. If you're changing the order here, - // make sure to keep that in sync. - - // First, unchanged zones. - for zone_unchanged in modified.zones_unchanged() { - add_zone_record( - format!("{UNCHANGED_PREFIX}{ZONE_INDENT}"), - &zone_unchanged.zone_before, - s2, - ); - } - - // Then, removed zones. - for zone in modified.zones_removed() { - add_zone_record_with_status( - format!("{REMOVED_PREFIX}{ZONE_INDENT}"), - zone, - REMOVED, - s2, - ); - } - - // Then, modified zones. - for zone_modified in modified.zones_modified() { - add_modified_zone_records(zone_modified, s2); - } - - // Finally, added zones. - for zone in modified.zones_added() { - add_zone_record_with_status( - format!("{ADDED_PREFIX}{ZONE_INDENT}"), - &zone.clone().into(), - ADDED, - s2, - ); - } - - // If no rows were pushed, add a row indicating that for this sled. - if s2.is_empty() { - s2.push_nested_heading( - SectionSpacing::Never, - format!( - "{UNCHANGED_PREFIX}{ZONE_HEAD_INDENT}\ - {NO_ZONES_PARENS}" - ), - ); - } - }); - } - - /// Add a zone record to this section. - /// - /// This is the meat-and-potatoes of the diff display. - fn add_zone_record( - first_column: String, - zone: &BlueprintOrCollectionZoneConfig, - section: &mut StSectionBuilder, - ) { - section.push_record(vec![ - first_column, - zone.kind().to_string(), - zone.id().to_string(), - zone.disposition().to_string(), - zone.underlay_address().to_string(), - ]); - } - - fn add_zone_record_with_status( - first_column: String, - zone: &BlueprintOrCollectionZoneConfig, - status: &str, - section: &mut StSectionBuilder, - ) { - section.push_record(vec![ - first_column, - zone.kind().to_string(), - zone.id().to_string(), - zone.disposition().to_string(), - zone.underlay_address().to_string(), - status.to_string(), - ]); - } - - /// Add a change table for the zone to the section. - /// - /// For diffs, this contains a table of changes between two zone - /// records. - fn add_modified_zone_records( - modified: &DiffZoneCommon, - section: &mut StSectionBuilder, - ) { - // Negative record for the before. 
- let before = &modified.zone_before; - let after = &modified.zone_after; - - // Before record. - add_zone_record_with_status( - format!("{REMOVED_PREFIX}{ZONE_INDENT}"), - &before, - MODIFIED, - section, - ); - - let mut what_changed = Vec::new(); - if !before.is_zone_type_equal(&after.zone_type) { - what_changed.push(ZONE_TYPE_CONFIG); - } - if before.disposition() != after.disposition { - what_changed.push(DISPOSITION); - } - if before.underlay_address() != after.underlay_address { - what_changed.push(UNDERLAY_IP); - } - debug_assert!( - !what_changed.is_empty(), - "at least something should have changed:\n\ - before = {before:#?}\n\ - after = {after:#?}" - ); - - let record = vec![ - format!("{ADDED_PREFIX}{ZONE_INDENT}"), - // First two columns of data are skipped over since they're - // always the same (verified at diff construction time). - format!(" {SUB_NOT_LAST}"), - "".to_string(), - after.disposition.to_string(), - after.underlay_address.to_string(), - ]; - section.push_record(record); - - section.push_spanned_row(format!( - "{MODIFIED_PREFIX}{ZONE_INDENT} \ - {SUB_LAST} changed: {}", - what_changed.join(", "), - )); - } - - #[derive(Copy, Clone, Debug)] - enum WholeSledKind { - Removed, - Added, - Unchanged, - } - - impl WholeSledKind { - fn prefix(self) -> char { - match self { - WholeSledKind::Removed => REMOVED_PREFIX, - WholeSledKind::Added => ADDED_PREFIX, - WholeSledKind::Unchanged => UNCHANGED_PREFIX, - } - } - - fn status(self) -> Option<&'static str> { - match self { - WholeSledKind::Removed => Some(REMOVED), - WholeSledKind::Added => Some(ADDED), - WholeSledKind::Unchanged => None, - } - } - } - - // Apply settings for a table which has top-to-bottom rows, and a first - // column with indents. - fn apply_linear_table_settings(table: &mut Table) { - table.with(Style::empty()).with(Padding::zero()).with( - Modify::new(Columns::single(1)) - // Add an padding on the right of the label column to make the - // table visually distinctive. - .with(Padding::new(0, 2, 0, 0)), - ); - } - - // --- - // Heading and other definitions - // --- - - // This aligns the heading with the first column of actual text. - const H1_INDENT: &str = " "; - const SLED_HEAD_INDENT: &str = " "; - const SLED_INDENT: &str = " "; - const ZONE_HEAD_INDENT: &str = " "; - // Due to somewhat mysterious reasons with how padding works with tabled, - // this needs to be 3 columns wide rather than 4. 
- const ZONE_INDENT: &str = " "; - const METADATA_INDENT: &str = " "; - const METADATA_DIFF_INDENT: &str = " "; - - const ADDED_PREFIX: char = '+'; - const REMOVED_PREFIX: char = '-'; - const MODIFIED_PREFIX: char = '*'; - const UNCHANGED_PREFIX: char = ' '; - const WARNING_PREFIX: char = '!'; - - const ARROW: &str = "->"; - const SUB_NOT_LAST: &str = "├─"; - const SUB_LAST: &str = "└─"; - - const ZONE_TYPE: &str = "zone type"; - const ZONE_ID: &str = "zone ID"; - const DISPOSITION: &str = "disposition"; - const UNDERLAY_IP: &str = "underlay IP"; - const ZONE_TYPE_CONFIG: &str = "zone type config"; - const STATUS: &str = "status"; - const REMOVED_SLEDS_HEADING: &str = "REMOVED SLEDS"; - const MODIFIED_SLEDS_HEADING: &str = "MODIFIED SLEDS"; - const UNCHANGED_SLEDS_HEADING: &str = "UNCHANGED SLEDS"; - const ADDED_SLEDS_HEADING: &str = "ADDED SLEDS"; - const REMOVED: &str = "removed"; - const ADDED: &str = "added"; - const MODIFIED: &str = "modified"; - - const METADATA_HEADING: &str = "METADATA"; - const CREATED_BY: &str = "created by"; - const CREATED_AT: &str = "created at"; - const INTERNAL_DNS_VERSION: &str = "internal DNS version"; - const EXTERNAL_DNS_VERSION: &str = "external DNS version"; - const COMMENT: &str = "comment"; - - const UNCHANGED_PARENS: &str = "(unchanged)"; - const NO_ZONES_PARENS: &str = "(no zones)"; - const NONE_PARENS: &str = "(none)"; - const NOT_PRESENT_IN_COLLECTION_PARENS: &str = - "(not present in collection)"; - - fn header_row() -> Vec { - vec![ - // First column is so that the header border aligns with the ZONE - // TABLE section header. - SLED_INDENT.to_string(), - ZONE_TYPE.to_string(), - ZONE_ID.to_string(), - DISPOSITION.to_string(), - UNDERLAY_IP.to_string(), - ] - } - - fn diff_header_row() -> Vec { - vec![ - // First column is so that the header border aligns with the ZONE - // TABLE section header. 
- SLED_HEAD_INDENT.to_string(), - ZONE_TYPE.to_string(), - ZONE_ID.to_string(), - DISPOSITION.to_string(), - UNDERLAY_IP.to_string(), - STATUS.to_string(), - ] - } - - pub(super) fn metadata_heading() -> String { - format!("{METADATA_HEADING}:") - } - - pub(super) fn metadata_diff_heading() -> String { - format!("{H1_INDENT}{METADATA_HEADING}:") - } - - fn sleds_heading(prefix: char, heading: &'static str) -> String { - format!("{prefix}{SLED_HEAD_INDENT}{heading}:") - } - - fn removed_sleds_heading() -> String { - sleds_heading(UNCHANGED_PREFIX, REMOVED_SLEDS_HEADING) - } - - fn added_sleds_heading() -> String { - sleds_heading(UNCHANGED_PREFIX, ADDED_SLEDS_HEADING) - } - - fn modified_sleds_heading() -> String { - sleds_heading(UNCHANGED_PREFIX, MODIFIED_SLEDS_HEADING) - } - - fn unchanged_sleds_heading() -> String { - sleds_heading(UNCHANGED_PREFIX, UNCHANGED_SLEDS_HEADING) - } - - fn metadata_table_internal_dns() -> String { - linear_table_label(&INTERNAL_DNS_VERSION) - } - - fn metadata_table_external_dns() -> String { - linear_table_label(&EXTERNAL_DNS_VERSION) - } - - fn linear_table_label(value: &dyn fmt::Display) -> String { - format!("{value}:") - } - - fn linear_table_modified( - before: &dyn fmt::Display, - after: &dyn fmt::Display, - ) -> String { - format!("{before} {ARROW} {after}") - } - - fn linear_table_unchanged(value: &dyn fmt::Display) -> String { - format!("{value} {UNCHANGED_PARENS}") - } -} diff --git a/nexus/types/src/deployment/blueprint_diff.rs b/nexus/types/src/deployment/blueprint_diff.rs new file mode 100644 index 00000000000..c3c28a474ce --- /dev/null +++ b/nexus/types/src/deployment/blueprint_diff.rs @@ -0,0 +1,854 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Types helpful for diffing [`Blueprints`]. 
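
The module added here treats a zone as "modified" only when its disposition changes; a differing kind, underlay address, or zone-type config is recorded as a zone diff error with a human-readable reason, which is what surfaces as "mismatched zone type" and "mismatched underlay address" in the expected output earlier in this patch. Below is a standalone sketch of that classification rule, using illustrative stand-in types rather than the real omicron ones:

// Stand-in for a zone's identity-relevant fields; only `disposition` may
// legally change between the "before" and "after" sides of a diff.
#[derive(Debug, Clone, PartialEq)]
struct SketchZone {
    kind: &'static str,
    underlay_ip: &'static str,
    disposition: &'static str,
}

#[derive(Debug)]
enum ZoneDelta {
    Unchanged,
    Modified { from: &'static str, to: &'static str },
    Error(String),
}

fn classify(before: &SketchZone, after: &SketchZone) -> ZoneDelta {
    if before == after {
        ZoneDelta::Unchanged
    } else if before.kind == after.kind && before.underlay_ip == after.underlay_ip {
        // Only the disposition differs: a legal modification.
        ZoneDelta::Modified { from: before.disposition, to: after.disposition }
    } else {
        // Anything else is recorded as an error with a reason string.
        ZoneDelta::Error(format!(
            "mismatched zone: kind {} -> {}, underlay {} -> {}",
            before.kind, after.kind, before.underlay_ip, after.underlay_ip
        ))
    }
}

fn main() {
    let before = SketchZone {
        kind: "crucible",
        underlay_ip: "fd00:1122:3344:105::26",
        disposition: "in service",
    };
    let after = SketchZone { disposition: "quiesced", ..before.clone() };
    // Prints: Modified { from: "in service", to: "quiesced" }
    println!("{:?}", classify(&before, &after));
}
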
+ +use super::blueprint_display::{ + constants::*, linear_table_modified, linear_table_unchanged, BpDiffState, + BpGeneration, BpOmicronZonesSubtableSchema, BpPhysicalDisksSubtableSchema, + BpSledSubtable, BpSledSubtableData, BpSledSubtableRow, KvListWithHeading, + KvPair, +}; +use super::zone_sort_key; +use omicron_common::api::external::Generation; +use omicron_common::disk::DiskIdentity; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::SledUuid; +use sled_agent_client::ZoneKind; +use std::collections::{BTreeMap, BTreeSet}; +use std::fmt; + +use crate::deployment::{ + BlueprintMetadata, BlueprintOrCollectionDisksConfig, + BlueprintOrCollectionZoneConfig, BlueprintOrCollectionZonesConfig, + BlueprintPhysicalDisksConfig, BlueprintZoneConfig, + BlueprintZoneDisposition, BlueprintZonesConfig, DiffBeforeMetadata, + ZoneSortKey, +}; + +/// Diffs for omicron zones on a given sled with a given `BpDiffState` +#[derive(Debug)] +pub struct BpDiffZoneDetails { + pub generation_before: Option, + pub generation_after: Option, + pub zones: Vec, +} + +impl BpSledSubtableData for BpDiffZoneDetails { + fn bp_generation(&self) -> BpGeneration { + BpGeneration::Diff { + before: self.generation_before, + after: self.generation_after, + } + } + + fn rows( + &self, + state: BpDiffState, + ) -> impl Iterator { + self.zones.iter().map(move |zone| { + BpSledSubtableRow::new( + state, + vec![ + zone.kind().to_string(), + zone.id().to_string(), + zone.disposition().to_string(), + zone.underlay_address().to_string(), + ], + ) + }) + } +} + +/// A modified omicron zone +/// +/// A zone is considered modified if its `disposition` changes. All +/// modifications to other fields are considered errors, and will be recorded +/// as such. +#[derive(Debug)] +pub struct ModifiedZone { + pub prior_disposition: BlueprintZoneDisposition, + pub zone: BlueprintOrCollectionZoneConfig, +} + +impl ZoneSortKey for ModifiedZone { + fn kind(&self) -> ZoneKind { + self.zone.kind() + } + + fn id(&self) -> OmicronZoneUuid { + self.zone.id() + } +} + +impl ModifiedZone { + #[allow(clippy::result_large_err)] + pub fn new( + before: BlueprintOrCollectionZoneConfig, + after: BlueprintZoneConfig, + ) -> Result { + // Do we have any errors? If so, create a "reason" string. 
+ let mut reason = String::new(); + if before.kind() != after.kind() { + let msg = format!( + "mismatched zone kind: before: {}, after: {}\n", + before.kind(), + after.kind() + ); + reason.push_str(&msg); + } + if before.underlay_address() != after.underlay_address { + let msg = format!( + "mismatched underlay address: before: {}, after: {}\n", + before.underlay_address(), + after.underlay_address + ); + reason.push_str(&msg); + } + if !before.is_zone_type_equal(&after.zone_type) { + let msg = format!( + "mismatched zone type: after: {:#?}\n", + after.zone_type + ); + reason.push_str(&msg); + } + if reason.is_empty() { + Ok(ModifiedZone { + prior_disposition: before.disposition(), + zone: after.into(), + }) + } else { + Err(BpDiffZoneError { + zone_before: before, + zone_after: after.into(), + reason, + }) + } + } +} + +/// Details of modified zones on a given sled +#[derive(Debug)] +pub struct BpDiffZonesModified { + pub generation_before: Generation, + pub generation_after: Generation, + pub zones: Vec, +} + +impl BpSledSubtableData for BpDiffZonesModified { + fn bp_generation(&self) -> BpGeneration { + BpGeneration::Diff { + before: Some(self.generation_before), + after: Some(self.generation_after), + } + } + + fn rows( + &self, + state: BpDiffState, + ) -> impl Iterator { + self.zones.iter().map(move |zone| { + let disposition = format!( + "{} {ARROW} {}", + zone.prior_disposition, + zone.zone.disposition() + ); + BpSledSubtableRow::new( + state, + vec![ + zone.zone.kind().to_string(), + zone.zone.id().to_string(), + disposition, + zone.zone.underlay_address().to_string(), + ], + ) + }) + } +} + +#[derive(Debug)] +/// Errors arising from illegally modified zone fields +pub struct BpDiffZoneErrors { + pub generation_before: Generation, + pub generation_after: Generation, + pub errors: Vec, +} + +#[derive(Debug)] +pub struct BpDiffZoneError { + pub zone_before: BlueprintOrCollectionZoneConfig, + pub zone_after: BlueprintOrCollectionZoneConfig, + pub reason: String, +} + +/// All known zones across all known sleds, their various states, and errors +#[derive(Debug, Default)] +pub struct BpDiffZones { + pub added: BTreeMap, + pub removed: BTreeMap, + pub unchanged: BTreeMap, + pub modified: BTreeMap, + pub errors: BTreeMap, +} + +impl BpDiffZones { + pub fn new( + before: BTreeMap, + mut after: BTreeMap, + ) -> Self { + let mut diffs = BpDiffZones::default(); + for (sled_id, before_zones) in before { + let before_generation = before_zones.generation(); + let mut removed = vec![]; + if let Some(after_zones) = after.remove(&sled_id) { + let after_generation = after_zones.generation; + let mut unchanged = vec![]; + let mut modified = vec![]; + let mut errors = vec![]; + let mut added = vec![]; + + // Compare `before_zones` and `after_zones` to look + // for additions, deletions, modifications, and errors. + let before_by_id: BTreeMap<_, BlueprintOrCollectionZoneConfig> = + before_zones.zones().map(|z| (z.id(), z)).collect(); + let mut after_by_id: BTreeMap<_, BlueprintZoneConfig> = + after_zones.zones.into_iter().map(|z| (z.id, z)).collect(); + + for (zone_id, zone_before) in before_by_id { + if let Some(zone_after) = after_by_id.remove(&zone_id) { + // Are the zones equal? + if zone_before == zone_after { + unchanged.push(zone_after.into()); + } else { + // The zones are different. They are only allowed to differ in terms + // of `disposition`, otherwise we have an error. 
+ match ModifiedZone::new(zone_before, zone_after) { + Ok(modified_zone) => { + modified.push(modified_zone) + } + Err(error) => errors.push(error), + } + } + } else { + // This zone doesn't exist in `zone_after` so it must have + // been removed. + removed.push(zone_before); + } + } + // Any remaining zones in `after_by_id` are newly added + for (_, zone_after) in after_by_id { + added.push(zone_after.into()); + } + + // Add all records to `diffs` that come from either `before` or `after` + // for this `sled_id`. + if !unchanged.is_empty() { + unchanged.sort_unstable_by_key(zone_sort_key); + diffs.unchanged.insert( + sled_id, + BpDiffZoneDetails { + generation_before: Some(before_generation), + generation_after: Some(after_generation), + zones: unchanged, + }, + ); + } + if !removed.is_empty() { + removed.sort_unstable_by_key(zone_sort_key); + diffs.removed.insert( + sled_id, + BpDiffZoneDetails { + generation_before: Some(before_generation), + generation_after: Some(after_generation), + zones: removed, + }, + ); + } + if !added.is_empty() { + added.sort_unstable_by_key(zone_sort_key); + diffs.added.insert( + sled_id, + BpDiffZoneDetails { + generation_before: Some(before_generation), + generation_after: Some(after_generation), + zones: added, + }, + ); + } + if !modified.is_empty() { + modified.sort_unstable_by_key(zone_sort_key); + diffs.modified.insert( + sled_id, + BpDiffZonesModified { + generation_before: before_generation, + generation_after: after_generation, + zones: modified, + }, + ); + } + if !errors.is_empty() { + diffs.errors.insert( + sled_id, + BpDiffZoneErrors { + generation_before: before_generation, + generation_after: after_generation, + errors, + }, + ); + } + } else { + // No `after_zones` for this `sled_id`, so `before_zones` are removed + assert!(removed.is_empty()); + for zone in before_zones.zones() { + removed.push(zone); + } + + if !removed.is_empty() { + removed.sort_unstable_by_key(zone_sort_key); + diffs.removed.insert( + sled_id, + BpDiffZoneDetails { + generation_before: Some(before_generation), + generation_after: None, + zones: removed, + }, + ); + } + } + } + + // Any sleds remaining in `after` have just been added, since we remove + // sleds from `after`, that were also in `before`, in the above loop. + for (sled_id, after_zones) in after { + if !after_zones.zones.is_empty() { + diffs.added.insert( + sled_id, + BpDiffZoneDetails { + generation_before: None, + generation_after: Some(after_zones.generation), + zones: after_zones + .zones + .into_iter() + .map(|z| z.into()) + .collect(), + }, + ); + } + } + + diffs + } + + /// Return a [`BpSledSubtable`] for the given `sled_id` + /// + /// We collate all the data from each category to produce a single table. + /// The order is: + /// + /// 1. Unchanged + /// 2. Removed + /// 3. Modified + /// 4. Added + /// + /// The idea behind the order is to (a) group all changes together + /// and (b) put changes towards the bottom, so people have to scroll + /// back less. + /// + /// Errors are printed in a more freeform manner after the table is + /// displayed. 
+ pub fn to_bp_sled_subtable( + &self, + sled_id: &SledUuid, + ) -> Option { + let mut generation = BpGeneration::Diff { before: None, after: None }; + let mut rows = vec![]; + if let Some(diff) = self.unchanged.get(sled_id) { + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Unchanged)); + } + if let Some(diff) = self.removed.get(sled_id) { + // Generations never vary for the same sled, so this is harmless + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Removed)); + } + + if let Some(diff) = self.modified.get(sled_id) { + // Generations never vary for the same sled, so this is harmless + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Modified)); + } + + if let Some(diff) = self.added.get(sled_id) { + // Generations never vary for the same sled, so this is harmless + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Added)); + } + + if rows.is_empty() { + None + } else { + Some(BpSledSubtable::new( + BpOmicronZonesSubtableSchema {}, + generation, + rows, + )) + } + } +} + +#[derive(Debug)] +pub struct DiffPhysicalDisksDetails { + // Disks that come from inventory don't have generation numbers + pub before_generation: Option, + + // Disks that are removed don't have "after" generation numbers + pub after_generation: Option, + + // Disks added, removed, or unmodified + pub disks: BTreeSet, +} + +impl BpSledSubtableData for DiffPhysicalDisksDetails { + fn bp_generation(&self) -> BpGeneration { + BpGeneration::Diff { + before: self.before_generation, + after: self.after_generation, + } + } + + fn rows( + &self, + state: BpDiffState, + ) -> impl Iterator { + self.disks.iter().map(move |d| { + BpSledSubtableRow::new( + state, + vec![d.vendor.clone(), d.model.clone(), d.serial.clone()], + ) + }) + } +} + +#[derive(Debug, Default)] +pub struct BpDiffPhysicalDisks { + pub added: BTreeMap, + pub removed: BTreeMap, + pub unchanged: BTreeMap, +} + +impl BpDiffPhysicalDisks { + pub fn new( + before: BTreeMap, + mut after: BTreeMap, + ) -> Self { + let mut diffs = BpDiffPhysicalDisks::default(); + for (sled_id, before_disks) in before { + let before_generation = before_disks.generation(); + if let Some(after_disks) = after.remove(&sled_id) { + let after_generation = Some(after_disks.generation); + let a: BTreeSet = + after_disks.disks.into_iter().map(|d| d.identity).collect(); + let b = before_disks.disks(); + let added: BTreeSet<_> = a.difference(&b).cloned().collect(); + let removed: BTreeSet<_> = b.difference(&a).cloned().collect(); + let unchanged: BTreeSet<_> = + a.intersection(&b).cloned().collect(); + if !added.is_empty() { + diffs.added.insert( + sled_id, + DiffPhysicalDisksDetails { + before_generation, + after_generation, + disks: added, + }, + ); + } + if !removed.is_empty() { + diffs.removed.insert( + sled_id, + DiffPhysicalDisksDetails { + before_generation, + after_generation, + disks: removed, + }, + ); + } + if !unchanged.is_empty() { + diffs.unchanged.insert( + sled_id, + DiffPhysicalDisksDetails { + before_generation, + after_generation, + disks: unchanged, + }, + ); + } + } else { + diffs.removed.insert( + sled_id, + DiffPhysicalDisksDetails { + before_generation, + after_generation: None, + disks: before_disks.disks().into_iter().collect(), + }, + ); + } + } + + // Any sleds remaining in `after` have just been added, since we remove + // sleds from `after`, that were also in `before`, in the above loop. 
+ for (sled_id, after_disks) in after { + let added: BTreeSet = + after_disks.disks.into_iter().map(|d| d.identity).collect(); + if !added.is_empty() { + diffs.added.insert( + sled_id, + DiffPhysicalDisksDetails { + before_generation: None, + after_generation: Some(after_disks.generation), + disks: added, + }, + ); + } + } + + diffs + } + + /// Return a [`BpSledSubtable`] for the given `sled_id` + pub fn to_bp_sled_subtable( + &self, + sled_id: &SledUuid, + ) -> Option { + let mut generation = BpGeneration::Diff { before: None, after: None }; + let mut rows = vec![]; + if let Some(diff) = self.unchanged.get(sled_id) { + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Unchanged)); + } + if let Some(diff) = self.removed.get(sled_id) { + // Generations never vary for the same sled, so this is harmless + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Removed)); + } + + if let Some(diff) = self.added.get(sled_id) { + // Generations never vary for the same sled, so this is harmless + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Added)); + } + + if rows.is_empty() { + None + } else { + Some(BpSledSubtable::new( + BpPhysicalDisksSubtableSchema {}, + generation, + rows, + )) + } + } +} + +/// Summarizes the differences between two blueprints +#[derive(Debug)] +pub struct BlueprintDiff { + pub before_meta: DiffBeforeMetadata, + pub after_meta: BlueprintMetadata, + pub zones: BpDiffZones, + pub physical_disks: BpDiffPhysicalDisks, + pub sleds_added: BTreeSet, + pub sleds_removed: BTreeSet, + pub sleds_unchanged: BTreeSet, + pub sleds_modified: BTreeSet, +} + +impl BlueprintDiff { + /// Build a diff with the provided contents, verifying that the provided + /// data is valid. + pub fn new( + before_meta: DiffBeforeMetadata, + before_zones: BTreeMap, + after_meta: BlueprintMetadata, + after_zones: BTreeMap, + before_disks: BTreeMap, + after_disks: BTreeMap, + ) -> Self { + let before_sleds: BTreeSet<_> = + before_zones.keys().chain(before_disks.keys()).collect(); + let after_sleds: BTreeSet<_> = + after_zones.keys().chain(after_disks.keys()).collect(); + let all_sleds: BTreeSet<_> = + before_sleds.union(&after_sleds).map(|&sled_id| *sled_id).collect(); + + // All sleds that have zones or disks in `after_*`, but not `before_*` + // have been added. + let sleds_added: BTreeSet<_> = after_sleds + .difference(&before_sleds) + .map(|&sled_id| *sled_id) + .collect(); + + // All sleds that have zones or disks in `before_*`, but not `after_*` + // have been removed. + let sleds_removed: BTreeSet<_> = before_sleds + .difference(&after_sleds) + .map(|&sled_id| *sled_id) + .collect(); + + let zones = BpDiffZones::new(before_zones, after_zones); + let physical_disks = + BpDiffPhysicalDisks::new(before_disks, after_disks); + + // Sleds that haven't been added or removed are either unchanged or + // modified. + let sleds_unchanged_or_modified: BTreeSet<_> = all_sleds + .iter() + .filter(|&sled_id| { + !sleds_added.contains(sled_id) + && !sleds_removed.contains(sled_id) + }) + .map(|s| *s) + .collect(); + + // Sleds are modified if any zones or disks on those sleds are anything + // other than unchanged. 
+ let mut sleds_modified = sleds_unchanged_or_modified.clone(); + sleds_modified.retain(|sled_id| { + physical_disks.added.contains_key(sled_id) + || physical_disks.removed.contains_key(sled_id) + || zones.added.contains_key(sled_id) + || zones.removed.contains_key(sled_id) + || zones.modified.contains_key(sled_id) + || zones.errors.contains_key(sled_id) + }); + + // The rest of the sleds must be unchanged. + let unchanged_sleds: BTreeSet<_> = sleds_unchanged_or_modified + .difference(&sleds_modified) + .map(|sled_id| *sled_id) + .collect(); + + BlueprintDiff { + before_meta, + after_meta, + zones, + physical_disks, + sleds_added, + sleds_removed, + sleds_unchanged: unchanged_sleds, + sleds_modified, + } + } + + /// Return a struct that can be used to display the diff. + pub fn display(&self) -> BlueprintDiffDisplay<'_> { + BlueprintDiffDisplay::new(self) + } +} + +/// Wrapper to allow a [`BlueprintDiff`] to be displayed. +/// +/// Returned by [`BlueprintDiff::display()`]. +#[derive(Clone, Debug)] +#[must_use = "this struct does nothing unless displayed"] +pub struct BlueprintDiffDisplay<'diff> { + pub diff: &'diff BlueprintDiff, + // TODO: add colorization with a stylesheet +} + +impl<'diff> BlueprintDiffDisplay<'diff> { + #[inline] + fn new(diff: &'diff BlueprintDiff) -> Self { + Self { diff } + } + + pub fn make_metadata_diff_table(&self) -> KvListWithHeading { + let diff = self.diff; + let mut kv = vec![]; + match &diff.before_meta { + DiffBeforeMetadata::Collection { .. } => { + // Collections don't have DNS versions, so this is new. + kv.push(KvPair::new( + BpDiffState::Added, + INTERNAL_DNS_VERSION, + linear_table_modified( + &NOT_PRESENT_IN_COLLECTION_PARENS, + &diff.after_meta.internal_dns_version, + ), + )); + kv.push(KvPair::new( + BpDiffState::Added, + EXTERNAL_DNS_VERSION, + linear_table_modified( + &NOT_PRESENT_IN_COLLECTION_PARENS, + &diff.after_meta.external_dns_version, + ), + )); + } + DiffBeforeMetadata::Blueprint(before) => { + if before.internal_dns_version + != diff.after_meta.internal_dns_version + { + kv.push(KvPair::new( + BpDiffState::Modified, + INTERNAL_DNS_VERSION, + linear_table_modified( + &before.internal_dns_version, + &diff.after_meta.internal_dns_version, + ), + )); + } else { + kv.push(KvPair::new( + BpDiffState::Unchanged, + INTERNAL_DNS_VERSION, + linear_table_unchanged(&before.internal_dns_version), + )); + }; + + if before.external_dns_version + != diff.after_meta.external_dns_version + { + kv.push(KvPair::new( + BpDiffState::Modified, + EXTERNAL_DNS_VERSION, + linear_table_modified( + &before.external_dns_version, + &diff.after_meta.external_dns_version, + ), + )); + } else { + kv.push(KvPair::new( + BpDiffState::Unchanged, + EXTERNAL_DNS_VERSION, + linear_table_unchanged(&before.external_dns_version), + )); + }; + } + } + + KvListWithHeading::new(METADATA_HEADING, kv) + } + + /// Write out physical disk and zone tables for a given `sled_id` + fn write_tables( + &self, + f: &mut fmt::Formatter<'_>, + sled_id: &SledUuid, + ) -> fmt::Result { + // Write the physical disks table if it exists + if let Some(table) = + self.diff.physical_disks.to_bp_sled_subtable(sled_id) + { + writeln!(f, "{table}\n")?; + } + + // Write the zones table if it exists + if let Some(table) = self.diff.zones.to_bp_sled_subtable(sled_id) { + writeln!(f, "{table}\n")?; + } + + Ok(()) + } +} + +impl<'diff> fmt::Display for BlueprintDiffDisplay<'diff> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let diff = self.diff; + + // Print things differently based on 
whether the diff is between a + // collection and a blueprint, or a blueprint and a blueprint. + match &diff.before_meta { + DiffBeforeMetadata::Collection { id } => { + writeln!( + f, + "from: collection {}\n\ + to: blueprint {}", + id, diff.after_meta.id, + )?; + } + DiffBeforeMetadata::Blueprint(before) => { + writeln!( + f, + "from: blueprint {}\n\ + to: blueprint {}\n", + before.id, diff.after_meta.id + )?; + } + } + + // Write out sled information + // + // The order is: + // + // 1. Unchanged + // 2. Removed + // 3. Modified + // 4. Added + // 5. Errors + // + // The idea behind the order is to (a) group all changes together + // and (b) put changes towards the bottom, so people have to scroll + // back less. + // + // We put errors at the bottom to ensure they are seen immediately. + + // Write out tables for unchanged sleds + if !diff.sleds_unchanged.is_empty() { + writeln!(f, " UNCHANGED SLEDS:\n")?; + for sled_id in &diff.sleds_unchanged { + writeln!(f, " sled {sled_id}:\n")?; + self.write_tables(f, sled_id)?; + } + } + + // Write out tables for removed sleds + if !diff.sleds_removed.is_empty() { + writeln!(f, " REMOVED SLEDS:\n")?; + for sled_id in &diff.sleds_removed { + writeln!(f, " sled {sled_id}:\n")?; + self.write_tables(f, sled_id)?; + } + } + + // Write out tables for modified sleds + if !diff.sleds_modified.is_empty() { + writeln!(f, " MODIFIED SLEDS:\n")?; + for sled_id in &diff.sleds_modified { + writeln!(f, " sled {sled_id}:\n")?; + self.write_tables(f, sled_id)?; + } + } + + // Write out tables for added sleds + if !diff.sleds_added.is_empty() { + writeln!(f, " ADDED SLEDS:\n")?; + for sled_id in &diff.sleds_added { + writeln!(f, " sled {sled_id}:\n")?; + self.write_tables(f, sled_id)?; + } + } + + // Write out zone errors. + if !diff.zones.errors.is_empty() { + writeln!(f, "ERRORS:")?; + for (sled_id, errors) in &diff.zones.errors { + writeln!(f, "\n sled {sled_id}\n")?; + writeln!( + f, + " zone diff errors: before gen {}, after gen {}\n", + errors.generation_before, errors.generation_after + )?; + + for err in &errors.errors { + writeln!(f, " zone id: {}", err.zone_before.id())?; + writeln!(f, " reason: {}", err.reason)?; + } + } + } + + // Write out metadata diff table + writeln!(f, "{}", self.make_metadata_diff_table())?; + + Ok(()) + } +} diff --git a/nexus/types/src/deployment/blueprint_display.rs b/nexus/types/src/deployment/blueprint_display.rs new file mode 100644 index 00000000000..d5dc5e3074a --- /dev/null +++ b/nexus/types/src/deployment/blueprint_display.rs @@ -0,0 +1,331 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Types helpful for rendering [`Blueprints`]. 
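
A small usage sketch (not from this patch) of the key/value list type defined below: each rendered line is prefixed with the `BpDiffState` prefix character and keys are padded to a common width. The function name and version numbers are invented for illustration; the module's own constants and helpers are assumed.

    fn example_metadata_list() -> String {
        let list = KvListWithHeading::new_unchanged(
            METADATA_HEADING,
            vec![
                (INTERNAL_DNS_VERSION, linear_table_unchanged(&1)),
                (EXTERNAL_DNS_VERSION, linear_table_unchanged(&2)),
            ],
        );
        // Rendering goes through the Display impl; every pair here is
        // Unchanged, so each line starts with the space prefix.
        list.to_string()
    }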
+
+use omicron_common::api::external::Generation;
+use std::fmt;
+
+pub mod constants {
+    pub(super) const ADDED_PREFIX: char = '+';
+    pub(super) const REMOVED_PREFIX: char = '-';
+    pub(super) const MODIFIED_PREFIX: char = '*';
+    pub(super) const UNCHANGED_PREFIX: char = ' ';
+
+    pub const ARROW: &str = "->";
+    pub const METADATA_HEADING: &str = "METADATA";
+    pub const CREATED_BY: &str = "created by";
+    pub const CREATED_AT: &str = "created at";
+    pub const INTERNAL_DNS_VERSION: &str = "internal DNS version";
+    pub const EXTERNAL_DNS_VERSION: &str = "external DNS version";
+    pub const COMMENT: &str = "comment";
+
+    pub const UNCHANGED_PARENS: &str = "(unchanged)";
+    pub const NONE_PARENS: &str = "(none)";
+    pub const NOT_PRESENT_IN_COLLECTION_PARENS: &str =
+        "(not present in collection)";
+}
+use constants::*;
+
+/// The state of a sled or resource (e.g. zone or physical disk) in this
+/// blueprint, with regards to the parent blueprint
+#[derive(Debug, Clone, Copy)]
+pub enum BpDiffState {
+    Unchanged,
+    Removed,
+    Modified,
+    Added,
+}
+
+impl BpDiffState {
+    pub fn prefix(&self) -> char {
+        match self {
+            BpDiffState::Unchanged => UNCHANGED_PREFIX,
+            BpDiffState::Removed => REMOVED_PREFIX,
+            BpDiffState::Modified => MODIFIED_PREFIX,
+            BpDiffState::Added => ADDED_PREFIX,
+        }
+    }
+}
+
+impl fmt::Display for BpDiffState {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let s = match self {
+            BpDiffState::Unchanged => "UNCHANGED",
+            BpDiffState::Removed => "REMOVED",
+            BpDiffState::Modified => "MODIFIED",
+            BpDiffState::Added => "ADDED",
+        };
+        write!(f, "{s}")
+    }
+}
+
+/// A wrapper around generation numbers for blueprints or blueprint diffs
+#[derive(Debug, Clone, Copy)]
+pub enum BpGeneration {
+    // A value in a single blueprint
+    Value(Generation),
+
+    // A diff between two blueprints
+    Diff { before: Option<Generation>, after: Option<Generation> },
+}
+
+impl fmt::Display for BpGeneration {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            BpGeneration::Value(generation) => {
+                write!(f, "at generation {generation}")
+            }
+            BpGeneration::Diff { before: None, after: Some(after) } => {
+                write!(f, "at generation {after}")
+            }
+            BpGeneration::Diff { before: Some(before), after: None } => {
+                write!(f, "from generation {before}")
+            }
+            BpGeneration::Diff { before: Some(before), after: Some(after) } => {
+                if before == after {
+                    write!(f, "at generation {after}")
+                } else {
+                    write!(f, "generation {before} -> {after}")
+                }
+            }
+            BpGeneration::Diff { before: None, after: None } => {
+                write!(f, "Error: unknown generation")
+            }
+        }
+    }
+}
+
+/// A row in a [`BpSledSubtable`]
+pub struct BpSledSubtableRow {
+    state: BpDiffState,
+    columns: Vec<String>,
+}
+
+impl BpSledSubtableRow {
+    pub fn new(state: BpDiffState, columns: Vec<String>) -> Self {
+        BpSledSubtableRow { state, columns }
+    }
+}
+
+/// Metadata about all instances of a specific type of [`BpSledSubtable`],
+/// such as omicron zones or physical disks.
+pub trait BpSledSubtableSchema {
+    fn table_name(&self) -> &'static str;
+    fn column_names(&self) -> &'static [&'static str];
+}
+
+// Provide data specific to an instance of a [`BpSledSubtable`]
+pub trait BpSledSubtableData {
+    fn bp_generation(&self) -> BpGeneration;
+    fn rows(
+        &self,
+        state: BpDiffState,
+    ) -> impl Iterator<Item = BpSledSubtableRow>;
+}
+
+/// A table specific to a sled resource, such as a zone or disk.
+/// `BpSledSubtable`s are always nested under [`BpSledTable`]s.
+pub struct BpSledSubtable { + table_name: &'static str, + column_names: &'static [&'static str], + generation: BpGeneration, + rows: Vec, +} + +impl BpSledSubtable { + pub fn new( + schema: impl BpSledSubtableSchema, + generation: BpGeneration, + rows: Vec, + ) -> BpSledSubtable { + BpSledSubtable { + table_name: schema.table_name(), + column_names: schema.column_names(), + generation, + rows, + } + } + + /// Compute the max column widths based on the contents of `column_names` + // and `rows`. + fn column_widths(&self) -> Vec { + let mut widths: Vec = + self.column_names.iter().map(|s| s.len()).collect(); + + for row in &self.rows { + assert_eq!(row.columns.len(), widths.len()); + for (i, s) in row.columns.iter().enumerate() { + widths[i] = usize::max(s.len(), widths[i]); + } + } + + widths + } +} + +const SUBTABLE_INDENT: usize = 4; +const COLUMN_GAP: usize = 3; + +impl fmt::Display for BpSledSubtable { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let widths = self.column_widths(); + let mut total_width = + widths.iter().fold(0, |acc, i| acc + i + COLUMN_GAP); + total_width -= COLUMN_GAP; + + // Write the name of the subtable + writeln!( + f, + "{: &'static str { + "physical disks" + } + + fn column_names(&self) -> &'static [&'static str] { + &["vendor", "model", "serial"] + } +} + +/// The [`BpSledSubtable`] schema for omicron zones +pub struct BpOmicronZonesSubtableSchema {} +impl BpSledSubtableSchema for BpOmicronZonesSubtableSchema { + fn table_name(&self) -> &'static str { + "omicron zones" + } + fn column_names(&self) -> &'static [&'static str] { + &["zone type", "zone id", "disposition", "underlay IP"] + } +} + +// An entry in a [`KvListWithHeading`] +#[derive(Debug)] +pub struct KvPair { + state: BpDiffState, + key: String, + val: String, +} + +impl KvPair { + pub fn new_unchanged, S2: Into>( + key: S1, + val: S2, + ) -> KvPair { + KvPair { + state: BpDiffState::Unchanged, + key: key.into(), + val: val.into(), + } + } + + pub fn new, S2: Into>( + state: BpDiffState, + key: S1, + val: S2, + ) -> KvPair { + KvPair { state, key: key.into(), val: val.into() } + } +} + +// A top-to-bottom list of KV pairs with a heading +#[derive(Debug)] +pub struct KvListWithHeading { + heading: &'static str, + kv: Vec, +} + +impl KvListWithHeading { + pub fn new_unchanged, S2: Into>( + heading: &'static str, + kv: Vec<(S1, S2)>, + ) -> KvListWithHeading { + let kv = + kv.into_iter().map(|(k, v)| KvPair::new_unchanged(k, v)).collect(); + KvListWithHeading { heading, kv } + } + + pub fn new(heading: &'static str, kv: Vec) -> KvListWithHeading { + KvListWithHeading { heading, kv } + } + + /// Compute the max width of the keys for alignment purposes + fn max_key_width(&self) -> usize { + self.kv.iter().fold(0, |acc, kv| usize::max(acc, kv.key.len())) + } +} + +impl fmt::Display for KvListWithHeading { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Write the heading + writeln!(f, " {}:", self.heading)?; + + // Write the rows + let key_width = self.max_key_width() + 1; + for kv in &self.kv { + let prefix = kv.state.prefix(); + writeln!( + f, + "{prefix: String { + format!("{before} {ARROW} {after}") +} + +pub fn linear_table_unchanged(value: &dyn fmt::Display) -> String { + format!("{value} {UNCHANGED_PARENS}") +} diff --git a/nexus/types/src/lib.rs b/nexus/types/src/lib.rs index b6286c3f649..494573e8341 100644 --- a/nexus/types/src/lib.rs +++ b/nexus/types/src/lib.rs @@ -34,4 +34,3 @@ pub mod external_api; pub mod identity; pub mod internal_api; pub mod inventory; 
-mod sectioned_table; diff --git a/nexus/types/src/sectioned_table.rs b/nexus/types/src/sectioned_table.rs deleted file mode 100644 index addb4c876e8..00000000000 --- a/nexus/types/src/sectioned_table.rs +++ /dev/null @@ -1,357 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Support for tables with builtin sections. -//! -//! This could live in its own crate (within omicron, or even on crates.io), -//! but is here for now. - -use std::collections::HashSet; -use std::iter; - -use tabled::builder::Builder; -use tabled::grid::config::Border; -use tabled::settings::object::Columns; -use tabled::settings::object::Object; -use tabled::settings::object::Rows; -use tabled::settings::span::ColumnSpan; -use tabled::settings::Modify; -use tabled::settings::Padding; -use tabled::settings::Style; -use tabled::Table; - -/// A sectioned table. -/// -/// A sectioned table allows sections and subsections to be defined, with each -/// section having a title and a list of rows in that section. The section -/// headers and other rows can break standard table conventions. -/// -/// There are two kinds of special rows: -/// -/// 1. Headings: rows that span all columns. -/// 2. Spanned rows: also rows that span all columns, but not as headings. -/// -/// This builder does not currently automatically indent sections or records -- -/// that can be done in the future, though it has to be done with some care. -#[derive(Debug)] -pub(crate) struct StBuilder { - builder: Builder, - // Rows that are marked off with ---- on both sides. - header_rows: Vec, - // Heading rows that span all columns. - headings: Vec<(HeadingSpacing, usize)>, - // Other rows that span all columns. - spanned_rows: Vec, -} - -impl StBuilder { - pub(crate) fn new() -> Self { - let builder = Builder::new(); - - Self { - builder, - header_rows: Vec::new(), - headings: Vec::new(), - spanned_rows: Vec::new(), - } - } - - /// Adds a header row to the table. - /// - /// This row contains column titles, along with *two* initial columns of - /// padding. The border will extend to the first column but not the second - /// one. - pub(crate) fn push_header_row(&mut self, row: Vec) { - self.header_rows.push(self.builder.count_records()); - self.push_record(row); - } - - /// Adds a record to the table. - pub(crate) fn push_record(&mut self, row: Vec) { - self.builder.push_record(row); - } - - /// Makes a new section of the table. - /// - /// This section will not be added to the table unless at least one row is - /// added to it, either directly or via nested sections. - pub(crate) fn make_section( - &mut self, - spacing: SectionSpacing, - heading: String, - cb: impl FnOnce(&mut StSectionBuilder), - ) { - let mut section = StSectionBuilder::from_builder( - self, - spacing.resolve(self.headings.is_empty()), - heading, - ); - cb(&mut section); - section.finish_with_root(self); - } - - /// Does the final build to produce a [`Table`]. - pub(crate) fn build(mut self) -> Table { - // Insert a column between 0 and 1 to enable header borders to be - // properly aligned with the rest of the text. - self.builder.insert_column( - 1, - iter::repeat("").take(self.builder.count_records()), - ); - - let mut table = self.builder.build(); - table - .with(Style::blank()) - .with( - // Columns 0 and 1 (indent/gutter) should not have any border - // and padding. 
- Modify::new(Columns::new(0..=1)) - .with(Border::empty()) - .with(Padding::zero()), - ) - .with( - Modify::new(Columns::single(2)) - // Column 2 (first column of actual data) should not have - // left padding. - .with(Padding::new(0, 1, 0, 0)), - ) - .with( - Modify::new(Columns::last()) - // Rightmost column should have no border and padding. - .with(Border::empty()) - .with(Padding::zero()), - ); - apply_normal_row_settings( - &mut table, - self.header_rows - .iter() - .copied() - .chain(self.headings.iter().map(|(_, i)| *i)) - .chain(self.spanned_rows.iter().copied()) - .collect(), - ); - apply_header_row_settings(&mut table, &self.header_rows); - apply_heading_settings(&mut table, &self.headings); - apply_spanned_row_settings(&mut table, &self.spanned_rows); - - table - } -} - -/// A part of a sectioned table. -/// -/// Created by [`StBuilder::make_section`] or -/// [`StNestedBuilder::make_subsection`]. -#[derive(Debug)] -pub(crate) struct StSectionBuilder { - start_index: usize, - spacing: HeadingSpacing, - heading: String, - rows: Vec>, - // Indexes for special rows, stored as absolute indexes wrt the overall - // zone table (i.e. start_index + 1 + index in rows). - nested_headings: Vec<(HeadingSpacing, usize)>, - spanned_rows: Vec, -} - -impl StSectionBuilder { - fn from_builder( - builder: &StBuilder, - spacing: HeadingSpacing, - heading: String, - ) -> Self { - let start_index = builder.builder.count_records(); - Self { - start_index, - spacing, - heading, - rows: Vec::new(), - nested_headings: Vec::new(), - spanned_rows: Vec::new(), - } - } - - pub(crate) fn is_empty(&self) -> bool { - self.rows.is_empty() - } - - pub(crate) fn push_record(&mut self, row: Vec) { - self.rows.push(row); - } - - pub(crate) fn push_spanned_row(&mut self, row: String) { - self.spanned_rows.push(self.next_row()); - self.rows.push(vec![row]); - } - - pub(crate) fn push_nested_heading( - &mut self, - spacing: SectionSpacing, - heading: String, - ) { - self.nested_headings.push(( - spacing.resolve(self.nested_headings.is_empty()), - self.next_row(), - )); - self.rows.push(vec![heading]); - } - - /// Makes a new subsection of this section. - /// - /// This subsection will not be added to the table unless at least one row - /// is added to it, either directly or via nested sections. - pub(crate) fn make_subsection( - &mut self, - spacing: SectionSpacing, - heading: String, - cb: impl FnOnce(&mut Self), - ) { - let mut subsection = Self { - start_index: self.next_row(), - spacing: spacing.resolve(self.nested_headings.is_empty()), - heading, - rows: Vec::new(), - nested_headings: Vec::new(), - spanned_rows: Vec::new(), - }; - cb(&mut subsection); - subsection.finish_with_parent(self); - } - - fn next_row(&self) -> usize { - // +1 to account for the heading row. - self.start_index + 1 + self.rows.len() - } - - fn finish_with_root(self, root: &mut StBuilder) { - if !self.rows.is_empty() { - // Push all the indexes. - root.headings.push((self.spacing, self.start_index)); - root.headings.extend(self.nested_headings); - root.spanned_rows.extend(self.spanned_rows); - - // Push all the rows. - root.push_record(vec![self.heading]); - for row in self.rows { - root.push_record(row); - } - } - } - - fn finish_with_parent(self, parent: &mut StSectionBuilder) { - if !self.rows.is_empty() { - // Push all the indexes. - parent.nested_headings.push((self.spacing, self.start_index)); - parent.nested_headings.extend(self.nested_headings); - parent.spanned_rows.extend(self.spanned_rows); - - // Push all the rows. 
- parent.rows.push(vec![self.heading]); - parent.rows.extend(self.rows); - } - } -} - -/// Spacing for sections. -#[derive(Copy, Clone, Debug)] -pub(crate) enum SectionSpacing { - /// Always add a line of spacing above the section heading. - /// - /// There will always be one row of padding above the heading. - Always, - - /// Only add a line of spacing if this isn't the first heading in the - /// series. - IfNotFirst, - - /// Do not add a line of spacing above the heading. - Never, -} - -impl SectionSpacing { - fn resolve(self, is_empty: bool) -> HeadingSpacing { - match (self, is_empty) { - (SectionSpacing::Always, _) => HeadingSpacing::Yes, - (SectionSpacing::IfNotFirst, true) => HeadingSpacing::No, - (SectionSpacing::IfNotFirst, false) => HeadingSpacing::Yes, - (SectionSpacing::Never, _) => HeadingSpacing::No, - } - } -} - -/// Spacing for headings -- a resolved form of [`SectionSpacing`]. -#[derive(Copy, Clone, Debug)] -enum HeadingSpacing { - /// Add a line of padding above the heading. - Yes, - - /// Do not add a line of padding above the heading. - No, -} - -fn apply_normal_row_settings(table: &mut Table, special_rows: HashSet) { - for row in 0..table.count_rows() { - if special_rows.contains(&row) { - continue; - } - - table.with( - Modify::new((row, 0)) - // Adjust the first column to span 2 (the extra indent). - .with(ColumnSpan::new(2)), - ); - } -} - -fn apply_header_row_settings(table: &mut Table, header_rows: &[usize]) { - for &hr in header_rows { - table.with( - Modify::new(Rows::single(hr).intersect(Columns::new(1..))) - // Column 1 onwards (everything after the initial indent) have - // borders. - .with(Border::new( - // top/bottom - Some('-'), - Some('-'), - // no left/right - None, - None, - // corners - Some('-'), - Some('-'), - Some('-'), - Some('-'), - )), - ); - } -} - -fn apply_heading_settings( - table: &mut Table, - headings: &[(HeadingSpacing, usize)], -) { - for &(kind, h) in headings { - let padding = match kind { - HeadingSpacing::Yes => Padding::new(0, 0, 1, 0), - HeadingSpacing::No => Padding::new(0, 0, 0, 0), - }; - - table.with( - Modify::new((h, 0)) - // Adjust each heading row to span the whole row. - .with(ColumnSpan::max()) - .with(padding), - ); - } -} - -fn apply_spanned_row_settings(table: &mut Table, spanned_rows: &[usize]) { - for &sr in spanned_rows { - table.with( - Modify::new((sr, 0)) - // Adjust each spanned row to span the whole row. 
- .with(ColumnSpan::max()), - ); - } -} diff --git a/oximeter/collector/tests/output/self-stat-schema.json b/oximeter/collector/tests/output/self-stat-schema.json index 1b18362a263..4a56a81c867 100644 --- a/oximeter/collector/tests/output/self-stat-schema.json +++ b/oximeter/collector/tests/output/self-stat-schema.json @@ -39,7 +39,7 @@ } ], "datum_type": "cumulative_u64", - "created": "2024-05-03T22:37:51.326086935Z" + "created": "2024-05-11T09:41:23.361298682Z" }, "oximeter_collector:failed_collections": { "timeseries_name": "oximeter_collector:failed_collections", @@ -86,6 +86,6 @@ } ], "datum_type": "cumulative_u64", - "created": "2024-05-03T22:37:51.327389025Z" + "created": "2024-05-11T09:41:23.361907436Z" } -} \ No newline at end of file +} diff --git a/sled-storage/src/disk.rs b/sled-storage/src/disk.rs index c9e848559eb..608d3678da9 100644 --- a/sled-storage/src/disk.rs +++ b/sled-storage/src/disk.rs @@ -25,7 +25,16 @@ use crate::config::MountConfig; use crate::dataset; #[derive( - Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, + Clone, + Debug, + Deserialize, + Serialize, + JsonSchema, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, )] pub struct OmicronPhysicalDiskConfig { pub identity: DiskIdentity, From 316a7d21b105401a9663c3bc5b8de149820fedb2 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 16 May 2024 11:26:24 -0700 Subject: [PATCH 18/37] Update Rust crate thiserror to v1.0.60 (#5775) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9ead6007ede..ca2a2e519c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9535,18 +9535,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.59" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" +checksum = "579e9083ca58dd9dcf91a9923bb9054071b9ebbd800b342194c9feb0ee89fc18" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.59" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" +checksum = "e2470041c06ec3ac1ab38d0356a6119054dedaea53e12fbefc0de730a1c08524" dependencies = [ "proc-macro2", "quote", From 12391a90a15d6d40754e05bb21fca533add0b540 Mon Sep 17 00:00:00 2001 From: Eliza Weisman Date: Thu, 16 May 2024 12:56:51 -0700 Subject: [PATCH 19/37] [nexus] really deflake test_instance_watcher_metrics (#5784) --- nexus/tests/integration_tests/metrics.rs | 54 +++++++++++++++++------- 1 file changed, 38 insertions(+), 16 deletions(-) diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs index abcc7f1c75d..5fbff216d94 100644 --- a/nexus/tests/integration_tests/metrics.rs +++ b/nexus/tests/integration_tests/metrics.rs @@ -318,6 +318,28 @@ pub async fn timeseries_query( async fn test_instance_watcher_metrics( cptestctx: &ControlPlaneTestContext, ) { + macro_rules! 
assert_gte { + ($a:expr, $b:expr) => {{ + let a = $a; + let b = $b; + assert!( + $a >= $b, + concat!( + "assertion failed: ", + stringify!($a), + " >= ", + stringify!($b), + ", ", + stringify!($a), + " = {:?}, ", + stringify!($b), + " = {:?}", + ), + a, + b + ); + }}; + } use oximeter::types::FieldValue; const INSTANCE_ID_FIELD: &str = "instance_id"; const STATE_FIELD: &str = "state"; @@ -462,7 +484,7 @@ async fn test_instance_watcher_metrics( .find(|t| t.name() == "virtual_machine:check") .expect("missing virtual_machine:check"); let ts = dbg!(count_state(&checks, instance1_uuid, STATE_STARTING)); - assert_eq!(ts, 1); + assert_gte!(ts, 1); // okay, make another instance eprintln!("--- creating instance 2 ---"); @@ -479,8 +501,8 @@ async fn test_instance_watcher_metrics( .expect("missing virtual_machine:check"); let ts1 = dbg!(count_state(&checks, instance1_uuid, STATE_STARTING)); let ts2 = dbg!(count_state(&checks, instance2_uuid, STATE_STARTING)); - assert_eq!(ts1, 2); - assert_eq!(ts2, 1); + assert_gte!(ts1, 2); + assert_gte!(ts2, 1); // poke instance 1 to get it into the running state eprintln!("--- starting instance 1 ---"); @@ -498,9 +520,9 @@ async fn test_instance_watcher_metrics( dbg!(count_state(&checks, instance1_uuid, STATE_STARTING)); let ts1_running = dbg!(count_state(&checks, instance1_uuid, STATE_RUNNING)); let ts2 = dbg!(count_state(&checks, instance2_uuid, STATE_STARTING)); - assert_eq!(ts1_starting, 2); - assert_eq!(ts1_running, 1); - assert_eq!(ts2, 2); + assert_gte!(ts1_starting, 2); + assert_gte!(ts1_running, 1); + assert_gte!(ts2, 2); // poke instance 2 to get it into the Running state. eprintln!("--- starting instance 2 ---"); @@ -528,11 +550,11 @@ async fn test_instance_watcher_metrics( let ts2_starting = dbg!(count_state(&checks, instance2_uuid, STATE_STARTING)); let ts2_running = dbg!(count_state(&checks, instance2_uuid, STATE_RUNNING)); - assert_eq!(ts1_starting, 2); - assert_eq!(ts1_running, 1); - assert_eq!(ts1_stopping, 1); - assert_eq!(ts2_starting, 2); - assert_eq!(ts2_running, 1); + assert_gte!(ts1_starting, 2); + assert_gte!(ts1_running, 1); + assert_gte!(ts1_stopping, 1); + assert_gte!(ts2_starting, 2); + assert_gte!(ts2_running, 1); // simulate instance 1 completing its stop, which will remove it from the // set of active instances in CRDB. now, it won't be checked again. @@ -556,11 +578,11 @@ async fn test_instance_watcher_metrics( let ts2_starting = dbg!(count_state(&checks, instance2_uuid, STATE_STARTING)); let ts2_running = dbg!(count_state(&checks, instance2_uuid, STATE_RUNNING)); - assert_eq!(ts1_starting, 2); - assert_eq!(ts1_running, 1); - assert_eq!(ts1_stopping, 1); - assert_eq!(ts2_starting, 2); - assert_eq!(ts2_running, 2); + assert_gte!(ts1_starting, 2); + assert_gte!(ts1_running, 1); + assert_gte!(ts1_stopping, 1); + assert_gte!(ts2_starting, 2); + assert_gte!(ts2_running, 2); } /// Wait until a producer is registered with Oximeter. From 5d5aab50d593dcab01028d026bd135e33486fda5 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Thu, 16 May 2024 16:31:34 -0400 Subject: [PATCH 20/37] Blueprint builder: Guard against unknown external networking inputs (#5776) This started as a small cleanup to make `builder.rs` a little less noisy (the first commit, that just moves all the external networking "what IPs are in use and what IP should I pick next" logic into a separate module). 
As I was doing that, though, I noticed we only look at the parent blueprint when deciding what IPs are used, and we completely ignore `PlanningInput` (which tells us what IPs were in use in the database "recently"). I _think_ we should never have an IP or NIC in the database that isn't also in the parent blueprint: blueprints are the source of how IPs/NICs are created, and we shouldn't prune zones from the blueprint until we've confirmed that external networking records for those zones have been deleted. I made this a planning error: if we get an input that reports external IPs or NICs that the parent blueprint doesn't know about, we bail out and fail to plan. Assuming all of the above is correct (please correct me if any of it seems wrong!), it is still possible for this to fail, I think, if we had a sequence like this: 1. Nexus A generates a `PlanningInput` by reading from CRDB 2. Nexus B executes on a target blueprint that removes IPs/NICs from CRDB 3. Nexus B regenerates a new blueprint and prunes the zone(s) associated with the IPs/NICs from step 2 4. Nexus B makes this new blueprint the target 5. Nexus A attempts to generate a new blueprint with its `PlanningInput` from step 1 This isn't possible today for at least a couple reasons: we don't do prune zones as in step 3 at all, and the timing for steps 3-5 probably require automated blueprint regeneration. I think it could eventually be possible, but seems fine? Nexus A would fail to plan in step 5, but it would succeed the next time it tried (because it would generate a new `PlanningInput` that reflected the changes from step 2). --- .../planning/src/blueprint_builder/builder.rs | 300 ++++------------ .../blueprint_builder/external_networking.rs | 331 ++++++++++++++++++ .../planning/src/blueprint_builder/mod.rs | 1 + nexus/reconfigurator/planning/src/planner.rs | 39 +++ .../types/src/deployment/network_resources.rs | 24 +- nexus/types/src/deployment/planning_input.rs | 6 + nexus/types/src/deployment/tri_map.rs | 4 + 7 files changed, 475 insertions(+), 230 deletions(-) create mode 100644 nexus/reconfigurator/planning/src/blueprint_builder/external_networking.rs diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 8c8f4f3e29c..45aea754734 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -7,12 +7,9 @@ use crate::ip_allocator::IpAllocator; use crate::planner::ZoneExpungeReason; use anyhow::anyhow; -use anyhow::bail; -use debug_ignore::DebugIgnore; use internal_dns::config::Host; use internal_dns::config::Zone; use ipnet::IpAdd; -use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_inventory::now_db_precision; use nexus_types::deployment::blueprint_zone_type; use nexus_types::deployment::Blueprint; @@ -35,13 +32,9 @@ use omicron_common::address::get_internal_dns_server_addresses; use omicron_common::address::get_sled_address; use omicron_common::address::get_switch_zone_address; use omicron_common::address::CP_SERVICES_RESERVED_ADDRESSES; -use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; -use omicron_common::address::NEXUS_OPTE_IPV6_SUBNET; use omicron_common::address::NTP_PORT; use omicron_common::address::SLED_RESERVED_ADDRESSES; use omicron_common::api::external::Generation; -use omicron_common::api::external::IpNet; -use omicron_common::api::external::MacAddr; use omicron_common::api::external::Vni; use 
omicron_common::api::internal::shared::NetworkInterface; use omicron_common::api::internal::shared::NetworkInterfaceKind; @@ -64,13 +57,14 @@ use std::collections::BTreeSet; use std::collections::HashSet; use std::hash::Hash; use std::net::IpAddr; -use std::net::Ipv4Addr; use std::net::Ipv6Addr; use std::net::SocketAddrV6; use thiserror::Error; use typed_rng::TypedUuidRng; use typed_rng::UuidRng; +use super::external_networking::BuilderExternalNetworking; +use super::external_networking::ExternalNetworkingChoice; use super::zones::is_already_expunged; use super::zones::BuilderZoneState; use super::zones::BuilderZonesConfig; @@ -142,9 +136,10 @@ pub struct BlueprintBuilder<'a> { /// previous blueprint, on which this one will be based parent_blueprint: &'a Blueprint, - // These fields are used to allocate resources from sleds. + // These fields are used to allocate resources for sleds. input: &'a PlanningInput, sled_ip_allocators: BTreeMap, + external_networking: BuilderExternalNetworking<'a>, // These fields will become part of the final blueprint. See the // corresponding fields in `Blueprint`. @@ -155,16 +150,6 @@ pub struct BlueprintBuilder<'a> { creator: String, comments: Vec, - // These fields mirror how RSS chooses addresses for zone NICs. - nexus_v4_ips: AvailableIterator<'static, Ipv4Addr>, - nexus_v6_ips: AvailableIterator<'static, Ipv6Addr>, - - // Iterator of available external IPs for service zones - available_external_ips: AvailableIterator<'a, IpAddr>, - - // Iterator of available MAC addresses in the system address range - available_system_macs: AvailableIterator<'a, MacAddr>, - // Random number generator for new UUIDs rng: BlueprintBuilderRng, } @@ -242,111 +227,8 @@ impl<'a> BlueprintBuilder<'a> { "parent_id" => parent_blueprint.id.to_string(), )); - // Scan through the parent blueprint and build several sets of "used - // resources". When adding new control plane zones to a sled, we may - // need to allocate new resources to that zone. However, allocation at - // this point is entirely optimistic and theoretical: our caller may - // discard the blueprint we create without ever making it the new - // target, or it might be an arbitrarily long time before it becomes - // the target. We need to be able to make allocation decisions that we - // expect the blueprint executor to be able to realize successfully if - // and when we become the target, but we cannot _actually_ perform - // resource allocation. - // - // To do this, we look at our parent blueprint's used resources, and - // then choose new resources that aren't already in use (if possible; - // if we need to allocate a new resource and the parent blueprint - // appears to be using all the resources of that kind, our blueprint - // generation will fail). - // - // For example, RSS assigns Nexus NIC IPs by stepping through a list of - // addresses based on `NEXUS_OPTE_IPVx_SUBNET` (as in the iterators - // below). We use the same list of addresses, but additionally need to - // filter out the existing IPs for any Nexus instances that already - // exist. - // - // Note that by building these iterators up front based on - // `parent_blueprint`, we cannot reuse resources in a case where we - // remove a zone that used a resource and then add another zone that - // wants the same kind of resource. That is mostly okay, but there are - // some cases in which we may have to do that -- particularly external - // DNS zones, which tend to have a small number of fixed IPs. Solving - // that is a TODO. 
- // - // Also note that currently, we don't perform any kind of garbage - // collection on sleds and zones that no longer have any attached - // resources. Once a sled or zone is marked expunged, it will always - // stay in that state. - // https://github.com/oxidecomputer/omicron/issues/5552 tracks - // implementing this kind of garbage collection, and we should do it - // very soon. - - let mut existing_nexus_v4_ips: HashSet = HashSet::new(); - let mut existing_nexus_v6_ips: HashSet = HashSet::new(); - let mut used_external_ips: HashSet = HashSet::new(); - let mut used_macs: HashSet = HashSet::new(); - - for (_, z) in - parent_blueprint.all_omicron_zones(BlueprintZoneFilter::All) - { - let zone_type = &z.zone_type; - if let BlueprintZoneType::Nexus(nexus) = zone_type { - match nexus.nic.ip { - IpAddr::V4(ip) => { - if !existing_nexus_v4_ips.insert(ip) { - bail!("duplicate Nexus NIC IP: {ip}"); - } - } - IpAddr::V6(ip) => { - if !existing_nexus_v6_ips.insert(ip) { - bail!("duplicate Nexus NIC IP: {ip}"); - } - } - } - } - - if let Some((external_ip, nic)) = zone_type.external_networking() { - // For the test suite, ignore localhost. It gets reused many - // times and that's okay. We don't expect to see localhost - // outside the test suite. - if !external_ip.ip().is_loopback() - && !used_external_ips.insert(external_ip.ip()) - { - bail!("duplicate external IP: {external_ip:?}"); - } - - if !used_macs.insert(nic.mac) { - bail!("duplicate service vNIC MAC: {}", nic.mac); - } - } - } - - // TODO-performance Building these iterators as "walk through the list - // and skip anything we've used already" is fine as long as we're - // talking about a small number of resources (e.g., single-digit number - // of Nexus instances), but wouldn't be ideal if we have many resources - // we need to skip. We could do something smarter here based on the sets - // of used resources we built above if needed. 
- let nexus_v4_ips = AvailableIterator::new( - NEXUS_OPTE_IPV4_SUBNET - .0 - .iter() - .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), - existing_nexus_v4_ips, - ); - let nexus_v6_ips = AvailableIterator::new( - NEXUS_OPTE_IPV6_SUBNET - .0 - .iter() - .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), - existing_nexus_v6_ips, - ); - let available_external_ips = AvailableIterator::new( - input.service_ip_pool_ranges().iter().flat_map(|r| r.iter()), - used_external_ips, - ); - let available_system_macs = - AvailableIterator::new(MacAddr::iter_system(), used_macs); + let external_networking = + BuilderExternalNetworking::new(parent_blueprint, input)?; // Prefer the sled state from our parent blueprint for sleds // that were in it; there may be new sleds in `input`, in which @@ -378,15 +260,12 @@ impl<'a> BlueprintBuilder<'a> { parent_blueprint, input, sled_ip_allocators: BTreeMap::new(), + external_networking, zones: BlueprintZonesBuilder::new(parent_blueprint), disks: BlueprintDisksBuilder::new(parent_blueprint), sled_state, creator: creator.to_owned(), comments: Vec::new(), - nexus_v4_ips, - nexus_v6_ips, - available_external_ips, - available_system_macs, rng: BlueprintBuilderRng::new(), }) } @@ -806,44 +685,27 @@ impl<'a> BlueprintBuilder<'a> { for _ in 0..num_nexus_to_add { let nexus_id = self.rng.zone_rng.next(); + let ExternalNetworkingChoice { + external_ip, + nic_ip, + nic_subnet, + nic_mac, + } = self.external_networking.for_new_nexus()?; let external_ip = OmicronZoneExternalFloatingIp { id: self.rng.external_ip_rng.next(), - ip: self - .available_external_ips - .next() - .ok_or(Error::NoExternalServiceIpAvailable)?, + ip: external_ip, }; let nic = { - let (ip, subnet) = match external_ip.ip { - IpAddr::V4(_) => ( - self.nexus_v4_ips - .next() - .ok_or(Error::ExhaustedNexusIps)? - .into(), - IpNet::from(*NEXUS_OPTE_IPV4_SUBNET), - ), - IpAddr::V6(_) => ( - self.nexus_v6_ips - .next() - .ok_or(Error::ExhaustedNexusIps)? - .into(), - IpNet::from(*NEXUS_OPTE_IPV6_SUBNET), - ), - }; - let mac = self - .available_system_macs - .next() - .ok_or(Error::NoSystemMacAddressAvailable)?; NetworkInterface { id: self.rng.network_interface_rng.next(), kind: NetworkInterfaceKind::Service { id: nexus_id.into_untyped_uuid(), }, name: format!("nexus-{nexus_id}").parse().unwrap(), - ip, - mac, - subnet, + ip: nic_ip, + mac: nic_mac, + subnet: nic_subnet, vni: Vni::SERVICES_VNI, primary: true, slot: 0, @@ -942,45 +804,6 @@ impl<'a> BlueprintBuilder<'a> { } } -/// Combines a base iterator with an `in_use` set, filtering out any elements -/// that are in the "in_use" set. -/// -/// This can be done with a chained `.filter` on the iterator, but -/// `AvailableIterator` also allows for inspection of the `in_use` set. -/// -/// Note that this is a stateful iterator -- i.e. it implements `Iterator`, not -/// `IntoIterator`. That's what we currently need in the planner. -#[derive(Debug)] -pub struct AvailableIterator<'a, T> { - base: DebugIgnore + Send + 'a>>, - in_use: HashSet, -} - -impl<'a, T: Hash + Eq> AvailableIterator<'a, T> { - /// Creates a new `AvailableIterator` from a base iterator and a set of - /// elements that are in use. - pub fn new(base: I, in_use: impl IntoIterator) -> Self - where - I: Iterator + Send + 'a, - { - let in_use = in_use.into_iter().collect(); - AvailableIterator { base: DebugIgnore(Box::new(base)), in_use } - } - - /// Returns the in-use set. 
- pub fn in_use(&self) -> &HashSet { - &self.in_use - } -} - -impl Iterator for AvailableIterator<'_, T> { - type Item = T; - - fn next(&mut self) -> Option { - self.base.find(|item| !self.in_use.contains(item)) - } -} - #[derive(Debug)] struct BlueprintBuilderRng { // Have separate RNGs for the different kinds of UUIDs we might add, @@ -1230,11 +1053,12 @@ pub mod test { use expectorate::assert_contents; use nexus_types::deployment::BlueprintOrCollectionZoneConfig; use nexus_types::deployment::BlueprintZoneFilter; + use nexus_types::deployment::OmicronZoneNetworkResources; use nexus_types::external_api::views::SledPolicy; use omicron_common::address::IpRange; use omicron_test_utils::dev::test_setup_log; use std::collections::BTreeSet; - use test_strategy::proptest; + use std::mem; pub const DEFAULT_N_SLEDS: usize = 3; @@ -1535,6 +1359,14 @@ pub mod test { static TEST_NAME: &str = "blueprint_builder_test_add_physical_disks"; let logctx = test_setup_log(TEST_NAME); let (_, input, _) = example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + let input = { + // Clear out the external networking records from `input`, since + // we're building an empty blueprint. + let mut builder = input.into_builder(); + *builder.network_resources_mut() = + OmicronZoneNetworkResources::new(); + builder.build() + }; // Start with an empty blueprint (sleds with no zones). let parent = BlueprintBuilder::build_empty_with_sleds_seeded( @@ -1583,6 +1415,14 @@ pub mod test { // Discard the example blueprint and start with an empty one. let (collection, input, _) = example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + let input = { + // Clear out the external networking records from `input`, since + // we're building an empty blueprint. + let mut builder = input.into_builder(); + *builder.network_resources_mut() = + OmicronZoneNetworkResources::new(); + builder.build() + }; let parent = BlueprintBuilder::build_empty_with_sleds_seeded( input.all_sled_ids(SledFilter::Commissioned), "test", @@ -1624,7 +1464,7 @@ pub mod test { fn test_add_nexus_error_cases() { static TEST_NAME: &str = "blueprint_builder_test_add_nexus_error_cases"; let logctx = test_setup_log(TEST_NAME); - let (mut collection, input, mut parent) = + let (mut collection, mut input, mut parent) = example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); // Remove the Nexus zone from one of the sleds so that @@ -1638,12 +1478,51 @@ pub mod test { if zones.zones.zones.len() < nzones_before_retain { selected_sled_id = Some(*sled_id); // Also remove this zone from the blueprint. + let mut removed_nexus = None; parent .blueprint_zones .get_mut(sled_id) .expect("missing sled") .zones - .retain(|z| !z.zone_type.is_nexus()); + .retain(|z| match &z.zone_type { + BlueprintZoneType::Nexus(z) => { + removed_nexus = Some(z.clone()); + false + } + _ => true, + }); + let removed_nexus = + removed_nexus.expect("removed Nexus from blueprint"); + + // Also remove this Nexus's external networking resources + // from `input`. 
+ let mut builder = input.into_builder(); + let mut new_network_resources = + OmicronZoneNetworkResources::new(); + let old_network_resources = builder.network_resources_mut(); + for ip in old_network_resources.omicron_zone_external_ips() + { + if ip.ip.id() != removed_nexus.external_ip.id { + new_network_resources + .add_external_ip(ip.zone_id, ip.ip) + .expect("copied IP to new input"); + } + } + for nic in old_network_resources.omicron_zone_nics() { + if nic.nic.id.into_untyped_uuid() + != removed_nexus.nic.id + { + new_network_resources + .add_nic(nic.zone_id, nic.nic) + .expect("copied NIC to new input"); + } + } + mem::swap( + old_network_resources, + &mut new_network_resources, + ); + input = builder.build(); + break; } } @@ -1887,31 +1766,4 @@ pub mod test { logctx.cleanup_successful(); } - - /// Test that `AvailableIterator` correctly filters out items that are in - /// use. - #[proptest] - fn test_available_iterator(items: HashSet<(i32, bool)>) { - let mut in_use_map = HashSet::new(); - let mut expected_available = Vec::new(); - let items: Vec<_> = items - .into_iter() - .map(|(item, in_use)| { - if in_use { - in_use_map.insert(item); - } else { - expected_available.push(item); - } - item - }) - .collect(); - - let available = AvailableIterator::new(items.into_iter(), in_use_map); - let actual_available = available.collect::>(); - - assert_eq!( - expected_available, actual_available, - "available items match" - ); - } } diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/external_networking.rs b/nexus/reconfigurator/planning/src/blueprint_builder/external_networking.rs new file mode 100644 index 00000000000..b9100f518d0 --- /dev/null +++ b/nexus/reconfigurator/planning/src/blueprint_builder/external_networking.rs @@ -0,0 +1,331 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use super::Error; +use anyhow::bail; +use debug_ignore::DebugIgnore; +use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; +use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintZoneFilter; +use nexus_types::deployment::BlueprintZoneType; +use nexus_types::deployment::PlanningInput; +use omicron_common::address::DNS_OPTE_IPV4_SUBNET; +use omicron_common::address::DNS_OPTE_IPV6_SUBNET; +use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; +use omicron_common::address::NEXUS_OPTE_IPV6_SUBNET; +use omicron_common::address::NTP_OPTE_IPV4_SUBNET; +use omicron_common::address::NTP_OPTE_IPV6_SUBNET; +use omicron_common::api::external::IpNet; +use omicron_common::api::external::MacAddr; +use std::collections::HashSet; +use std::hash::Hash; +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::Ipv6Addr; + +#[derive(Debug)] +pub(super) struct BuilderExternalNetworking<'a> { + // These fields mirror how RSS chooses addresses for zone NICs. 
+ nexus_v4_ips: AvailableIterator<'static, Ipv4Addr>, + nexus_v6_ips: AvailableIterator<'static, Ipv6Addr>, + + // Iterator of available external IPs for service zones + available_external_ips: AvailableIterator<'a, IpAddr>, + + // Iterator of available MAC addresses in the system address range + available_system_macs: AvailableIterator<'a, MacAddr>, +} + +impl<'a> BuilderExternalNetworking<'a> { + pub(super) fn new( + parent_blueprint: &'a Blueprint, + input: &'a PlanningInput, + ) -> anyhow::Result { + // Scan through the parent blueprint and build several sets of "used + // resources". When adding new control plane zones to a sled, we may + // need to allocate new resources to that zone. However, allocation at + // this point is entirely optimistic and theoretical: our caller may + // discard the blueprint we create without ever making it the new + // target, or it might be an arbitrarily long time before it becomes + // the target. We need to be able to make allocation decisions that we + // expect the blueprint executor to be able to realize successfully if + // and when we become the target, but we cannot _actually_ perform + // resource allocation. + // + // To do this, we look at our parent blueprint's used resources, and + // then choose new resources that aren't already in use (if possible; + // if we need to allocate a new resource and the parent blueprint + // appears to be using all the resources of that kind, our blueprint + // generation will fail). + // + // For example, RSS assigns Nexus NIC IPs by stepping through a list of + // addresses based on `NEXUS_OPTE_IPVx_SUBNET` (as in the iterators + // below). We use the same list of addresses, but additionally need to + // filter out the existing IPs for any Nexus instances that already + // exist. + // + // Note that by building these iterators up front based on + // `parent_blueprint`, we cannot reuse resources in a case where we + // remove a zone that used a resource and then add another zone that + // wants the same kind of resource. That is mostly okay, but there are + // some cases in which we may have to do that -- particularly external + // DNS zones, which tend to have a small number of fixed IPs. Solving + // that is a TODO. + // + // Also note that currently, we don't perform any kind of garbage + // collection on sleds and zones that no longer have any attached + // resources. Once a sled or zone is marked expunged, it will always + // stay in that state. + // https://github.com/oxidecomputer/omicron/issues/5552 tracks + // implementing this kind of garbage collection, and we should do it + // very soon. + + let mut existing_nexus_v4_ips: HashSet = HashSet::new(); + let mut existing_nexus_v6_ips: HashSet = HashSet::new(); + let mut used_external_ips: HashSet = HashSet::new(); + let mut used_macs: HashSet = HashSet::new(); + + for (_, z) in + parent_blueprint.all_omicron_zones(BlueprintZoneFilter::All) + { + let zone_type = &z.zone_type; + if let BlueprintZoneType::Nexus(nexus) = zone_type { + match nexus.nic.ip { + IpAddr::V4(ip) => { + if !existing_nexus_v4_ips.insert(ip) { + bail!("duplicate Nexus NIC IP: {ip}"); + } + } + IpAddr::V6(ip) => { + if !existing_nexus_v6_ips.insert(ip) { + bail!("duplicate Nexus NIC IP: {ip}"); + } + } + } + } + + if let Some((external_ip, nic)) = zone_type.external_networking() { + // For the test suite, ignore localhost. It gets reused many + // times and that's okay. We don't expect to see localhost + // outside the test suite. 
+ if !external_ip.ip().is_loopback() + && !used_external_ips.insert(external_ip.ip()) + { + bail!("duplicate external IP: {external_ip:?}"); + } + + if !used_macs.insert(nic.mac) { + bail!("duplicate service vNIC MAC: {}", nic.mac); + } + } + } + + // Check the planning input: there shouldn't be any external networking + // resources in the database (the source of `input`) that we don't know + // about from the parent blueprint. + for external_ip_entry in + input.network_resources().omicron_zone_external_ips() + { + // As above, ignore localhost (used by the test suite). + if external_ip_entry.ip.ip().is_loopback() { + continue; + } + if !used_external_ips.contains(&external_ip_entry.ip.ip()) { + bail!( + "planning input contains unexpected external IP \ + (IP not found in parent blueprint): {external_ip_entry:?}" + ); + } + } + for nic_entry in input.network_resources().omicron_zone_nics() { + if !used_macs.contains(&nic_entry.nic.mac) { + bail!( + "planning input contains unexpected NIC \ + (MAC not found in parent blueprint): {nic_entry:?}" + ); + } + match nic_entry.nic.ip { + IpAddr::V4(ip) if NEXUS_OPTE_IPV4_SUBNET.contains(ip) => { + if !existing_nexus_v4_ips.contains(&ip) { + bail!( + "planning input contains unexpected NIC \ + (IP not found in parent blueprint): {nic_entry:?}" + ); + } + } + IpAddr::V4(ip) if NTP_OPTE_IPV4_SUBNET.contains(ip) => { + // TODO check existing_ntp_v4_ips, once it exists + } + IpAddr::V4(ip) if DNS_OPTE_IPV4_SUBNET.contains(ip) => { + // TODO check existing_dns_v4_ips, once it exists + } + IpAddr::V6(ip) if NEXUS_OPTE_IPV6_SUBNET.contains(ip) => { + if !existing_nexus_v6_ips.contains(&ip) { + bail!( + "planning input contains unexpected NIC \ + (IP not found in parent blueprint): {nic_entry:?}" + ); + } + } + IpAddr::V6(ip) if NTP_OPTE_IPV6_SUBNET.contains(ip) => { + // TODO check existing_ntp_v6_ips, once it exists + } + IpAddr::V6(ip) if DNS_OPTE_IPV6_SUBNET.contains(ip) => { + // TODO check existing_dns_v6_ips, once it exists + } + _ => { + bail!( + "planning input contains unexpected NIC \ + (IP not contained in known OPTE subnet): {nic_entry:?}" + ) + } + } + } + + // TODO-performance Building these iterators as "walk through the list + // and skip anything we've used already" is fine as long as we're + // talking about a small number of resources (e.g., single-digit number + // of Nexus instances), but wouldn't be ideal if we have many resources + // we need to skip. We could do something smarter here based on the sets + // of used resources we built above if needed. + let nexus_v4_ips = AvailableIterator::new( + NEXUS_OPTE_IPV4_SUBNET + .0 + .iter() + .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), + existing_nexus_v4_ips, + ); + let nexus_v6_ips = AvailableIterator::new( + NEXUS_OPTE_IPV6_SUBNET + .0 + .iter() + .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), + existing_nexus_v6_ips, + ); + let available_external_ips = AvailableIterator::new( + input.service_ip_pool_ranges().iter().flat_map(|r| r.iter()), + used_external_ips, + ); + let available_system_macs = + AvailableIterator::new(MacAddr::iter_system(), used_macs); + + Ok(Self { + nexus_v4_ips, + nexus_v6_ips, + available_external_ips, + available_system_macs, + }) + } + + pub(super) fn for_new_nexus( + &mut self, + ) -> Result { + let external_ip = self + .available_external_ips + .next() + .ok_or(Error::NoExternalServiceIpAvailable)?; + let (nic_ip, nic_subnet) = match external_ip { + IpAddr::V4(_) => ( + self.nexus_v4_ips + .next() + .ok_or(Error::ExhaustedNexusIps)? 
+ .into(), + IpNet::from(*NEXUS_OPTE_IPV4_SUBNET), + ), + IpAddr::V6(_) => ( + self.nexus_v6_ips + .next() + .ok_or(Error::ExhaustedNexusIps)? + .into(), + IpNet::from(*NEXUS_OPTE_IPV6_SUBNET), + ), + }; + let nic_mac = self + .available_system_macs + .next() + .ok_or(Error::NoSystemMacAddressAvailable)?; + + Ok(ExternalNetworkingChoice { + external_ip, + nic_ip, + nic_subnet, + nic_mac, + }) + } +} + +#[derive(Debug, Clone, Copy)] +pub(super) struct ExternalNetworkingChoice { + pub(super) external_ip: IpAddr, + pub(super) nic_ip: IpAddr, + pub(super) nic_subnet: IpNet, + pub(super) nic_mac: MacAddr, +} + +/// Combines a base iterator with an `in_use` set, filtering out any elements +/// that are in the "in_use" set. +/// +/// This can be done with a chained `.filter` on the iterator, but +/// `AvailableIterator` also allows for inspection of the `in_use` set. +/// +/// Note that this is a stateful iterator -- i.e. it implements `Iterator`, not +/// `IntoIterator`. That's what we currently need in the planner. +#[derive(Debug)] +struct AvailableIterator<'a, T> { + base: DebugIgnore + Send + 'a>>, + in_use: HashSet, +} + +impl<'a, T: Hash + Eq> AvailableIterator<'a, T> { + /// Creates a new `AvailableIterator` from a base iterator and a set of + /// elements that are in use. + fn new(base: I, in_use: impl IntoIterator) -> Self + where + I: Iterator + Send + 'a, + { + let in_use = in_use.into_iter().collect(); + AvailableIterator { base: DebugIgnore(Box::new(base)), in_use } + } +} + +impl Iterator for AvailableIterator<'_, T> { + type Item = T; + + fn next(&mut self) -> Option { + self.base.find(|item| !self.in_use.contains(item)) + } +} + +#[cfg(test)] +pub mod test { + use super::*; + use test_strategy::proptest; + + /// Test that `AvailableIterator` correctly filters out items that are in + /// use. + #[proptest] + fn test_available_iterator(items: HashSet<(i32, bool)>) { + let mut in_use_map = HashSet::new(); + let mut expected_available = Vec::new(); + let items: Vec<_> = items + .into_iter() + .map(|(item, in_use)| { + if in_use { + in_use_map.insert(item); + } else { + expected_available.push(item); + } + item + }) + .collect(); + + let available = AvailableIterator::new(items.into_iter(), in_use_map); + let actual_available = available.collect::>(); + + assert_eq!( + expected_available, actual_available, + "available items match" + ); + } +} diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/mod.rs b/nexus/reconfigurator/planning/src/blueprint_builder/mod.rs index e3afa2cdada..99d3b417726 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/mod.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/mod.rs @@ -5,6 +5,7 @@ //! 
Low-level facility for generating Blueprints mod builder; +mod external_networking; mod zones; pub use builder::*; diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 0c7ee8f5cb6..aca5f057d8a 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -504,6 +504,7 @@ mod test { use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZoneFilter; use nexus_types::deployment::BlueprintZoneType; + use nexus_types::deployment::OmicronZoneNetworkResources; use nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledProvisionPolicy; use nexus_types::external_api::views::SledState; @@ -511,10 +512,12 @@ mod test { use omicron_common::api::external::Generation; use omicron_common::disk::DiskIdentity; use omicron_test_utils::dev::test_setup_log; + use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; use sled_agent_client::ZoneKind; + use std::mem; use typed_rng::TypedUuidRng; /// Runs through a basic sequence of blueprints for adding a sled @@ -734,6 +737,42 @@ mod test { blueprint.blueprint_zones.retain(|k, _v| keep_sled_id == *k); blueprint.blueprint_disks.retain(|k, _v| keep_sled_id == *k); + // Also remove all the networking resources for the zones we just + // stripped out; i.e., only keep those for `keep_sled_id`. + let mut new_network_resources = OmicronZoneNetworkResources::new(); + let old_network_resources = builder.network_resources_mut(); + for old_ip in old_network_resources.omicron_zone_external_ips() { + if blueprint.all_omicron_zones(BlueprintZoneFilter::All).any( + |(_, zone)| { + zone.zone_type + .external_networking() + .map(|(ip, _nic)| ip.id() == old_ip.ip.id()) + .unwrap_or(false) + }, + ) { + new_network_resources + .add_external_ip(old_ip.zone_id, old_ip.ip) + .expect("copied IP to new input"); + } + } + for old_nic in old_network_resources.omicron_zone_nics() { + if blueprint.all_omicron_zones(BlueprintZoneFilter::All).any( + |(_, zone)| { + zone.zone_type + .external_networking() + .map(|(_ip, nic)| { + nic.id == old_nic.nic.id.into_untyped_uuid() + }) + .unwrap_or(false) + }, + ) { + new_network_resources + .add_nic(old_nic.zone_id, old_nic.nic) + .expect("copied NIC to new input"); + } + } + mem::swap(old_network_resources, &mut &mut new_network_resources); + (keep_sled_id, blueprint, collection, builder.build()) }; diff --git a/nexus/types/src/deployment/network_resources.rs b/nexus/types/src/deployment/network_resources.rs index 15f495d87a7..c93e604af9e 100644 --- a/nexus/types/src/deployment/network_resources.rs +++ b/nexus/types/src/deployment/network_resources.rs @@ -59,6 +59,18 @@ impl OmicronZoneNetworkResources { } } + pub fn omicron_zone_external_ips( + &self, + ) -> impl Iterator + '_ { + self.omicron_zone_external_ips.iter().copied() + } + + pub fn omicron_zone_nics( + &self, + ) -> impl Iterator + '_ { + self.omicron_zone_nics.iter().copied() + } + pub fn add_external_ip( &mut self, zone_id: OmicronZoneUuid, @@ -79,7 +91,7 @@ impl OmicronZoneNetworkResources { zone_id: OmicronZoneUuid, nic: OmicronZoneNic, ) -> Result<(), AddNetworkResourceError> { - let entry = OmicronZoneNicEntry { zone_id, nic: nic.clone() }; + let entry = OmicronZoneNicEntry { zone_id, nic }; self.omicron_zone_nics.insert_no_dups(entry).map_err(|err| { AddNetworkResourceError::DuplicateOmicronZoneNic { zone_id, @@ 
-221,7 +233,7 @@ pub struct OmicronZoneExternalSnatIp { /// /// This is a slimmer `nexus_db_model::ServiceNetworkInterface` that only stores /// the fields necessary for blueprint planning. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub struct OmicronZoneNic { pub id: VnicUuid, pub mac: MacAddr, @@ -233,7 +245,7 @@ pub struct OmicronZoneNic { /// A pair of an Omicron zone ID and an external IP. /// /// Part of [`OmicronZoneNetworkResources`]. -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Copy, Debug, Deserialize, Serialize)] pub struct OmicronZoneExternalIpEntry { pub zone_id: OmicronZoneUuid, pub ip: OmicronZoneExternalIp, @@ -264,10 +276,10 @@ impl TriMapEntry for OmicronZoneExternalIpEntry { /// A pair of an Omicron zone ID and a network interface. /// /// Part of [`OmicronZoneNetworkResources`]. -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Copy, Debug, Deserialize, Serialize)] pub struct OmicronZoneNicEntry { - zone_id: OmicronZoneUuid, - nic: OmicronZoneNic, + pub zone_id: OmicronZoneUuid, + pub nic: OmicronZoneNic, } impl TriMapEntry for OmicronZoneNicEntry { diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs index ccb15b858a6..c8cdeec15ba 100644 --- a/nexus/types/src/deployment/planning_input.rs +++ b/nexus/types/src/deployment/planning_input.rs @@ -548,6 +548,12 @@ impl PlanningInputBuilder { Ok(self.network_resources.add_nic(zone_id, nic)?) } + pub fn network_resources_mut( + &mut self, + ) -> &mut OmicronZoneNetworkResources { + &mut self.network_resources + } + pub fn policy_mut(&mut self) -> &mut Policy { &mut self.policy } diff --git a/nexus/types/src/deployment/tri_map.rs b/nexus/types/src/deployment/tri_map.rs index 52b64aec43c..e4ef320b4fd 100644 --- a/nexus/types/src/deployment/tri_map.rs +++ b/nexus/types/src/deployment/tri_map.rs @@ -92,6 +92,10 @@ impl TriMap { } } + pub(crate) fn iter(&self) -> impl Iterator { + self.entries.iter() + } + /// Checks general invariants of the map. /// /// The code below always upholds these invariants, but it's useful to have From 116590c584ccfe222333fcf581dfd2d331c6f74a Mon Sep 17 00:00:00 2001 From: "Andrew J. 
Stone" Date: Thu, 16 May 2024 18:23:29 -0400 Subject: [PATCH 21/37] Add multiline diffs to blueprint display output (#5785) --- .../planner_decommissions_sleds_1_2.txt | 42 ++++--- .../output/planner_nonprovisionable_1_2.txt | 42 ++++--- .../output/planner_nonprovisionable_2_2a.txt | 27 ++--- nexus/types/src/deployment.rs | 7 +- nexus/types/src/deployment/blueprint_diff.rs | 26 ++--- .../types/src/deployment/blueprint_display.rs | 103 ++++++++++++++++-- 6 files changed, 181 insertions(+), 66 deletions(-) diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt index 8c94c97188d..b939e69ba1a 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt @@ -60,21 +60,33 @@ to: blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e omicron zones generation 2 -> 3: - ----------------------------------------------------------------------------------------------------- - zone type zone id disposition underlay IP - ----------------------------------------------------------------------------------------------------- -* crucible 1e1ed0cc-1adc-410f-943a-d1a3107de619 in service -> expunged fd00:1122:3344:103::27 -* crucible 2307bbed-02ba-493b-89e3-46585c74c8fc in service -> expunged fd00:1122:3344:103::28 -* crucible 4e36b7ef-5684-4304-b7c3-3c31aaf83d4f in service -> expunged fd00:1122:3344:103::23 -* crucible 603e629d-2599-400e-b879-4134d4cc426e in service -> expunged fd00:1122:3344:103::2c -* crucible 9179d6dc-387d-424e-8d62-ed59b2c728f6 in service -> expunged fd00:1122:3344:103::2a -* crucible c28d7b4b-a259-45ad-945d-f19ca3c6964c in service -> expunged fd00:1122:3344:103::29 -* crucible e29998e7-9ed2-46b6-bb70-4118159fe07f in service -> expunged fd00:1122:3344:103::26 -* crucible f06e91a1-0c17-4cca-adbc-1c9b67bdb11d in service -> expunged fd00:1122:3344:103::2b -* crucible f11f5c60-1ac7-4630-9a3a-a9bc85c75203 in service -> expunged fd00:1122:3344:103::25 -* crucible f231e4eb-3fc9-4964-9d71-2c41644852d9 in service -> expunged fd00:1122:3344:103::24 -* internal_ntp c62b87b6-b98d-4d22-ba4f-cee4499e2ba8 in service -> expunged fd00:1122:3344:103::21 -* nexus 6a70a233-1900-43c0-9c00-aa9d1f7adfbc in service -> expunged fd00:1122:3344:103::22 + ------------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------- +* crucible 1e1ed0cc-1adc-410f-943a-d1a3107de619 - in service fd00:1122:3344:103::27 + └─ + expunged +* crucible 2307bbed-02ba-493b-89e3-46585c74c8fc - in service fd00:1122:3344:103::28 + └─ + expunged +* crucible 4e36b7ef-5684-4304-b7c3-3c31aaf83d4f - in service fd00:1122:3344:103::23 + └─ + expunged +* crucible 603e629d-2599-400e-b879-4134d4cc426e - in service fd00:1122:3344:103::2c + └─ + expunged +* crucible 9179d6dc-387d-424e-8d62-ed59b2c728f6 - in service fd00:1122:3344:103::2a + └─ + expunged +* crucible c28d7b4b-a259-45ad-945d-f19ca3c6964c - in service fd00:1122:3344:103::29 + └─ + expunged +* crucible e29998e7-9ed2-46b6-bb70-4118159fe07f - in service fd00:1122:3344:103::26 + └─ + expunged +* crucible f06e91a1-0c17-4cca-adbc-1c9b67bdb11d - in service fd00:1122:3344:103::2b + └─ + expunged +* crucible f11f5c60-1ac7-4630-9a3a-a9bc85c75203 - in service fd00:1122:3344:103::25 + └─ + expunged +* crucible 
f231e4eb-3fc9-4964-9d71-2c41644852d9 - in service fd00:1122:3344:103::24 + └─ + expunged +* internal_ntp c62b87b6-b98d-4d22-ba4f-cee4499e2ba8 - in service fd00:1122:3344:103::21 + └─ + expunged +* nexus 6a70a233-1900-43c0-9c00-aa9d1f7adfbc - in service fd00:1122:3344:103::22 + └─ + expunged sled fefcf4cf-f7e7-46b3-b629-058526ce440e: diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt index bc4d2abf710..c5876b0b410 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt @@ -60,21 +60,33 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 omicron zones generation 2 -> 3: - ----------------------------------------------------------------------------------------------------- - zone type zone id disposition underlay IP - ----------------------------------------------------------------------------------------------------- -* crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 in service -> expunged fd00:1122:3344:103::2c -* crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea in service -> expunged fd00:1122:3344:103::25 -* crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f in service -> expunged fd00:1122:3344:103::27 -* crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 in service -> expunged fd00:1122:3344:103::28 -* crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb in service -> expunged fd00:1122:3344:103::24 -* crucible 67622d61-2df4-414d-aa0e-d1277265f405 in service -> expunged fd00:1122:3344:103::23 -* crucible b91b271d-8d80-4f49-99a0-34006ae86063 in service -> expunged fd00:1122:3344:103::2a -* crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 in service -> expunged fd00:1122:3344:103::26 -* crucible e39d7c9e-182b-48af-af87-58079d723583 in service -> expunged fd00:1122:3344:103::29 -* crucible f69f92a1-5007-4bb0-a85b-604dc217154b in service -> expunged fd00:1122:3344:103::2b -* internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 in service -> expunged fd00:1122:3344:103::21 -* nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb in service -> expunged fd00:1122:3344:103::22 + ------------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------- +* crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 - in service fd00:1122:3344:103::2c + └─ + expunged +* crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea - in service fd00:1122:3344:103::25 + └─ + expunged +* crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f - in service fd00:1122:3344:103::27 + └─ + expunged +* crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 - in service fd00:1122:3344:103::28 + └─ + expunged +* crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb - in service fd00:1122:3344:103::24 + └─ + expunged +* crucible 67622d61-2df4-414d-aa0e-d1277265f405 - in service fd00:1122:3344:103::23 + └─ + expunged +* crucible b91b271d-8d80-4f49-99a0-34006ae86063 - in service fd00:1122:3344:103::2a + └─ + expunged +* crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 - in service fd00:1122:3344:103::26 + └─ + expunged +* crucible e39d7c9e-182b-48af-af87-58079d723583 - in service fd00:1122:3344:103::29 + └─ + expunged +* crucible f69f92a1-5007-4bb0-a85b-604dc217154b - in service fd00:1122:3344:103::2b + └─ + expunged +* internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 - in service fd00:1122:3344:103::21 + └─ + expunged +* 
nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb - in service fd00:1122:3344:103::22 + └─ + expunged sled 68d24ac5-f341-49ea-a92a-0381b52ab387: diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt index c46b3c488eb..648c082c0f8 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt @@ -124,19 +124,20 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 omicron zones at generation 2: - -------------------------------------------------------------------------------------------------- - zone type zone id disposition underlay IP - -------------------------------------------------------------------------------------------------- - crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in service fd00:1122:3344:105::27 - crucible 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in service fd00:1122:3344:105::23 - crucible 9f0abbad-dbd3-4d43-9675-78092217ffd9 in service fd00:1122:3344:105::25 - crucible b0c63f48-01ea-4aae-bb26-fb0dd59d1662 in service fd00:1122:3344:105::28 - crucible c406da50-34b9-4bb4-a460-8f49875d2a6a in service fd00:1122:3344:105::24 - crucible d660d7ed-28c0-45ae-9ace-dc3ecf7e8786 in service fd00:1122:3344:105::2a - crucible e98cc0de-abf6-4da4-a20d-d05c7a9bb1d7 in service fd00:1122:3344:105::2b - crucible f55e6aaf-e8fc-4913-9e3c-8cd1bd4bdad3 in service fd00:1122:3344:105::29 -- crucible 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service fd00:1122:3344:105::2c -* crucible 19fbc4f8-a683-4f22-8f5a-e74782b935be in service -> quiesced fd00:1122:3344:105::26 + ---------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + ---------------------------------------------------------------------------------------- + crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in service fd00:1122:3344:105::27 + crucible 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in service fd00:1122:3344:105::23 + crucible 9f0abbad-dbd3-4d43-9675-78092217ffd9 in service fd00:1122:3344:105::25 + crucible b0c63f48-01ea-4aae-bb26-fb0dd59d1662 in service fd00:1122:3344:105::28 + crucible c406da50-34b9-4bb4-a460-8f49875d2a6a in service fd00:1122:3344:105::24 + crucible d660d7ed-28c0-45ae-9ace-dc3ecf7e8786 in service fd00:1122:3344:105::2a + crucible e98cc0de-abf6-4da4-a20d-d05c7a9bb1d7 in service fd00:1122:3344:105::2b + crucible f55e6aaf-e8fc-4913-9e3c-8cd1bd4bdad3 in service fd00:1122:3344:105::29 +- crucible 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service fd00:1122:3344:105::2c +* crucible 19fbc4f8-a683-4f22-8f5a-e74782b935be - in service fd00:1122:3344:105::26 + └─ + quiesced sled 48d95fef-bc9f-4f50-9a53-1e075836291d: diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index aafa631320b..f1be32f2587 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -304,7 +304,10 @@ impl BpSledSubtableData for &OmicronPhysicalDisksConfig { self.disks.iter().map(|d| d.identity.clone()).collect(); sorted_disk_ids.into_iter().map(move |d| { - BpSledSubtableRow::new(state, vec![d.vendor, d.model, d.serial]) + BpSledSubtableRow::from_strings( + state, + vec![d.vendor, d.model, d.serial], + ) }) } } @@ -319,7 +322,7 @@ impl BpSledSubtableData for BlueprintOrCollectionZonesConfig { state: BpDiffState, ) -> impl Iterator { self.zones().map(move |zone| { - BpSledSubtableRow::new( + BpSledSubtableRow::from_strings( state, 
vec![ zone.kind().to_string(), diff --git a/nexus/types/src/deployment/blueprint_diff.rs b/nexus/types/src/deployment/blueprint_diff.rs index c3c28a474ce..905dc3dd3d8 100644 --- a/nexus/types/src/deployment/blueprint_diff.rs +++ b/nexus/types/src/deployment/blueprint_diff.rs @@ -7,8 +7,8 @@ use super::blueprint_display::{ constants::*, linear_table_modified, linear_table_unchanged, BpDiffState, BpGeneration, BpOmicronZonesSubtableSchema, BpPhysicalDisksSubtableSchema, - BpSledSubtable, BpSledSubtableData, BpSledSubtableRow, KvListWithHeading, - KvPair, + BpSledSubtable, BpSledSubtableColumn, BpSledSubtableData, + BpSledSubtableRow, KvListWithHeading, KvPair, }; use super::zone_sort_key; use omicron_common::api::external::Generation; @@ -48,7 +48,7 @@ impl BpSledSubtableData for BpDiffZoneDetails { state: BpDiffState, ) -> impl Iterator { self.zones.iter().map(move |zone| { - BpSledSubtableRow::new( + BpSledSubtableRow::from_strings( state, vec![ zone.kind().to_string(), @@ -149,18 +149,18 @@ impl BpSledSubtableData for BpDiffZonesModified { state: BpDiffState, ) -> impl Iterator { self.zones.iter().map(move |zone| { - let disposition = format!( - "{} {ARROW} {}", - zone.prior_disposition, - zone.zone.disposition() - ); BpSledSubtableRow::new( state, vec![ - zone.zone.kind().to_string(), - zone.zone.id().to_string(), - disposition, - zone.zone.underlay_address().to_string(), + BpSledSubtableColumn::value(zone.zone.kind().to_string()), + BpSledSubtableColumn::value(zone.zone.id().to_string()), + BpSledSubtableColumn::diff( + zone.prior_disposition.to_string(), + zone.zone.disposition().to_string(), + ), + BpSledSubtableColumn::value( + zone.zone.underlay_address().to_string(), + ), ], ) }) @@ -421,7 +421,7 @@ impl BpSledSubtableData for DiffPhysicalDisksDetails { state: BpDiffState, ) -> impl Iterator { self.disks.iter().map(move |d| { - BpSledSubtableRow::new( + BpSledSubtableRow::from_strings( state, vec![d.vendor.clone(), d.model.clone(), d.serial.clone()], ) diff --git a/nexus/types/src/deployment/blueprint_display.rs b/nexus/types/src/deployment/blueprint_display.rs index d5dc5e3074a..fb5c58d5135 100644 --- a/nexus/types/src/deployment/blueprint_display.rs +++ b/nexus/types/src/deployment/blueprint_display.rs @@ -13,6 +13,10 @@ pub mod constants { pub(super) const MODIFIED_PREFIX: char = '*'; pub(super) const UNCHANGED_PREFIX: char = ' '; + #[allow(unused)] + pub(super) const SUB_NOT_LAST: &str = "├─"; + pub(super) const SUB_LAST: &str = "└─"; + pub const ARROW: &str = "->"; pub const METADATA_HEADING: &str = "METADATA"; pub const CREATED_BY: &str = "created by"; @@ -97,16 +101,57 @@ impl fmt::Display for BpGeneration { } } +pub enum BpSledSubtableColumn { + Value(String), + Diff { before: String, after: String }, +} + +impl BpSledSubtableColumn { + pub fn value(s: String) -> BpSledSubtableColumn { + BpSledSubtableColumn::Value(s) + } + + pub fn diff(before: String, after: String) -> BpSledSubtableColumn { + BpSledSubtableColumn::Diff { before, after } + } + + pub fn len(&self) -> usize { + match self { + BpSledSubtableColumn::Value(s) => s.len(), + BpSledSubtableColumn::Diff { before, after } => { + // Add 1 for the added/removed prefix and 1 for a space + // + // This will need to change if we change how we render diffs in + // the `Display` impl for `BpSledSubtable`. However, putting it + // here allows to minimize any extra horizontal spacing in case + // other values for the same column are already longer than the + // the before or after values + 2. 
+ usize::max(before.len(), after.len()) + 2 + } + } + } +} + /// A row in a [`BpSledSubtable`] pub struct BpSledSubtableRow { state: BpDiffState, - columns: Vec, + columns: Vec, } impl BpSledSubtableRow { - pub fn new(state: BpDiffState, columns: Vec) -> Self { + pub fn new(state: BpDiffState, columns: Vec) -> Self { BpSledSubtableRow { state, columns } } + + pub fn from_strings(state: BpDiffState, columns: Vec) -> Self { + BpSledSubtableRow { + state, + columns: columns + .into_iter() + .map(BpSledSubtableColumn::Value) + .collect(), + } + } } /// Metadata about all instances of specific type of [`BpSledSubtable`], @@ -190,10 +235,10 @@ impl fmt::Display for BpSledSubtable { for (i, (column, width)) in self.column_names.iter().zip(&widths).enumerate() { - if i != 0 { - write!(f, "{: (s.clone(), false), + BpSledSubtableColumn::Diff { before, .. } => { + // If we remove the prefix and space, we'll need to also + // modify `BpSledSubtableColumn::len` to reflect this. + (format!("{REMOVED_PREFIX} {before}"), true) + } + }; + multiline_row |= needs_multiline; + + if i == 0 { write!(f, "{column: Date: Fri, 17 May 2024 15:11:02 -0700 Subject: [PATCH 22/37] Revert sidecar bump to v1.0.18 (#5790) This partially reverts commit 509bf0727e8bb98d88ed7346ce6469d70bd3c848 (#5781). --- tools/permslip_staging | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/permslip_staging b/tools/permslip_staging index 33b8b8c0271..a9cacd43137 100644 --- a/tools/permslip_staging +++ b/tools/permslip_staging @@ -1,4 +1,4 @@ 03df89d44ad8b653abbeb7fbb83821869f008733e9da946457e72a13cb11d6cc manifest-gimlet-v1.0.19.toml b973cc9feb20f7bba447e7f5291c4070387fa9992deab81301f67f0a3844cd0c manifest-oxide-rot-1-v1.0.11.toml aae829e02d79ec0fe19019c783b6426c6fcc1fe4427aea70b65afc2884f53db8 manifest-psc-v1.0.17.toml -5fc2aca37c2165c57cef4321fbaa4fe03ff38dcd992b6d6a076f54c167e0ad9f manifest-sidecar-v1.0.18.toml +16992e82dff635eda1e065e0f6e325c795b6e90c879c7442ae062c063940a60a manifest-sidecar-v1.0.17.toml From 649fab483a6b6b261dda7eddcccd6ac87aab9a6e Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Sun, 19 May 2024 14:38:01 -0400 Subject: [PATCH 23/37] Revert back to sidecar 1.0.16 (#5792) Based on dogfood today, 1.0.17 also fails to read transceivers correctly CC @Aaron-Hartwig since this may change what we think the problem is? --- tools/permslip_staging | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/permslip_staging b/tools/permslip_staging index a9cacd43137..f5ed8e873ae 100644 --- a/tools/permslip_staging +++ b/tools/permslip_staging @@ -1,4 +1,4 @@ 03df89d44ad8b653abbeb7fbb83821869f008733e9da946457e72a13cb11d6cc manifest-gimlet-v1.0.19.toml b973cc9feb20f7bba447e7f5291c4070387fa9992deab81301f67f0a3844cd0c manifest-oxide-rot-1-v1.0.11.toml aae829e02d79ec0fe19019c783b6426c6fcc1fe4427aea70b65afc2884f53db8 manifest-psc-v1.0.17.toml -16992e82dff635eda1e065e0f6e325c795b6e90c879c7442ae062c063940a60a manifest-sidecar-v1.0.17.toml +9bd043382ad5c7cdb8f00a66e401a6c4b88e8d588915f304d2c261ea7df4d1b5 manifest-sidecar-v1.0.16.toml From e30978a040d4f87cccafa3e4475cd3056df1cc31 Mon Sep 17 00:00:00 2001 From: "oxide-reflector-bot[bot]" <130185838+oxide-reflector-bot[bot]@users.noreply.github.com> Date: Mon, 20 May 2024 09:37:20 -0700 Subject: [PATCH 24/37] Update dendrite to 6334bf7 (#5365) Updated dendrite to commit 6334bf7. 
--------- Co-authored-by: reflector[bot] <130185838+reflector[bot]@users.noreply.github.com> --- package-manifest.toml | 12 ++++++------ tools/dendrite_openapi_version | 4 ++-- tools/dendrite_stub_checksums | 6 +++--- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/package-manifest.toml b/package-manifest.toml index 825aeea8a89..af0d4675e2b 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -613,8 +613,8 @@ only_for_targets.image = "standard" # 2. Copy dendrite.tar.gz from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "3b84ea6516cafb4595a6f2a668df16c1a501b687" -source.sha256 = "1a18379522da75c034d66d15cf2e50a0d7289a746ef7c8a0ad98c4f61403f29b" +source.commit = "6334bf74fa21790c15f1c4e494ea2ec0edd1c83c" +source.sha256 = "5929f9abf0daf4bbf17d835e5d69fc842b9617b312fb5644fa99daf785203700" output.type = "zone" output.intermediate_only = true @@ -638,8 +638,8 @@ only_for_targets.image = "standard" # 2. Copy the output zone image from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "3b84ea6516cafb4595a6f2a668df16c1a501b687" -source.sha256 = "470e61b4652992da882ef10dd1511599a9cffe885d75c9924ee38e0900677ba7" +source.commit = "6334bf74fa21790c15f1c4e494ea2ec0edd1c83c" +source.sha256 = "0294a1911212c4764d1034b5e0ca00cc9dfc51df482a9f6e5547b191b4481ad8" output.type = "zone" output.intermediate_only = true @@ -656,8 +656,8 @@ only_for_targets.image = "standard" # 2. Copy dendrite.tar.gz from dendrite/out to omicron/out/dendrite-softnpu.tar.gz source.type = "prebuilt" source.repo = "dendrite" -source.commit = "3b84ea6516cafb4595a6f2a668df16c1a501b687" -source.sha256 = "8a7cc20bcca7498c1e83eb6f898e967635e69089c1bb4f86f7acdf99f7b1f353" +source.commit = "6334bf74fa21790c15f1c4e494ea2ec0edd1c83c" +source.sha256 = "1a188da01dccf565058145b43573a549a2eb4d71fe8800170152b823af27a010" output.type = "zone" output.intermediate_only = true diff --git a/tools/dendrite_openapi_version b/tools/dendrite_openapi_version index a00d2be7a1f..6d710422502 100644 --- a/tools/dendrite_openapi_version +++ b/tools/dendrite_openapi_version @@ -1,2 +1,2 @@ -COMMIT="3b84ea6516cafb4595a6f2a668df16c1a501b687" -SHA2="fc397e254dc150850fba12013df147aa9b23296056fb4d93f65c1806e25b0823" +COMMIT="6334bf74fa21790c15f1c4e494ea2ec0edd1c83c" +SHA2="213031aa058f0aa355964e4a5ca350db30110454bad5c77cbc94ab77fdcbe013" diff --git a/tools/dendrite_stub_checksums b/tools/dendrite_stub_checksums index 18cfd54470c..3f4d9854c4e 100644 --- a/tools/dendrite_stub_checksums +++ b/tools/dendrite_stub_checksums @@ -1,3 +1,3 @@ -CIDL_SHA256_ILLUMOS="1a18379522da75c034d66d15cf2e50a0d7289a746ef7c8a0ad98c4f61403f29b" -CIDL_SHA256_LINUX_DPD="c255370062ce616f6fce5630fad7a1fbfa63aa7db964aeb6de410ba4a4bc3193" -CIDL_SHA256_LINUX_SWADM="7899b1603518489b2276f9d9043107a267060e3b05be51f08002adf3d118a7c9" +CIDL_SHA256_ILLUMOS="5929f9abf0daf4bbf17d835e5d69fc842b9617b312fb5644fa99daf785203700" +CIDL_SHA256_LINUX_DPD="fa38138db9ce1c2cababd11dd9ef1289295e4a8185c78372f6ff1a090c75a05b" +CIDL_SHA256_LINUX_SWADM="ebda6c0a8e29f40c389337fe2e37c1191eeeb34d729de7724b6d707bb6c9a882" From c2f51e25ac03482c671a2558433bada20599c831 Mon Sep 17 00:00:00 2001 From: bnaecker Date: Mon, 20 May 2024 11:58:01 -0700 Subject: [PATCH 25/37] Update propolis and crucible deps (#5794) --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 12 ++++++------ nexus/src/app/sagas/common_storage.rs | 8 ++++++-- nexus/src/app/sagas/disk_create.rs | 2 +- 
nexus/src/app/sagas/snapshot_create.rs | 2 +- package-manifest.toml | 12 ++++++------ 6 files changed, 33 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ca2a2e519c8..d01cc0cc15d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -463,9 +463,9 @@ dependencies = [ [[package]] name = "bhyve_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=27e2789d381c0fcc237fbe30cec2ec66bd750c12#27e2789d381c0fcc237fbe30cec2ec66bd750c12" +source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" dependencies = [ - "bhyve_api_sys 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=27e2789d381c0fcc237fbe30cec2ec66bd750c12)", + "bhyve_api_sys 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", "libc", "strum", ] @@ -483,7 +483,7 @@ dependencies = [ [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=27e2789d381c0fcc237fbe30cec2ec66bd750c12#27e2789d381c0fcc237fbe30cec2ec66bd750c12" +source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" dependencies = [ "libc", "strum", @@ -1383,7 +1383,7 @@ dependencies = [ [[package]] name = "crucible-agent-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=62cc2cfe64ca09c6876be7633355026fa65c8545#62cc2cfe64ca09c6876be7633355026fa65c8545" +source = "git+https://github.com/oxidecomputer/crucible?rev=8c6d485110ecfae5409575246b986a145c386dc4#8c6d485110ecfae5409575246b986a145c386dc4" dependencies = [ "anyhow", "chrono", @@ -1399,7 +1399,7 @@ dependencies = [ [[package]] name = "crucible-pantry-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=62cc2cfe64ca09c6876be7633355026fa65c8545#62cc2cfe64ca09c6876be7633355026fa65c8545" +source = "git+https://github.com/oxidecomputer/crucible?rev=8c6d485110ecfae5409575246b986a145c386dc4#8c6d485110ecfae5409575246b986a145c386dc4" dependencies = [ "anyhow", "chrono", @@ -1416,7 +1416,7 @@ dependencies = [ [[package]] name = "crucible-smf" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=62cc2cfe64ca09c6876be7633355026fa65c8545#62cc2cfe64ca09c6876be7633355026fa65c8545" +source = "git+https://github.com/oxidecomputer/crucible?rev=8c6d485110ecfae5409575246b986a145c386dc4#8c6d485110ecfae5409575246b986a145c386dc4" dependencies = [ "crucible-workspace-hack", "libc", @@ -3490,7 +3490,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "bhyve_api 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=27e2789d381c0fcc237fbe30cec2ec66bd750c12)", + "bhyve_api 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", "byteorder", "camino", "camino-tempfile", @@ -5436,7 +5436,7 @@ dependencies = [ "pq-sys", "pretty_assertions", "progenitor-client", - "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=27e2789d381c0fcc237fbe30cec2ec66bd750c12)", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", "rand 0.8.5", "rcgen", "ref-cast", @@ -5682,7 +5682,7 @@ dependencies = [ "oximeter-instruments", "oximeter-producer", "pretty_assertions", - "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=27e2789d381c0fcc237fbe30cec2ec66bd750c12)", + "propolis-client 0.1.0 
(git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", "propolis-mock-server", "rand 0.8.5", "rcgen", @@ -7106,7 +7106,7 @@ dependencies = [ [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=27e2789d381c0fcc237fbe30cec2ec66bd750c12#27e2789d381c0fcc237fbe30cec2ec66bd750c12" +source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" dependencies = [ "async-trait", "base64 0.21.7", @@ -7148,7 +7148,7 @@ dependencies = [ [[package]] name = "propolis-mock-server" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=27e2789d381c0fcc237fbe30cec2ec66bd750c12#27e2789d381c0fcc237fbe30cec2ec66bd750c12" +source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" dependencies = [ "anyhow", "atty", @@ -7158,7 +7158,7 @@ dependencies = [ "futures", "hyper 0.14.28", "progenitor", - "propolis_types 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=27e2789d381c0fcc237fbe30cec2ec66bd750c12)", + "propolis_types 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", "rand 0.8.5", "reqwest", "schemars", @@ -7190,7 +7190,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=27e2789d381c0fcc237fbe30cec2ec66bd750c12#27e2789d381c0fcc237fbe30cec2ec66bd750c12" +source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" dependencies = [ "schemars", "serde", diff --git a/Cargo.toml b/Cargo.toml index ceac9801bb2..29e2a8cbd00 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -240,9 +240,9 @@ cookie = "0.18" criterion = { version = "0.5.1", features = [ "async_tokio" ] } crossbeam = "0.8" crossterm = { version = "0.27.0", features = ["event-stream"] } -crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "62cc2cfe64ca09c6876be7633355026fa65c8545" } -crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "62cc2cfe64ca09c6876be7633355026fa65c8545" } -crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "62cc2cfe64ca09c6876be7633355026fa65c8545" } +crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "8c6d485110ecfae5409575246b986a145c386dc4" } +crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "8c6d485110ecfae5409575246b986a145c386dc4" } +crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "8c6d485110ecfae5409575246b986a145c386dc4" } csv = "1.3.0" curve25519-dalek = "4" datatest-stable = "0.2.9" @@ -381,9 +381,9 @@ prettyplease = { version = "0.2.20", features = ["verbatim"] } proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } -bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "27e2789d381c0fcc237fbe30cec2ec66bd750c12" } -propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "27e2789d381c0fcc237fbe30cec2ec66bd750c12" } -propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "27e2789d381c0fcc237fbe30cec2ec66bd750c12" } +bhyve_api = { git = 
"https://github.com/oxidecomputer/propolis", rev = "6d7ed9a033babc054db9eff5b59dee978d2b0d76" } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "6d7ed9a033babc054db9eff5b59dee978d2b0d76" } +propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "6d7ed9a033babc054db9eff5b59dee978d2b0d76" } proptest = "1.4.0" quote = "1.0" rand = "0.8.5" diff --git a/nexus/src/app/sagas/common_storage.rs b/nexus/src/app/sagas/common_storage.rs index 0fe14f6d2a3..611fcc3258f 100644 --- a/nexus/src/app/sagas/common_storage.rs +++ b/nexus/src/app/sagas/common_storage.rs @@ -37,10 +37,14 @@ pub(crate) async fn ensure_region_in_dataset( ) -> Result { let url = format!("http://{}", dataset.address()); let client = CrucibleAgentClient::new(&url); - + let Ok(extent_count) = u32::try_from(region.extent_count()) else { + return Err(Error::internal_error( + "Extent count out of range for a u32", + )); + }; let region_request = CreateRegion { block_size: region.block_size().to_bytes(), - extent_count: region.extent_count(), + extent_count, extent_size: region.blocks_per_extent(), // TODO: Can we avoid casting from UUID to string? // NOTE: This'll require updating the crucible agent client. diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 5c4f5bf1eee..5e1d386ed14 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -485,7 +485,7 @@ async fn sdc_regions_ensure( sub_volumes: vec![VolumeConstructionRequest::Region { block_size, blocks_per_extent, - extent_count: extent_count.try_into().unwrap(), + extent_count, gen: 1, opts: CrucibleOpts { id: disk_id, diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 53e06e310d7..2a5deeff510 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -400,7 +400,7 @@ async fn ssc_regions_ensure( sub_volumes: vec![VolumeConstructionRequest::Region { block_size, blocks_per_extent, - extent_count: extent_count.try_into().unwrap(), + extent_count, gen: 1, opts: CrucibleOpts { id: destination_volume_id, diff --git a/package-manifest.toml b/package-manifest.toml index af0d4675e2b..2bfc51d533b 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -490,10 +490,10 @@ only_for_targets.image = "standard" # 3. 
Use source.type = "manual" instead of "prebuilt" source.type = "prebuilt" source.repo = "crucible" -source.commit = "62cc2cfe64ca09c6876be7633355026fa65c8545" +source.commit = "8c6d485110ecfae5409575246b986a145c386dc4" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible.sha256.txt -source.sha256 = "5b9e3ce16c8b3558e2ce20567165268088bd1b40a518636daf658eee28dd5843" +source.sha256 = "a974c976babbbbe4d126fe324e28093b4f69b689e1cf607ce38323befcfa494e" output.type = "zone" output.intermediate_only = true @@ -502,10 +502,10 @@ service_name = "crucible_pantry_prebuilt" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "crucible" -source.commit = "62cc2cfe64ca09c6876be7633355026fa65c8545" +source.commit = "8c6d485110ecfae5409575246b986a145c386dc4" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-pantry.sha256.txt -source.sha256 = "4aaff4a683d44f7c4d52a102a6c0592ff6135e5255f3a3317b1a38b962abd745" +source.sha256 = "34418c60ecccade796e604997a11b1fa7f01c364996fa4b57131466e910700a8" output.type = "zone" output.intermediate_only = true @@ -517,10 +517,10 @@ service_name = "propolis-server" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "propolis" -source.commit = "0b71410d3a12045d34fa1c6c1d9ba1d8dd652564" +source.commit = "6d7ed9a033babc054db9eff5b59dee978d2b0d76" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/propolis/image//propolis-server.sha256.txt -source.sha256 = "b6494d26886f196c2d1f70633c8745f278e2413357cf38d1e6f473a22191c802" +source.sha256 = "f8f41b47bc00811fefe2ba75e0f6f8ab77765776c04021e0b31f09c3b21108a9" output.type = "zone" [package.mg-ddm-gz] From a5213df36ed20f5ee8a63e46fe0f277460950158 Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Mon, 20 May 2024 12:27:16 -0700 Subject: [PATCH 26/37] "external xtasks" pattern; wire up `cargo xtask releng` (#5783) --- .github/buildomat/jobs/tuf-repo.sh | 2 +- dev-tools/releng/src/main.rs | 3 +- dev-tools/xtask/src/external.rs | 72 ++++++++++++++++++++++++++++++ dev-tools/xtask/src/main.rs | 20 ++++++++- 4 files changed, 93 insertions(+), 4 deletions(-) create mode 100644 dev-tools/xtask/src/external.rs diff --git a/.github/buildomat/jobs/tuf-repo.sh b/.github/buildomat/jobs/tuf-repo.sh index 2ed1ae08c34..2e3050b4892 100755 --- a/.github/buildomat/jobs/tuf-repo.sh +++ b/.github/buildomat/jobs/tuf-repo.sh @@ -65,4 +65,4 @@ esac pfexec zfs create -p "rpool/images/$USER/host" pfexec zfs create -p "rpool/images/$USER/recovery" -cargo run --release --bin omicron-releng -- --output-dir /work +cargo xtask releng --output-dir /work diff --git a/dev-tools/releng/src/main.rs b/dev-tools/releng/src/main.rs index f382f5f222e..445090115d9 100644 --- a/dev-tools/releng/src/main.rs +++ b/dev-tools/releng/src/main.rs @@ -96,7 +96,6 @@ static WORKSPACE_DIR: Lazy = Lazy::new(|| { dir }); -#[derive(Parser)] /// Run the Oxide release engineering process and produce a TUF repo that can be /// used to update a rack. /// @@ -104,6 +103,8 @@ static WORKSPACE_DIR: Lazy = Lazy::new(|| { /// /// Note that `--host-dataset` and `--recovery-dataset` must be set to different /// values to build the two OS images in parallel. This is strongly recommended. 
+#[derive(Parser)] +#[command(name = "cargo xtask releng", bin_name = "cargo xtask releng")] struct Args { /// ZFS dataset to use for `helios-build` when building the host image #[clap(long, default_value_t = Self::default_dataset("host"))] diff --git a/dev-tools/xtask/src/external.rs b/dev-tools/xtask/src/external.rs new file mode 100644 index 00000000000..9c0bc69b55f --- /dev/null +++ b/dev-tools/xtask/src/external.rs @@ -0,0 +1,72 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! External xtasks. (extasks?) + +use std::ffi::{OsStr, OsString}; +use std::os::unix::process::CommandExt; +use std::process::Command; + +use anyhow::{Context, Result}; +use clap::Parser; + +/// Argument parser for external xtasks. +/// +/// In general we want all developer tasks to be discoverable simply by running +/// `cargo xtask`, but some development tools end up with a particularly +/// large dependency tree. It's not ideal to have to pay the cost of building +/// our release engineering tooling if all the user wants to do is check for +/// workspace dependency issues. +/// +/// `External` provides a pattern for creating xtasks that live in other crates. +/// An external xtask is defined on `crate::Cmds` as a tuple variant containing +/// `External`, which captures all arguments and options (even `--help`) as +/// a `Vec`. The main function then calls `External::exec` with the +/// appropriate bin target name and any additional Cargo arguments. +#[derive(Parser)] +#[clap( + disable_help_flag(true), + disable_help_subcommand(true), + disable_version_flag(true) +)] +pub struct External { + #[clap(trailing_var_arg(true), allow_hyphen_values(true))] + args: Vec, + + // This stores an in-progress Command builder. `cargo_args` appends args + // to it, and `exec` consumes it. Clap does not treat this as a command + // (`skip`), but fills in this field by calling `new_command`. + #[clap(skip = new_command())] + command: Command, +} + +impl External { + /// Add additional arguments to `cargo run` (for instance, to run the + /// external xtask in release mode). 
+ pub fn cargo_args( + mut self, + args: impl IntoIterator>, + ) -> External { + self.command.args(args); + self + } + + pub fn exec(mut self, bin_target: impl AsRef) -> Result<()> { + let error = self + .command + .arg("--bin") + .arg(bin_target) + .arg("--") + .args(self.args) + .exec(); + Err(error).context("failed to exec `cargo run`") + } +} + +fn new_command() -> Command { + let cargo = std::env::var_os("CARGO").unwrap_or_else(|| "cargo".into()); + let mut command = Command::new(cargo); + command.arg("run"); + command +} diff --git a/dev-tools/xtask/src/main.rs b/dev-tools/xtask/src/main.rs index 56d01d0ff0e..9f1131e7588 100644 --- a/dev-tools/xtask/src/main.rs +++ b/dev-tools/xtask/src/main.rs @@ -12,6 +12,8 @@ use clap::{Parser, Subcommand}; mod check_workspace_deps; mod clippy; +#[cfg_attr(not(target_os = "illumos"), allow(dead_code))] +mod external; #[cfg(target_os = "illumos")] mod verify_libraries; @@ -19,7 +21,11 @@ mod verify_libraries; mod virtual_hardware; #[derive(Parser)] -#[command(name = "cargo xtask", about = "Workspace-related developer tools")] +#[command( + name = "cargo xtask", + bin_name = "cargo xtask", + about = "Workspace-related developer tools" +)] struct Args { #[command(subcommand)] cmd: Cmds, @@ -33,6 +39,9 @@ enum Cmds { /// Run configured clippy checks Clippy(clippy::ClippyArgs), + #[cfg(target_os = "illumos")] + /// Build a TUF repo + Releng(external::External), /// Verify we are not leaking library bindings outside of intended /// crates #[cfg(target_os = "illumos")] @@ -41,6 +50,9 @@ enum Cmds { #[cfg(target_os = "illumos")] VirtualHardware(virtual_hardware::Args), + /// (this command is only available on illumos) + #[cfg(not(target_os = "illumos"))] + Releng, /// (this command is only available on illumos) #[cfg(not(target_os = "illumos"))] VerifyLibraries, @@ -55,13 +67,17 @@ fn main() -> Result<()> { Cmds::Clippy(args) => clippy::run_cmd(args), Cmds::CheckWorkspaceDeps => check_workspace_deps::run_cmd(), + #[cfg(target_os = "illumos")] + Cmds::Releng(external) => { + external.cargo_args(["--release"]).exec("omicron-releng") + } #[cfg(target_os = "illumos")] Cmds::VerifyLibraries(args) => verify_libraries::run_cmd(args), #[cfg(target_os = "illumos")] Cmds::VirtualHardware(args) => virtual_hardware::run_cmd(args), #[cfg(not(target_os = "illumos"))] - Cmds::VerifyLibraries | Cmds::VirtualHardware => { + Cmds::Releng | Cmds::VerifyLibraries | Cmds::VirtualHardware => { anyhow::bail!("this command is only available on illumos"); } } From 94ef8e50d0c1310bfc41899395cbe4a562ed88b2 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Mon, 20 May 2024 16:27:07 -0400 Subject: [PATCH 27/37] Allow different versions for differently signed RoT images (#5580) Currently we restrict all RoT archives for {Gimlet, PSC, Sidecar} to be the same version. This restricts us to having staging and production images to be the same version. What we actually want is all archives for a particular kind _signed with the same key_ to have the same version. 
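
For reference, the builder-time check added here is shaped roughly like the
sketch below. This is illustrative only: the types are simplified stand-ins
(`kind` is really a `KnownArtifactKind`, the grouping key is the
`RotSignData { kind, sign }` built from the signing information read out of
each Hubris archive, and the real code reports a repository error rather than
a `String`):

```rust
use std::collections::HashMap;

// Simplified stand-in for the (artifact kind, signing key) grouping key.
#[derive(Debug, Eq, Hash, PartialEq)]
struct RotSignData {
    kind: String,
    sign: Vec<u8>,
}

// For each group of RoT artifacts sharing a signing key, require that every
// artifact in the group (A and B images alike) carries the same version.
fn check_rot_versions(
    rot_by_sign: &HashMap<RotSignData, Vec<(String, String)>>, // (name, version)
) -> Result<(), String> {
    for (entry, artifacts) in rot_by_sign {
        let mut versions = artifacts.iter().map(|(_, version)| version);
        if let Some(first) = versions.next() {
            if versions.any(|v| v != first) {
                return Err(format!(
                    "multiple versions for {:?} artifacts signed with the same key",
                    entry.kind
                ));
            }
        }
    }
    Ok(())
}
```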
--- Cargo.lock | 13 +- tufaceous-lib/src/assemble/manifest.rs | 3 + update-common/src/artifacts/update_plan.rs | 440 +++++++++++++++++++-- update-common/src/errors.rs | 12 +- 4 files changed, 429 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d01cc0cc15d..f95db6c1d26 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3259,9 +3259,10 @@ dependencies = [ [[package]] name = "hubtools" -version = "0.4.1" -source = "git+https://github.com/oxidecomputer/hubtools.git?branch=main#73cd5a84689d59ecce9da66ad4389c540d315168" +version = "0.4.6" +source = "git+https://github.com/oxidecomputer/hubtools.git?branch=main#943c4bbe6b50d1ab635d085d6204895fb4154e79" dependencies = [ + "hex", "lpc55_areas", "lpc55_sign", "object 0.30.4", @@ -4147,8 +4148,8 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "lpc55_areas" -version = "0.2.4" -source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" +version = "0.2.5" +source = "git+https://github.com/oxidecomputer/lpc55_support#131520fc913ecce9b80557e854751953f743a7d2" dependencies = [ "bitfield", "clap", @@ -4158,8 +4159,8 @@ dependencies = [ [[package]] name = "lpc55_sign" -version = "0.3.3" -source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" +version = "0.3.4" +source = "git+https://github.com/oxidecomputer/lpc55_support#131520fc913ecce9b80557e854751953f743a7d2" dependencies = [ "byteorder", "const-oid", diff --git a/tufaceous-lib/src/assemble/manifest.rs b/tufaceous-lib/src/assemble/manifest.rs index 1c4a676f4c1..2236580b752 100644 --- a/tufaceous-lib/src/assemble/manifest.rs +++ b/tufaceous-lib/src/assemble/manifest.rs @@ -294,11 +294,14 @@ impl<'a> FakeDataAttributes<'a> { KnownArtifactKind::SwitchRot => "fake-sidecar-rot", }; + // For our purposes sign = board represents what we want for the RoT + // and we don't care about the SP at this point let caboose = CabooseBuilder::default() .git_commit("this-is-fake-data") .board(board) .version(self.version.to_string()) .name(self.name) + .sign(board) .build(); let mut builder = HubrisArchiveBuilder::with_fake_image(); diff --git a/update-common/src/artifacts/update_plan.rs b/update-common/src/artifacts/update_plan.rs index c5b171d6483..53286ee09ad 100644 --- a/update-common/src/artifacts/update_plan.rs +++ b/update-common/src/artifacts/update_plan.rs @@ -33,6 +33,7 @@ use std::collections::btree_map; use std::collections::BTreeMap; use std::collections::HashMap; use std::io; +use tokio::io::AsyncReadExt; use tufaceous_lib::HostPhaseImages; use tufaceous_lib::RotArchives; @@ -73,6 +74,15 @@ pub struct UpdatePlan { pub control_plane_hash: ArtifactHash, } +// Used to represent the information extracted from signed RoT images. This +// is used when going from `UpdatePlanBuilder` -> `UpdatePlan` to check +// the versions on the RoT images +#[derive(Debug, Eq, Hash, PartialEq)] +struct RotSignData { + kind: KnownArtifactKind, + sign: Vec, +} + /// `UpdatePlanBuilder` mirrors all the fields of `UpdatePlan`, but they're all /// optional: it can be filled in as we read a TUF repository. 
/// [`UpdatePlanBuilder::build()`] will (fallibly) convert from the builder to @@ -114,6 +124,9 @@ pub struct UpdatePlanBuilder<'a> { by_hash: HashMap, artifacts_meta: Vec, + // map for RoT signing information, used in `ArtifactsWithPlan` + rot_by_sign: HashMap>, + // extra fields we use to build the plan extracted_artifacts: ExtractedArtifacts, log: &'a Logger, @@ -144,6 +157,7 @@ impl<'a> UpdatePlanBuilder<'a> { by_id: BTreeMap::new(), by_hash: HashMap::new(), + rot_by_sign: HashMap::new(), artifacts_meta: Vec::new(), extracted_artifacts, @@ -317,6 +331,56 @@ impl<'a> UpdatePlanBuilder<'a> { }, )?; + // We need to get all the signing information now to properly check + // version at builder time (builder time is not async) + let image_a_stream = rot_a_data + .reader_stream() + .await + .map_err(RepositoryError::CreateReaderStream)?; + let mut image_a = Vec::with_capacity(rot_a_data.file_size()); + tokio_util::io::StreamReader::new(image_a_stream) + .read_to_end(&mut image_a) + .await + .map_err(|error| RepositoryError::ReadExtractedArchive { + artifact: ArtifactHashId { + kind: artifact_id.kind.clone(), + hash: rot_a_data.hash(), + }, + error, + })?; + + let (artifact_id, image_a_sign) = + read_hubris_sign_from_archive(artifact_id, image_a)?; + + self.rot_by_sign + .entry(RotSignData { kind: artifact_kind, sign: image_a_sign }) + .or_default() + .push(artifact_id.clone()); + + let image_b_stream = rot_b_data + .reader_stream() + .await + .map_err(RepositoryError::CreateReaderStream)?; + let mut image_b = Vec::with_capacity(rot_b_data.file_size()); + tokio_util::io::StreamReader::new(image_b_stream) + .read_to_end(&mut image_b) + .await + .map_err(|error| RepositoryError::ReadExtractedArchive { + artifact: ArtifactHashId { + kind: artifact_id.kind.clone(), + hash: rot_b_data.hash(), + }, + error, + })?; + + let (artifact_id, image_b_sign) = + read_hubris_sign_from_archive(artifact_id, image_b)?; + + self.rot_by_sign + .entry(RotSignData { kind: artifact_kind, sign: image_b_sign }) + .or_default() + .push(artifact_id.clone()); + // Technically we've done all we _need_ to do with the RoT images. We // send them directly to MGS ourself, so don't expect anyone to ask for // them via `by_id` or `by_hash`. However, it's more convenient to @@ -700,38 +764,26 @@ impl<'a> UpdatePlanBuilder<'a> { } } - // Ensure that all A/B RoT images for each board kind have the same - // version number. - for (kind, mut single_board_rot_artifacts) in [ - ( - KnownArtifactKind::GimletRot, - self.gimlet_rot_a.iter().chain(&self.gimlet_rot_b), - ), - ( - KnownArtifactKind::PscRot, - self.psc_rot_a.iter().chain(&self.psc_rot_b), - ), - ( - KnownArtifactKind::SwitchRot, - self.sidecar_rot_a.iter().chain(&self.sidecar_rot_b), - ), - ] { - // We know each of these iterators has at least 2 elements (one from - // the A artifacts and one from the B artifacts, checked above) so - // we can safely unwrap the first. - let version = - &single_board_rot_artifacts.next().unwrap().id.version; - for artifact in single_board_rot_artifacts { - if artifact.id.version != *version { + // Ensure that all A/B RoT images for each board kind and same + // signing key have the same version. (i.e. 
allow gimlet_rot signed + // with a staging key to be a different version from gimlet_rot signed + // with a production key) + for (entry, versions) in self.rot_by_sign { + let kind = entry.kind; + // This unwrap is safe because we check above that each of the types + // has at least one entry + let version = &versions.first().unwrap().version; + match versions.iter().find(|x| x.version != *version) { + None => continue, + Some(v) => { return Err(RepositoryError::MultipleVersionsPresent { kind, v1: version.clone(), - v2: artifact.id.version.clone(), - }); + v2: v.version.clone(), + }) } } } - // Repeat the same version check for all SP images. (This is a separate // loop because the types of the iterators don't match.) for (kind, mut single_board_sp_artifacts) in [ @@ -803,6 +855,32 @@ pub struct UpdatePlanBuildOutput { pub artifacts_meta: Vec, } +// We take id solely to be able to output error messages +fn read_hubris_sign_from_archive( + id: ArtifactId, + data: Vec, +) -> Result<(ArtifactId, Vec), RepositoryError> { + let archive = match RawHubrisArchive::from_vec(data).map_err(Box::new) { + Ok(archive) => archive, + Err(error) => { + return Err(RepositoryError::ParsingHubrisArchive { id, error }); + } + }; + let caboose = match archive.read_caboose().map_err(Box::new) { + Ok(caboose) => caboose, + Err(error) => { + return Err(RepositoryError::ReadHubrisCaboose { id, error }); + } + }; + let sign = match caboose.sign() { + Ok(sign) => sign, + Err(error) => { + return Err(RepositoryError::ReadHubrisCabooseBoard { id, error }); + } + }; + Ok((id, sign.to_vec())) +} + // This function takes and returns `id` to avoid an unnecessary clone; `id` will // be present in either the Ok tuple or the error. fn read_hubris_board_from_archive( @@ -895,11 +973,11 @@ mod tests { tarball: Bytes, } - fn make_random_rot_image() -> RandomRotImage { + fn make_random_rot_image(sign: &str, board: &str) -> RandomRotImage { use tufaceous_lib::CompositeRotArchiveBuilder; - let archive_a = make_random_bytes(); - let archive_b = make_random_bytes(); + let archive_a = make_fake_rot_image(sign, board); + let archive_b = make_fake_rot_image(sign, board); let mut builder = CompositeRotArchiveBuilder::new(Vec::new(), MtimeSource::Zero) @@ -926,6 +1004,22 @@ mod tests { } } + fn make_fake_rot_image(sign: &str, board: &str) -> Vec { + use hubtools::{CabooseBuilder, HubrisArchiveBuilder}; + + let caboose = CabooseBuilder::default() + .git_commit("this-is-fake-data") + .board(board) + .version("0.0.0") + .name("rot-bord") + .sign(sign) + .build(); + + let mut builder = HubrisArchiveBuilder::with_fake_image(); + builder.write_caboose(caboose.as_slice()).unwrap(); + builder.build_to_vec().unwrap() + } + fn make_fake_sp_image(board: &str) -> Vec { use hubtools::{CabooseBuilder, HubrisArchiveBuilder}; @@ -941,6 +1035,288 @@ mod tests { builder.build_to_vec().unwrap() } + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_bad_rot_versions() { + const VERSION_0: SemverVersion = SemverVersion::new(0, 0, 0); + const VERSION_1: SemverVersion = SemverVersion::new(0, 0, 1); + + let logctx = test_setup_log("test_multi_rot_version"); + + let mut plan_builder = + UpdatePlanBuilder::new(VERSION_0, &logctx.log).unwrap(); + + // The control plane artifact can be arbitrary bytes; just populate it + // with random data. 
+ { + let kind = KnownArtifactKind::ControlPlane; + let data = make_random_bytes(); + let hash = ArtifactHash(Sha256::digest(&data).into()); + let id = ArtifactId { + name: format!("{kind:?}"), + version: VERSION_0, + kind: kind.into(), + }; + plan_builder + .add_artifact( + id, + hash, + futures::stream::iter([Ok(Bytes::from(data))]), + ) + .await + .unwrap(); + } + + // For each SP image, we'll insert two artifacts: these should end up in + // the update plan's SP image maps keyed by their "board". Normally the + // board is read from the archive itself via hubtools; we'll inject a + // test function that returns the artifact ID name as the board instead. + for (kind, boards) in [ + (KnownArtifactKind::GimletSp, ["test-gimlet-a", "test-gimlet-b"]), + (KnownArtifactKind::PscSp, ["test-psc-a", "test-psc-b"]), + (KnownArtifactKind::SwitchSp, ["test-switch-a", "test-switch-b"]), + ] { + for board in boards { + let data = make_fake_sp_image(board); + let hash = ArtifactHash(Sha256::digest(&data).into()); + let id = ArtifactId { + name: board.to_string(), + version: VERSION_0, + kind: kind.into(), + }; + plan_builder + .add_artifact( + id, + hash, + futures::stream::iter([Ok(Bytes::from(data))]), + ) + .await + .unwrap(); + } + } + + // The Host, Trampoline, and RoT artifacts must be structed the way we + // expect (i.e., .tar.gz's containing multiple inner artifacts). + let host = make_random_host_os_image(); + let trampoline = make_random_host_os_image(); + + for (kind, image) in [ + (KnownArtifactKind::Host, &host), + (KnownArtifactKind::Trampoline, &trampoline), + ] { + let data = &image.tarball; + let hash = ArtifactHash(Sha256::digest(data).into()); + let id = ArtifactId { + name: format!("{kind:?}"), + version: VERSION_0, + kind: kind.into(), + }; + plan_builder + .add_artifact( + id, + hash, + futures::stream::iter([Ok(data.clone())]), + ) + .await + .unwrap(); + } + + let gimlet_rot = make_random_rot_image("gimlet", "gimlet"); + let psc_rot = make_random_rot_image("psc", "psc"); + let sidecar_rot = make_random_rot_image("sidecar", "sidecar"); + + let gimlet_rot_2 = make_random_rot_image("gimlet", "gimlet-the second"); + + for (kind, artifact) in [ + (KnownArtifactKind::GimletRot, &gimlet_rot), + (KnownArtifactKind::PscRot, &psc_rot), + (KnownArtifactKind::SwitchRot, &sidecar_rot), + ] { + let data = &artifact.tarball; + let hash = ArtifactHash(Sha256::digest(data).into()); + let id = ArtifactId { + name: format!("{kind:?}"), + version: VERSION_0, + kind: kind.into(), + }; + plan_builder + .add_artifact( + id, + hash, + futures::stream::iter([Ok(data.clone())]), + ) + .await + .unwrap(); + } + + let bad_kind = KnownArtifactKind::GimletRot; + let data = &gimlet_rot_2.tarball; + let hash = ArtifactHash(Sha256::digest(data).into()); + let id = ArtifactId { + name: format!("{bad_kind:?}"), + version: VERSION_1, + kind: bad_kind.into(), + }; + plan_builder + .add_artifact(id, hash, futures::stream::iter([Ok(data.clone())])) + .await + .unwrap(); + + match plan_builder.build() { + Err(_) => (), + Ok(_) => panic!("Added two artifacts with the same version"), + } + logctx.cleanup_successful(); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_multi_rot_version() { + const VERSION_0: SemverVersion = SemverVersion::new(0, 0, 0); + const VERSION_1: SemverVersion = SemverVersion::new(0, 0, 1); + + let logctx = test_setup_log("test_multi_rot_version"); + + let mut plan_builder = + UpdatePlanBuilder::new("0.0.0".parse().unwrap(), &logctx.log) + .unwrap(); + + // The 
control plane artifact can be arbitrary bytes; just populate it + // with random data. + { + let kind = KnownArtifactKind::ControlPlane; + let data = make_random_bytes(); + let hash = ArtifactHash(Sha256::digest(&data).into()); + let id = ArtifactId { + name: format!("{kind:?}"), + version: VERSION_0, + kind: kind.into(), + }; + plan_builder + .add_artifact( + id, + hash, + futures::stream::iter([Ok(Bytes::from(data))]), + ) + .await + .unwrap(); + } + + // For each SP image, we'll insert two artifacts: these should end up in + // the update plan's SP image maps keyed by their "board". Normally the + // board is read from the archive itself via hubtools; we'll inject a + // test function that returns the artifact ID name as the board instead. + for (kind, boards) in [ + (KnownArtifactKind::GimletSp, ["test-gimlet-a", "test-gimlet-b"]), + (KnownArtifactKind::PscSp, ["test-psc-a", "test-psc-b"]), + (KnownArtifactKind::SwitchSp, ["test-switch-a", "test-switch-b"]), + ] { + for board in boards { + let data = make_fake_sp_image(board); + let hash = ArtifactHash(Sha256::digest(&data).into()); + let id = ArtifactId { + name: board.to_string(), + version: VERSION_0, + kind: kind.into(), + }; + plan_builder + .add_artifact( + id, + hash, + futures::stream::iter([Ok(Bytes::from(data))]), + ) + .await + .unwrap(); + } + } + + // The Host, Trampoline, and RoT artifacts must be structed the way we + // expect (i.e., .tar.gz's containing multiple inner artifacts). + let host = make_random_host_os_image(); + let trampoline = make_random_host_os_image(); + + for (kind, image) in [ + (KnownArtifactKind::Host, &host), + (KnownArtifactKind::Trampoline, &trampoline), + ] { + let data = &image.tarball; + let hash = ArtifactHash(Sha256::digest(data).into()); + let id = ArtifactId { + name: format!("{kind:?}"), + version: VERSION_0, + kind: kind.into(), + }; + plan_builder + .add_artifact( + id, + hash, + futures::stream::iter([Ok(data.clone())]), + ) + .await + .unwrap(); + } + + let gimlet_rot = make_random_rot_image("gimlet", "gimlet"); + let psc_rot = make_random_rot_image("psc", "psc"); + let sidecar_rot = make_random_rot_image("sidecar", "sidecar"); + + let gimlet_rot_2 = make_random_rot_image("gimlet2", "gimlet"); + let psc_rot_2 = make_random_rot_image("psc2", "psc"); + let sidecar_rot_2 = make_random_rot_image("sidecar2", "sidecar"); + + for (kind, artifact) in [ + (KnownArtifactKind::GimletRot, &gimlet_rot), + (KnownArtifactKind::PscRot, &psc_rot), + (KnownArtifactKind::SwitchRot, &sidecar_rot), + ] { + let data = &artifact.tarball; + let hash = ArtifactHash(Sha256::digest(data).into()); + let id = ArtifactId { + name: format!("{kind:?}"), + version: VERSION_0, + kind: kind.into(), + }; + plan_builder + .add_artifact( + id, + hash, + futures::stream::iter([Ok(data.clone())]), + ) + .await + .unwrap(); + } + + for (kind, artifact) in [ + (KnownArtifactKind::GimletRot, &gimlet_rot_2), + (KnownArtifactKind::PscRot, &psc_rot_2), + (KnownArtifactKind::SwitchRot, &sidecar_rot_2), + ] { + let data = &artifact.tarball; + let hash = ArtifactHash(Sha256::digest(data).into()); + let id = ArtifactId { + name: format!("{kind:?}"), + version: VERSION_1, + kind: kind.into(), + }; + plan_builder + .add_artifact( + id, + hash, + futures::stream::iter([Ok(data.clone())]), + ) + .await + .unwrap(); + } + + let UpdatePlanBuildOutput { plan, .. 
} = plan_builder.build().unwrap(); + + assert_eq!(plan.gimlet_rot_a.len(), 2); + assert_eq!(plan.gimlet_rot_b.len(), 2); + assert_eq!(plan.psc_rot_a.len(), 2); + assert_eq!(plan.psc_rot_b.len(), 2); + assert_eq!(plan.sidecar_rot_a.len(), 2); + assert_eq!(plan.sidecar_rot_b.len(), 2); + logctx.cleanup_successful(); + } + // See documentation for extract_nested_artifact_pair for why multi_thread // is required. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -1051,9 +1427,9 @@ mod tests { .unwrap(); } - let gimlet_rot = make_random_rot_image(); - let psc_rot = make_random_rot_image(); - let sidecar_rot = make_random_rot_image(); + let gimlet_rot = make_random_rot_image("gimlet", "gimlet"); + let psc_rot = make_random_rot_image("psc", "psc"); + let sidecar_rot = make_random_rot_image("sidecar", "sidecar"); for (kind, artifact) in [ (KnownArtifactKind::GimletRot, &gimlet_rot), diff --git a/update-common/src/errors.rs b/update-common/src/errors.rs index 0d65312c56c..3d57fb6ab56 100644 --- a/update-common/src/errors.rs +++ b/update-common/src/errors.rs @@ -140,6 +140,14 @@ pub enum RepositoryError { "duplicate hash entries found in artifacts.json for kind `{}`, hash `{}`", .0.kind, .0.hash )] DuplicateHashEntry(ArtifactHashId), + #[error("error creating reader stream")] + CreateReaderStream(#[source] anyhow::Error), + #[error("error reading extracted archive kind {}, hash {}", .artifact.kind, .artifact.hash)] + ReadExtractedArchive { + artifact: ArtifactHashId, + #[source] + error: std::io::Error, + }, } impl RepositoryError { @@ -153,7 +161,9 @@ impl RepositoryError { | RepositoryError::TempFileCreate(_) | RepositoryError::TempFileWrite(_) | RepositoryError::TempFileFlush(_) - | RepositoryError::NamedTempFileCreate { .. } => { + | RepositoryError::NamedTempFileCreate { .. } + | RepositoryError::ReadExtractedArchive { .. } + | RepositoryError::CreateReaderStream { .. 
} => { HttpError::for_unavail(None, message) } From 13b56f93e8941115b7eb8183bdb9ca282cc0b467 Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Mon, 20 May 2024 13:54:59 -0700 Subject: [PATCH 28/37] Lock file maintenance (#5786) --- Cargo.lock | 1396 +++++++++-------- bootstore/Cargo.toml | 2 +- internal-dns/tests/output/test-server.json | 2 +- openapi/bootstrap-agent.json | 11 +- openapi/dns-server.json | 2 +- openapi/gateway.json | 2 +- openapi/installinator-artifactd.json | 2 +- openapi/nexus-internal.json | 11 +- openapi/nexus.json | 3 +- openapi/oximeter.json | 2 +- openapi/sled-agent.json | 11 +- openapi/wicketd.json | 13 +- .../tests/output/self-stat-schema.json | 6 +- schema/all-zone-requests.json | 1 - schema/rss-sled-plan.json | 9 - workspace-hack/Cargo.toml | 126 +- 16 files changed, 786 insertions(+), 813 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f95db6c1d26..5cf0c5aa73c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -29,9 +29,9 @@ dependencies = [ [[package]] name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if", "cipher", @@ -54,31 +54,31 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.8" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom 0.2.12", + "getrandom 0.2.14", "once_cell", "version_check", - "zerocopy 0.7.32", + "zerocopy 0.7.34", ] [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "android-tzdata" @@ -103,15 +103,16 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.11" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] @@ -123,27 +124,27 @@ checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] name = "anstyle-parse" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -165,7 +166,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -179,9 +180,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "argon2" @@ -238,7 +239,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed72493ac66d5804837f480ab3766c72bdfab91a65e565fc54fa9e42db0073a8" dependencies = [ "anstyle", - "bstr 1.9.0", + "bstr 1.9.1", "doc-comment", "predicates", "predicates-core", @@ -272,7 +273,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -294,7 +295,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -305,23 +306,23 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "atomic-polyfill" -version = "0.1.11" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3ff7eb3f316534d83a8a2c3d1674ace8a5a71198eba31e2e2b597833f699b28" +checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4" dependencies = [ "critical-section", ] [[package]] name = "atomic-waker" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "atomicwrites" @@ -358,14 +359,14 @@ dependencies = [ "quote", "serde", "serde_tokenstream 0.2.0", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "backoff" @@ -374,7 +375,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" dependencies = [ "futures-core", - "getrandom 0.2.12", + "getrandom 0.2.14", "instant", "pin-project-lite", "rand 0.8.5", @@ -383,16 +384,16 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = 
"26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", "cfg-if", "libc", "miniz_oxide", - "object 0.32.1", + "object 0.32.2", "rustc-demangle", ] @@ -435,7 +436,7 @@ dependencies = [ "async-trait", "futures-channel", "futures-util", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "tokio", ] @@ -500,24 +501,24 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.69.2" +version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c69fae65a523209d34240b60abe0c42d33d1045d445c0839d8a4894a736e2d" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cexpr", "clang-sys", + "itertools 0.12.1", "lazy_static", "lazycell", "log", - "peeking_take_while", "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.60", + "syn 2.0.64", "which", ] @@ -550,9 +551,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" dependencies = [ "serde", ] @@ -600,26 +601,26 @@ dependencies = [ [[package]] name = "blake2b_simd" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" +checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" dependencies = [ "arrayref", "arrayvec", - "constant_time_eq 0.2.6", + "constant_time_eq", ] [[package]] name = "blake3" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" +checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52" dependencies = [ "arrayref", "arrayvec", "cc", "cfg-if", - "constant_time_eq 0.3.0", + "constant_time_eq", "memmap2", "rayon", ] @@ -718,12 +719,12 @@ dependencies = [ [[package]] name = "bstr" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc" +checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", - "regex-automata 0.4.5", + "regex-automata 0.4.6", "serde", ] @@ -739,15 +740,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.13.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytecount" -version = "0.6.3" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" +checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" [[package]] name = "byteorder" @@ -819,9 +820,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.3" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" +checksum = 
"24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" dependencies = [ "serde", ] @@ -847,7 +848,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8cb1d556b8b8f36e5ca74938008be3ac102f5dcb5b68a0477e4249ae2291cd3" dependencies = [ "serde", - "toml 0.8.12", + "toml 0.8.13", ] [[package]] @@ -882,12 +883,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.83" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" -dependencies = [ - "libc", -] +checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" [[package]] name = "cexpr" @@ -900,11 +898,11 @@ dependencies = [ [[package]] name = "cfg-expr" -version = "0.15.6" +version = "0.15.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6100bc57b6209840798d95cb2775684849d332f7bd788db2a8c8caf7ef82a41a" +checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" dependencies = [ - "smallvec 1.13.1", + "smallvec 1.13.2", "target-lexicon", ] @@ -956,7 +954,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.0", + "windows-targets 0.52.5", ] [[package]] @@ -999,9 +997,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" dependencies = [ "glob", "libc", @@ -1027,7 +1025,7 @@ dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.0", + "strsim", "terminal_size", ] @@ -1040,7 +1038,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1051,9 +1049,9 @@ checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "clipboard-win" -version = "5.0.0" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c57002a5d9be777c1ef967e33674dac9ebd310d8893e4e3437b14d5f0f6372cc" +checksum = "79f4473f5144e20d9aceaf2972478f06ddf687831eafeeb434fbaf0acc4144ad" dependencies = [ "error-code", ] @@ -1066,9 +1064,9 @@ checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "colored" @@ -1132,12 +1130,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "constant_time_eq" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a53c0a4d288377e7415b53dcfc3c04da5cdc2cc95c8d5ac178b58f0b861ad6" - [[package]] name = "constant_time_eq" version = "0.3.0" @@ -1190,9 +1182,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -1200,9 +1192,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "corncobs" @@ -1233,30 +1225,30 @@ dependencies = [ [[package]] name = "crc" -version = "3.0.1" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" dependencies = [ "crc-catalog", ] [[package]] name = "crc-any" -version = "2.4.3" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774646b687f63643eb0f4bf13dc263cb581c8c9e57973b6ddf78bda3994d88df" +checksum = "a62ec9ff5f7965e4d7280bd5482acd20aadb50d632cf6c1d74493856b011fa73" [[package]] name = "crc-catalog" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ "cfg-if", ] @@ -1319,21 +1311,19 @@ checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216" [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] @@ -1359,12 +1349,12 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "crossterm_winapi", "futures-core", "libc", "mio", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "serde", "signal-hook", "signal-hook-mio", @@ -1439,9 +1429,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.5.2" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -1520,20 +1510,20 @@ dependencies = [ [[package]] name = "curve25519-dalek-derive" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "darling" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" +checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" dependencies = [ "darling_core", "darling_macro", @@ -1541,34 +1531,34 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" +checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn 2.0.60", + "strsim", + "syn 2.0.64", ] [[package]] name = "darling_macro" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" +checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ "darling_core", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "datatest-stable" @@ -1595,7 +1585,7 @@ dependencies = [ "quote", "serde", "serde_tokenstream 0.2.0", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1620,9 +1610,9 @@ checksum = "ffe7ed1d93f4553003e20b629abe9085e1e81b1429520f897f8f8860bc6dfc21" [[package]] name = "defmt" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a2d011b2fee29fb7d659b83c43fce9a2cb4df453e16d441a51448e448f3f98" +checksum = "a99dd22262668b887121d4672af5a64b238f026099f1a2a1b322066c9ecfe9e0" dependencies = [ "bitflags 1.3.2", "defmt-macros", @@ -1630,31 +1620,31 @@ dependencies = [ [[package]] name = "defmt-macros" -version = "0.3.6" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54f0216f6c5acb5ae1a47050a6645024e6edafc2ee32d421955eccfef12ef92e" +checksum = "e3a9f309eff1f79b3ebdf252954d90ae440599c26c2c553fe87a2d17195f2dcb" dependencies = [ "defmt-parser", "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "defmt-parser" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "269924c02afd7f94bc4cecbfa5c379f6ffcf9766b3408fe63d22c728654eccd0" +checksum = "ff4a5fefe330e8d7f31b16a318f9ce81000d8e35e69b93eae154d16d2278f70f" dependencies = [ "thiserror", ] [[package]] name = "der" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "der_derive", @@ -1671,7 +1661,7 @@ checksum = "5fe87ce4529967e0ba1dcf8450bab64d97dfd5010a6256187ffe2e43e6f0e049" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1692,7 +1682,7 @@ checksum = "62d671cc41a825ebabc75757b62d3d168c577f9149b2d49ece1dad1f72119d25" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1713,7 +1703,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1723,7 +1713,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "206868b8242f27cecce124c19fd88157fbd0dd334df2587f36417bafbc85097b" dependencies = [ "derive_builder_core", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1747,7 +1737,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1777,7 +1767,7 @@ version = "2.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff236accb9a5069572099f0b350a92e9560e8e63a9b8d546162f4a5e03026bb2" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "byteorder", "chrono", "diesel_derives", @@ -1804,14 +1794,14 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.1.2" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef8337737574f55a468005a83499da720f20c65586241ffea339db9ecdfd2b44" +checksum = "14701062d6bed917b5c7103bdffaee1e4609279e240488ad24e7bd979ca6866c" dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1820,7 +1810,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1947,7 +1937,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", - "toml 0.8.12", + "toml 0.8.13", "trust-dns-client", "trust-dns-proto", "trust-dns-resolver", @@ -1984,7 +1974,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e6b21a1211455e82b1245d6e1b024f30606afbb734c114515d40d0e0b34ce81" dependencies = [ "thiserror", - "zerocopy 0.3.0", + "zerocopy 0.3.2", ] [[package]] @@ -1998,7 +1988,7 @@ dependencies = [ "serde", "serde_json", "thiserror", - "zerocopy 0.7.32", + "zerocopy 0.7.34", ] [[package]] @@ -2029,14 +2019,14 @@ dependencies = [ "serde", "serde_json", "slog", - "toml 0.8.12", + "toml 0.8.13", "uuid", ] [[package]] name = "dropshot" -version = "0.10.1-dev" -source = "git+https://github.com/oxidecomputer/dropshot?branch=main#283d8978abfcfa9b720a155b18cc124c82cb29c9" +version = "0.10.2-dev" +source = "git+https://github.com/oxidecomputer/dropshot?branch=main#2fdf37183d2fac385e0f66f48903bc567f2e8e26" dependencies = [ "async-stream", "async-trait", @@ -2072,7 +2062,7 @@ dependencies = [ "slog-term", "tokio", "tokio-rustls 0.25.0", - "toml 0.8.12", + "toml 0.8.13", "usdt 0.5.0", "uuid", "version_check", @@ -2081,14 +2071,14 @@ dependencies = [ [[package]] name = "dropshot_endpoint" -version = "0.10.1-dev" -source = "git+https://github.com/oxidecomputer/dropshot?branch=main#283d8978abfcfa9b720a155b18cc124c82cb29c9" +version = "0.10.2-dev" +source = "git+https://github.com/oxidecomputer/dropshot?branch=main#2fdf37183d2fac385e0f66f48903bc567f2e8e26" dependencies = [ "proc-macro2", "quote", "serde", "serde_tokenstream 0.2.0", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -2129,7 +2119,7 @@ dependencies = [ "digest", "elliptic-curve", "rfc6979", - "signature 2.1.0", + "signature 2.2.0", "spki", ] @@ -2144,33 +2134,34 @@ dependencies = [ [[package]] name = "ed25519" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" 
+checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ "pkcs8", - "signature 2.1.0", + "signature 2.2.0", ] [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", - "ed25519 2.2.2", + "ed25519 2.2.3", "rand_core 0.6.4", "serde", "sha2", + "subtle", "zeroize", ] [[package]] name = "either" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" [[package]] name = "elliptic-curve" @@ -2201,9 +2192,9 @@ checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" [[package]] name = "ena" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c533630cf40e9caa44bd91aadc88a75d75a4c3a12b4cfde353cbed41daa1e1f1" +checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5" dependencies = [ "log", ] @@ -2216,9 +2207,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if", ] @@ -2251,9 +2242,9 @@ dependencies = [ "russh-keys", "serde", "serde_json", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", - "toml 0.8.12", + "toml 0.8.13", "trust-dns-resolver", "uuid", ] @@ -2291,9 +2282,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.10.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" +checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" dependencies = [ "is-terminal", "log", @@ -2317,9 +2308,9 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", @@ -2327,9 +2318,9 @@ dependencies = [ [[package]] name = "error-code" -version = "3.0.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "281e452d3bad4005426416cdba5ccfd4f5c1280e10099e21db27f7c1c28347fc" +checksum = "a0474425d51df81997e2f90a21591180b38eccf27292d755f3e30750225c175b" [[package]] name = "escape8259" @@ -2364,15 +2355,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "531e46835a22af56d1e3b66f04844bed63158bc094a628bec1d321d9b4c44bf2" dependencies = [ "bit-set", - "regex-automata 0.4.5", - "regex-syntax 0.8.2", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] name = "fastrand" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "fatfs" @@ -2409,9 +2400,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.1" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "filetime" @@ -2425,6 +2416,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "finl_unicode" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" + [[package]] name = "fixedbitset" version = "0.4.2" @@ -2433,9 +2430,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flagset" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda653ca797810c02f7ca4b804b40b8b95ae046eb989d356bce17919a8c25499" +checksum = "cdeb3aa5e95cf9aabc17f060cfa0ced7b83f042390760ca53bf09df9968acaa1" [[package]] name = "flate2" @@ -2501,7 +2498,7 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -2613,7 +2610,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -2630,9 +2627,9 @@ checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" @@ -2713,7 +2710,7 @@ dependencies = [ "serde_repr", "smoltcp 0.9.1", "static_assertions", - "strum_macros 0.25.2", + "strum_macros 0.25.3", "uuid", "zerocopy 0.6.6", ] @@ -2738,7 +2735,7 @@ dependencies = [ "serde", "serde-big-array 0.5.1", "slog", - "socket2 0.5.6", + "socket2 0.5.7", "string_cache", "thiserror", "tlvc 0.3.1 (git+https://github.com/oxidecomputer/tlvc.git?branch=main)", @@ -2808,9 +2805,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "js-sys", @@ -2821,9 +2818,9 @@ dependencies = [ [[package]] name = "ghash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ "opaque-debug", "polyval", @@ -2831,9 +2828,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = 
"glob" @@ -2843,22 +2840,22 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" +checksum = "57da3b9b5b85bd66f31093f8c408b90a74431672542466497dcbdfdc02034be1" dependencies = [ "aho-corasick", - "bstr 1.9.0", - "fnv", + "bstr 1.9.1", "log", - "regex", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] name = "goblin" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb07a4ffed2093b118a525b1d8f5204ae274faed5604537caf7135d0f18d9887" +checksum = "1b363a30c165f666402fe6a3024d3bec7ebc898f96a4a23bd1c99f8dbf3f4f47" dependencies = [ "log", "plain", @@ -2898,7 +2895,7 @@ dependencies = [ "semver 1.0.23", "serde", "serde_json", - "smallvec 1.13.1", + "smallvec 1.13.2", "static_assertions", "target-spec", ] @@ -2930,9 +2927,9 @@ dependencies = [ [[package]] name = "half" -version = "2.3.1" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" dependencies = [ "cfg-if", "crunchy", @@ -2973,9 +2970,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", @@ -3007,9 +3004,9 @@ dependencies = [ [[package]] name = "heapless" -version = "0.7.16" +version = "0.7.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db04bc24a18b9ea980628ecf00e6c0264f3c1426dac36c00cb49b6fbad8b0743" +checksum = "cdc6457c0eb62c71aac4bc17216026d8410337c4126773b9c5daba343f17964f" dependencies = [ "atomic-polyfill", "hash32 0.2.1", @@ -3060,9 +3057,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -3105,11 +3102,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.5" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -3131,7 +3128,7 @@ checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" dependencies = [ "cfg-if", "libc", - "windows 0.52.0", + "windows", ] [[package]] @@ -3147,9 +3144,9 @@ dependencies = [ [[package]] name = "http" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -3158,9 +3155,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version 
= "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http 0.2.12", @@ -3174,7 +3171,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", - "http 1.0.0", + "http 1.1.0", ] [[package]] @@ -3220,7 +3217,7 @@ dependencies = [ [[package]] name = "hubpack" version = "0.1.0" -source = "git+https://github.com/cbiffle/hubpack?rev=df08cc3a6e1f97381cd0472ae348e310f0119e25#df08cc3a6e1f97381cd0472ae348e310f0119e25" +source = "git+https://github.com/cbiffle/hubpack.git?rev=df08cc3a6e1f97381cd0472ae348e310f0119e25#df08cc3a6e1f97381cd0472ae348e310f0119e25" dependencies = [ "hubpack_derive 0.1.0", "serde", @@ -3239,7 +3236,7 @@ dependencies = [ [[package]] name = "hubpack_derive" version = "0.1.0" -source = "git+https://github.com/cbiffle/hubpack?rev=df08cc3a6e1f97381cd0472ae348e310f0119e25#df08cc3a6e1f97381cd0472ae348e310f0119e25" +source = "git+https://github.com/cbiffle/hubpack.git?rev=df08cc3a6e1f97381cd0472ae348e310f0119e25#df08cc3a6e1f97381cd0472ae348e310f0119e25" dependencies = [ "proc-macro2", "quote", @@ -3269,7 +3266,7 @@ dependencies = [ "path-slash", "rsa", "thiserror", - "tlvc 0.3.1 (git+https://github.com/oxidecomputer/tlvc.git)", + "tlvc 0.3.1 (git+https://github.com/oxidecomputer/tlvc)", "tlvc-text", "toml 0.7.8", "x509-cert", @@ -3295,12 +3292,12 @@ dependencies = [ "futures-util", "h2", "http 0.2.12", - "http-body 0.4.5", + "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -3309,18 +3306,19 @@ dependencies = [ [[package]] name = "hyper" -version = "1.1.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "httparse", "itoa", "pin-project-lite", + "smallvec 1.13.2", "tokio", "want", ] @@ -3334,7 +3332,7 @@ dependencies = [ "futures-util", "http 0.2.12", "hyper 0.14.28", - "rustls 0.21.9", + "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", ] @@ -3346,8 +3344,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" dependencies = [ "futures-util", - "http 1.0.0", - "hyper 1.1.0", + "http 1.1.0", + "hyper 1.3.1", "hyper-util", "log", "rustls 0.22.4", @@ -3392,18 +3390,18 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdea9aac0dbe5a9240d68cfd9501e2db94222c6dc06843e06640b9e07f0fdc67" +checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", - "hyper 1.1.0", + "hyper 1.3.1", "pin-project-lite", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tower", "tower-service", @@ -3412,16 +3410,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.57" +version = "0.1.60" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows 0.48.0", + "windows-core", ] [[package]] @@ -3516,7 +3514,7 @@ dependencies = [ "smf", "thiserror", "tokio", - "toml 0.8.12", + "toml 0.8.13", "uuid", "whoami", "zone 0.3.0", @@ -3551,7 +3549,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "serde", ] @@ -3577,9 +3575,9 @@ checksum = "bfa799dd5ed20a7e349f3b4639aa80d74549c81716d9ec4f994c9b5815598306" [[package]] name = "indoc" -version = "2.0.3" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c785eefb63ebd0e33416dfcb8d6da0bf27ce752843a45632a67bf10d4d4b5c4" +checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" [[package]] name = "inout" @@ -3782,7 +3780,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.6", + "socket2 0.5.7", "widestring", "windows-sys 0.48.0", "winreg", @@ -3806,13 +3804,13 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.9" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ - "hermit-abi 0.3.2", - "rustix", - "windows-sys 0.48.0", + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.52.0", ] [[package]] @@ -3821,6 +3819,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7655c9839580ee829dfacba1d1278c2b7883e50a277ff7541299489d6bdfdc45" +[[package]] +name = "is_terminal_polyfill" +version = "1.70.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" + [[package]] name = "ispf" version = "0.1.0" @@ -3849,24 +3853,24 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] [[package]] name = "keccak" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" dependencies = [ "cpufeatures", ] @@ -3893,7 +3897,7 @@ version = "0.1.0" source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" 
dependencies = [ "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -4014,24 +4018,24 @@ dependencies = [ [[package]] name = "libloading" -version = "0.7.4" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "winapi", + "windows-targets 0.52.5", ] [[package]] name = "libm" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libnet" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/netadm-sys?branch=main#d44d9e084f39e844f8083d4d9b39a331061ebbcc" +source = "git+https://github.com/oxidecomputer/netadm-sys?branch=main#4ceaf96e02acb8258ea4aa403326c08932324835" dependencies = [ "anyhow", "cfg-if", @@ -4042,7 +4046,7 @@ dependencies = [ "nvpair", "nvpair-sys", "rusty-doors", - "socket2 0.4.9", + "socket2 0.4.10", "thiserror", "tracing", ] @@ -4050,7 +4054,7 @@ dependencies = [ [[package]] name = "libnet" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/netadm-sys#f114bd0d543d886cd453932e9f0967de57289bc2" +source = "git+https://github.com/oxidecomputer/netadm-sys#4ceaf96e02acb8258ea4aa403326c08932324835" dependencies = [ "anyhow", "cfg-if", @@ -4061,7 +4065,7 @@ dependencies = [ "nvpair", "nvpair-sys", "rusty-doors", - "socket2 0.4.9", + "socket2 0.4.10", "thiserror", "tracing", ] @@ -4080,6 +4084,16 @@ name = "libnvme-sys" version = "0.0.0" source = "git+https://github.com/oxidecomputer/libnvme?rev=6fffcc81d2c423ed2d2e6c5c2827485554c4ecbe#6fffcc81d2c423ed2d2e6c5c2827485554c4ecbe" +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.5.0", + "libc", +] + [[package]] name = "libsw" version = "3.3.1" @@ -4091,9 +4105,9 @@ dependencies = [ [[package]] name = "libtest-mimic" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fefdf21230d6143476a28adbee3d930e2b68a3d56443c777cae3fe9340eebff9" +checksum = "cc0bda45ed5b3a2904262c1bb91e526127aa70e7ef3758aba2ef93cf896b9b58" dependencies = [ "clap", "escape8259", @@ -4132,9 +4146,9 @@ checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lock_api" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -4166,7 +4180,7 @@ dependencies = [ "const-oid", "crc-any", "der", - "env_logger 0.10.0", + "env_logger 0.10.2", "hex", "log", "lpc55_areas", @@ -4184,11 +4198,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -4241,10 
+4255,11 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "md-5" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ + "cfg-if", "digest", ] @@ -4256,9 +4271,9 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "memmap" @@ -4272,9 +4287,9 @@ dependencies = [ [[package]] name = "memmap2" -version = "0.7.1" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f49388d20533534cd19360ad3d6a7dadc885944aa802ba3995040c5ec11288c6" +checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322" dependencies = [ "libc", ] @@ -4328,9 +4343,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", ] @@ -4371,7 +4386,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -4383,7 +4398,7 @@ dependencies = [ "bytes", "encoding_rs", "futures-util", - "http 1.0.0", + "http 1.1.0", "httparse", "memchr", "mime", @@ -4406,7 +4421,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.14", ] [[package]] @@ -4435,9 +4450,9 @@ checksum = "ca2b420f638f07fe83056b55ea190bb815f609ec5a35e7017884a10f78839c9e" [[package]] name = "new_debug_unreachable" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54" +checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" [[package]] name = "newline-converter" @@ -4506,7 +4521,7 @@ dependencies = [ "serde_json", "serde_with", "tokio-postgres", - "toml 0.8.12", + "toml 0.8.13", "uuid", ] @@ -4686,7 +4701,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -4880,7 +4895,7 @@ version = "0.1.0" dependencies = [ "omicron-workspace-hack", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -4927,7 +4942,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" dependencies = [ - "smallvec 1.13.1", + "smallvec 1.13.2", ] [[package]] @@ -4949,7 +4964,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if", "cfg_aliases", "libc", @@ -5001,11 +5016,10 @@ dependencies = [ 
[[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" dependencies = [ - "autocfg", "num-integer", "num-traits", "rand 0.8.5", @@ -5025,7 +5039,7 @@ dependencies = [ "num-traits", "rand 0.8.5", "serde", - "smallvec 1.13.1", + "smallvec 1.13.2", "zeroize", ] @@ -5046,13 +5060,13 @@ checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" [[package]] name = "num-derive" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6a0fd4f737c707bd9086cc16c925f294943eb62eb71499e9fd4cf71f8b9f4e" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -5101,7 +5115,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.9", "libc", ] @@ -5128,9 +5142,9 @@ dependencies = [ [[package]] name = "num_threads" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" dependencies = [ "libc", ] @@ -5170,9 +5184,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -5246,7 +5260,7 @@ dependencies = [ "test-strategy", "thiserror", "tokio", - "toml 0.8.12", + "toml 0.8.13", "uuid", ] @@ -5271,7 +5285,7 @@ dependencies = [ "slog", "thiserror", "tokio", - "toml 0.8.12", + "toml 0.8.13", "uuid", ] @@ -5306,7 +5320,7 @@ dependencies = [ "subprocess", "tokio", "tokio-postgres", - "toml 0.8.12", + "toml 0.8.13", ] [[package]] @@ -5348,7 +5362,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-tungstenite 0.20.1", - "toml 0.8.12", + "toml 0.8.13", "uuid", ] @@ -5566,7 +5580,7 @@ dependencies = [ "tar", "thiserror", "tokio", - "toml 0.8.12", + "toml 0.8.13", "walkdir", ] @@ -5612,7 +5626,7 @@ dependencies = [ "slog-term", "tar", "tokio", - "toml 0.8.12", + "toml 0.8.13", "tufaceous-lib", ] @@ -5714,7 +5728,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "toml 0.8.12", + "toml 0.8.13", "usdt 0.5.0", "uuid", "zeroize", @@ -5779,9 +5793,8 @@ dependencies = [ "bit-set", "bit-vec", "bitflags 1.3.2", - "bitflags 2.4.2", - "bstr 0.2.17", - "bstr 1.9.0", + "bitflags 2.5.0", + "bstr 1.9.1", "byteorder", "bytes", "chrono", @@ -5812,9 +5825,9 @@ dependencies = [ "futures-util", "gateway-messages", "generic-array", - "getrandom 0.2.12", + "getrandom 0.2.14", "group", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "hex", "hmac", "hyper 0.14.28", @@ -5822,9 +5835,11 @@ dependencies = [ "inout", "ipnetwork", "itertools 0.10.5", + "itertools 0.12.1", "lalrpop-util", "lazy_static", "libc", + "linux-raw-sys", "log", "managed", "memchr", @@ -5843,8 +5858,8 @@ dependencies = [ "predicates", "proc-macro2", "regex", - "regex-automata 0.4.5", - 
"regex-syntax 0.8.2", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", "reqwest", "ring 0.17.8", "rustix", @@ -5856,11 +5871,12 @@ dependencies = [ "sha2", "similar", "slog", + "smallvec 1.13.2", "spin 0.9.8", "string_cache", "subtle", "syn 1.0.109", - "syn 2.0.60", + "syn 2.0.64", "time", "time-macros", "tokio", @@ -5870,7 +5886,7 @@ dependencies = [ "toml 0.7.8", "toml_datetime", "toml_edit 0.19.15", - "toml_edit 0.22.12", + "toml_edit 0.22.13", "tracing", "trust-dns-proto", "unicode-bidi", @@ -5879,7 +5895,7 @@ dependencies = [ "usdt-impl 0.5.0", "uuid", "yasna", - "zerocopy 0.7.32", + "zerocopy 0.7.34", "zeroize", ] @@ -5929,9 +5945,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "opaque-debug" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openapi-lint" @@ -5962,7 +5978,7 @@ version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if", "foreign-types 0.3.2", "libc", @@ -5979,7 +5995,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -6116,7 +6132,7 @@ dependencies = [ "serde", "smoltcp 0.11.0", "tabwriter", - "zerocopy 0.7.32", + "zerocopy 0.7.34", ] [[package]] @@ -6192,7 +6208,7 @@ dependencies = [ "subprocess", "thiserror", "tokio", - "toml 0.8.12", + "toml 0.8.13", "uuid", ] @@ -6271,7 +6287,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -6384,12 +6400,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" dependencies = [ "lock_api", - "parking_lot_core 0.9.8", + "parking_lot_core 0.9.10", ] [[package]] @@ -6402,21 +6418,21 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.16", - "smallvec 1.13.1", + "smallvec 1.13.2", "winapi", ] [[package]] name = "parking_lot_core" -version = "0.9.8" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.3.5", - "smallvec 1.13.1", - "windows-targets 0.48.5", + "redox_syscall 0.5.1", + "smallvec 1.13.2", + "windows-targets 0.52.5", ] [[package]] @@ -6427,7 +6443,7 @@ checksum = "06af5f9333eb47bd9ba8462d612e37a8328a5cb80b13f0af4de4c3b89f52dee5" dependencies = [ "parse-display-derive", "regex", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", ] [[package]] @@ -6439,9 +6455,9 @@ dependencies = [ "proc-macro2", "quote", "regex", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", "structmeta 0.3.0", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -6526,12 +6542,6 @@ dependencies = [ "digest", ] -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "peg" version = "0.8.3" @@ -6586,9 +6596,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.6" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f200d8d83c44a45b21764d1916299752ca035d15ecd46faca3e9a2a2bf6ad06" +checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" dependencies = [ "memchr", "thiserror", @@ -6597,9 +6607,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.6" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcd6ab1236bbdb3a49027e920e693192ebfe8913f6d60e294de57463a493cfde" +checksum = "26293c9193fbca7b1a3bf9b79dc1e388e927e6cacaa78b4a3ab705a1d3d41459" dependencies = [ "pest", "pest_generator", @@ -6607,22 +6617,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.6" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a31940305ffc96863a735bef7c7994a00b325a7138fdbc5bda0f1a0476d3275" +checksum = "3ec22af7d3fb470a85dd2ca96b7c577a1eb4ef6f1683a9fe9a8c16e136c04687" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "pest_meta" -version = "2.7.6" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7ff62f5259e53b78d1af898941cdcdccfae7385cf7d793a6e55de5d05bb4b7d" +checksum = "d7a240022f37c361ec1878d646fc5b7d7c4d28d5946e1a80ad5a7a4f4ca0bdcd" dependencies = [ "once_cell", "pest", @@ -6670,29 +6680,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -6723,9 +6733,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "plain" @@ -6735,9 +6745,9 @@ checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" [[package]] name = "platforms" -version = "3.0.2" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" +checksum = 
"db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" [[package]] name = "plotters" @@ -6796,9 +6806,9 @@ dependencies = [ [[package]] name = "polyval" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", "cpufeatures", @@ -6813,9 +6823,9 @@ source = "git+https://github.com/oxidecomputer/poptrie?branch=multipath#ca52bef3 [[package]] name = "portable-atomic" -version = "1.4.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31114a898e107c51bb1609ffaf55a0e011cf6a4d7f1170d0015a165082c0338b" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" [[package]] name = "portpicker" @@ -6955,7 +6965,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -7012,8 +7022,8 @@ dependencies = [ [[package]] name = "progenitor" -version = "0.6.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#0aeb3a723ec515561a93ab38ae4da5cbf071306f" +version = "0.7.0" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#c59c6d64ed2a206bbbc9949abd3457bc0e3810e2" dependencies = [ "progenitor-client", "progenitor-impl", @@ -7023,8 +7033,8 @@ dependencies = [ [[package]] name = "progenitor-client" -version = "0.6.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#0aeb3a723ec515561a93ab38ae4da5cbf071306f" +version = "0.7.0" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#c59c6d64ed2a206bbbc9949abd3457bc0e3810e2" dependencies = [ "bytes", "futures-core", @@ -7037,8 +7047,8 @@ dependencies = [ [[package]] name = "progenitor-impl" -version = "0.6.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#0aeb3a723ec515561a93ab38ae4da5cbf071306f" +version = "0.7.0" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#c59c6d64ed2a206bbbc9949abd3457bc0e3810e2" dependencies = [ "getopts", "heck 0.5.0", @@ -7051,7 +7061,7 @@ dependencies = [ "schemars", "serde", "serde_json", - "syn 2.0.60", + "syn 2.0.64", "thiserror", "typify", "unicode-ident", @@ -7059,8 +7069,8 @@ dependencies = [ [[package]] name = "progenitor-macro" -version = "0.6.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#0aeb3a723ec515561a93ab38ae4da5cbf071306f" +version = "0.7.0" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#c59c6d64ed2a206bbbc9949abd3457bc0e3810e2" dependencies = [ "openapiv3", "proc-macro2", @@ -7071,7 +7081,7 @@ dependencies = [ "serde_json", "serde_tokenstream 0.2.0", "serde_yaml", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -7081,7 +7091,7 @@ source = "git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2 dependencies = [ "anyhow", "bhyve_api 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2018bbedafbc19f6ec1af)", - "bitflags 2.4.2", + "bitflags 2.5.0", "bitstruct", "byteorder", "dladm", @@ -7214,13 +7224,13 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.2", + "bitflags 2.5.0", "lazy_static", "num-traits", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - 
"regex-syntax 0.8.2", + "regex-syntax 0.8.3", "rusty-fork", "tempfile", "unarray", @@ -7274,7 +7284,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" dependencies = [ "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "scheduled-thread-pool", ] @@ -7353,7 +7363,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.14", ] [[package]] @@ -7389,11 +7399,11 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a564a852040e82671dc50a37d88f3aa83bbc690dfc6844cfe7a2591620206a80" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cassowary", "compact_str", "crossterm", - "indoc 2.0.3", + "indoc 2.0.5", "itertools 0.12.1", "lru", "paste", @@ -7487,30 +7497,30 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.3.5" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", ] [[package]] name = "redox_users" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ - "getrandom 0.2.12", - "redox_syscall 0.2.16", + "getrandom 0.2.14", + "libredox", "thiserror", ] @@ -7528,7 +7538,7 @@ dependencies = [ "serde", "strip-ansi-escapes", "strum", - "strum_macros 0.26.1", + "strum_macros 0.26.2", "thiserror", "unicode-segmentation", "unicode-width", @@ -7551,7 +7561,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -7562,8 +7572,8 @@ checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", - "regex-syntax 0.8.2", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -7574,13 +7584,13 @@ checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", ] [[package]] @@ -7591,9 +7601,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum 
= "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "regress" @@ -7601,15 +7611,15 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eae2a1ebfecc58aff952ef8ccd364329abe627762f5bf09ff42eb9d98522479" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", "memchr", ] [[package]] name = "relative-path" -version = "1.9.0" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c707298afce11da2efef2f600116fa93ffa7a032b5d7b628aa17711ec81383ca" +checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" [[package]] name = "reqwest" @@ -7626,7 +7636,7 @@ dependencies = [ "futures-util", "h2", "http 0.2.12", - "http-body 0.4.5", + "http-body 0.4.6", "hyper 0.14.28", "hyper-rustls 0.24.2", "hyper-tls", @@ -7638,8 +7648,8 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.9", - "rustls-pemfile 1.0.3", + "rustls 0.21.12", + "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", @@ -7717,7 +7727,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.12", + "getrandom 0.2.14", "libc", "spin 0.9.8", "untrusted 0.9.0", @@ -7742,7 +7752,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ "base64 0.21.7", - "bitflags 2.4.2", + "bitflags 2.5.0", "serde", "serde_derive", ] @@ -7760,23 +7770,21 @@ dependencies = [ [[package]] name = "rsa" -version = "0.9.2" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ab43bb47d23c1a631b4b680199a45255dce26fa9ab2fa902581f624ff13e6a8" +checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc" dependencies = [ - "byteorder", "const-oid", "digest", "num-bigint-dig", "num-integer", - "num-iter", "num-traits", "pkcs1", "pkcs8", "rand_core 0.6.4", "serde", "sha2", - "signature 2.1.0", + "signature 2.2.0", "spki", "subtle", "zeroize", @@ -7807,18 +7815,18 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.0", - "syn 2.0.60", + "syn 2.0.64", "unicode-ident", ] [[package]] name = "rtoolbox" -version = "0.0.1" +version = "0.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "034e22c514f5c0cb8a10ff341b9b048b5ceb21591f31c8f44c43b960f9b3524a" +checksum = "c247d24e63230cdb56463ae328478bd5eac8b8faa8c69461a77e8e323afac90e" dependencies = [ "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -7830,7 +7838,7 @@ dependencies = [ "aes", "aes-gcm", "async-trait", - "bitflags 2.4.2", + "bitflags 2.5.0", "byteorder", "chacha20", "ctr", @@ -7858,9 +7866,9 @@ dependencies = [ [[package]] name = "russh-cryptovec" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fdf036c2216b554053d19d4af45c1722d13b00ac494ea19825daf4beac034e" +checksum = "2b077b6dd8d8c085dac62f7fcc5a83df60c7f7a22d49bfba994f2f4dbf60bc74" dependencies = [ "libc", "winapi", @@ -7913,14 +7921,14 @@ checksum = "9d9848531d60c9cbbcf9d166c885316c24bc0e2a9d3eba0956bb6cbbd79bc6e8" dependencies = [ "base64 0.21.7", "blake2b_simd", - "constant_time_eq 0.3.0", + "constant_time_eq", ] [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -7955,17 +7963,17 @@ dependencies = [ "serde", "tempfile", "thiserror", - "toml 0.8.12", + "toml 0.8.13", "toolchain_find", ] [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys", @@ -7974,9 +7982,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.9" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring 0.17.8", @@ -7993,7 +8001,7 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.1", + "rustls-webpki 0.102.4", "subtle", "zeroize", ] @@ -8013,9 +8021,9 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ "base64 0.21.7", ] @@ -8032,9 +8040,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.3.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048a63e5b3ac996d78d402940b5fa47973d2d080c6c6fffa1d0f19c4445310b7" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] name = "rustls-webpki" @@ -8048,9 +8056,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.1" +version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef4ca26037c909dedb327b48c3327d0ba91d3dd3c4e05dad328f210ffb68e95b" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -8059,9 +8067,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "rusty-doors" @@ -8099,7 +8107,7 @@ version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7803e8936da37efd9b6d4478277f4b2b9bb5cdb37a113e8d63222e58da647e63" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if", "clipboard-win", "fd-lock", @@ -8117,9 +8125,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "salty" @@ -8170,11 +8178,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -8183,14 +8191,14 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ - "parking_lot 0.12.1", + "parking_lot 0.12.2", ] [[package]] name = "schemars" -version = "0.8.17" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f55c82c700538496bdc329bb4918a81f87cc8888811bd123cf325a0f2f8d309" +checksum = "fc6e7ed6919cb46507fb01ff1654309219f62b4d603822501b0b80d42f6f21ef" dependencies = [ "bytes", "chrono", @@ -8203,14 +8211,14 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.17" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83263746fe5e32097f06356968a077f96089739c927a61450efa069905eec108" +checksum = "185f2b7aa7e02d418e453790dde16890256bbd2bcd04b7dc5348811052b53f49" dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -8236,17 +8244,17 @@ checksum = "7f81c2fde025af7e69b1d1420531c8a8811ca898919db177141a85313b1cb932" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", + "ring 0.17.8", + "untrusted 0.9.0", ] [[package]] @@ -8274,11 +8282,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", @@ -8287,9 +8295,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -8312,9 +8320,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.201" +version = "1.0.202" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c" +checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395" dependencies = [ "serde_derive", ] @@ -8359,24 +8367,24 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.201" +version = "1.0.202" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865" +checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "serde_derive_internals" -version = "0.29.0" +version = "0.29.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "330f01ce65a3a5fe59a60c82f3c9a024b573b8a6e875bd233fe5f934e71d54e3" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -8420,20 +8428,20 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.16" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" dependencies = [ "serde", ] @@ -8458,7 +8466,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -8475,11 +8483,11 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.7.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee80b0e361bbf88fd2f6e242ccd19cfda072cb0faa6ae694ecee08199938569a" +checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", @@ -8493,21 +8501,21 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.7.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6561dc161a9224638a31d876ccdfefbc1df91d3f3a8342eddb35f055d48c7655" +checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "serde_yaml" -version = "0.9.25" +version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ "indexmap 2.2.6", "itoa", @@ -8583,9 +8591,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -8610,9 +8618,9 @@ checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" [[package]] name = "signature" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest", "rand_core 0.6.4", @@ -8629,9 +8637,9 @@ dependencies = [ [[package]] name = "similar" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32fea41aca09ee824cc9724996433064c89f7777e60762749a4170a14abbfa21" +checksum = "fa42c91313f1d05da9b26f267f931cf178d4aba455b4c4622dd7355eb80c6640" dependencies = [ "bstr 0.2.17", "unicode-segmentation", @@ 
-8848,7 +8856,7 @@ source = "git+https://github.com/oxidecomputer/slog-error-chain?branch=main#15f6 dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -8909,15 +8917,15 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "smawk" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f67ad224767faa3c7d8b6d91985b78e70a1324408abcb1cfcc2be4c06bc06043" +checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" [[package]] name = "smf" @@ -8937,7 +8945,7 @@ dependencies = [ "bitflags 1.3.2", "byteorder", "cfg-if", - "heapless 0.7.16", + "heapless 0.7.17", "managed", ] @@ -8975,14 +8983,14 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "socket2" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", @@ -8990,9 +8998,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", @@ -9018,7 +9026,7 @@ dependencies = [ "sprockets-rot", "thiserror", "tokio", - "toml 0.8.12", + "toml 0.8.13", ] [[package]] @@ -9038,9 +9046,9 @@ dependencies = [ [[package]] name = "spki" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", "der", @@ -9102,7 +9110,7 @@ checksum = "01b2e185515564f15375f593fb966b5718bc624ba77fe49fa4616ad619690554" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -9112,7 +9120,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" dependencies = [ "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -9157,7 +9165,7 @@ checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" dependencies = [ "new_debug_unreachable", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "phf_shared 0.10.0", "precomputed-hash", "serde", @@ -9165,10 +9173,11 @@ dependencies = [ [[package]] name = "stringprep" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3737bde7edce97102e0e2b15365bf7a20bfdb5f60f4f9e8d7004258a51a8da" +checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" dependencies = [ + "finl_unicode", "unicode-bidi", "unicode-normalization", ] @@ -9184,15 +9193,9 @@ dependencies = [ [[package]] name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "strsim" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "structmeta" @@ -9203,7 +9206,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive 0.2.0", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -9215,7 +9218,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive 0.3.0", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -9226,7 +9229,7 @@ checksum = "a60bcaff7397072dca0017d1db428e30d5002e00b6847703e2e42005c95fbe00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -9237,7 +9240,7 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -9246,7 +9249,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" dependencies = [ - "strum_macros 0.26.1", + "strum_macros 0.26.2", ] [[package]] @@ -9264,28 +9267,28 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.25.2" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad8d03b598d3d0fff69bf533ee3ef19b8eeb342729596df84bcc7e1f96ec4059" +checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck 0.4.1", "proc-macro2", "quote", "rustversion", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "strum_macros" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a3417fc93d76740d974a01654a09777cb500428cc874ca9f45edfe0c4d4cd18" +checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" dependencies = [ "heck 0.4.1", "proc-macro2", "quote", "rustversion", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -9332,9 +9335,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.60" +version = "2.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" +checksum = "7ad3dee41f36859875573074334c200d1add8e4a87bb37113ebd31d926b7b11f" dependencies = [ "proc-macro2", "quote", @@ -9438,9 +9441,9 @@ dependencies = [ [[package]] name = "target-lexicon" -version = "0.12.13" +version = "0.12.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69758bda2e78f098e4ccb393021a0963bb3442eac05f135c30f61b7370bbafae" +checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" [[package]] name = "target-spec" @@ -9479,9 +9482,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.2.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] @@ -9520,7 +9523,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta 0.2.0", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -9551,7 +9554,7 @@ checksum = "e2470041c06ec3ac1ab38d0356a6119054dedaea53e12fbefc0de730a1c08524" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + 
"syn 2.0.64", ] [[package]] @@ -9576,20 +9579,19 @@ dependencies = [ [[package]] name = "thread-id" -version = "4.2.0" +version = "4.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79474f573561cdc4871a0de34a51c92f7f5a56039113fbb5b9c9f96bdb756669" +checksum = "f0ec81c46e9eb50deaa257be2f148adf052d1fb7701cfd55ccfab2525280b70b" dependencies = [ "libc", - "redox_syscall 0.2.16", "winapi", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if", "once_cell", @@ -9606,9 +9608,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.34" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -9629,9 +9631,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -9671,6 +9673,27 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" +[[package]] +name = "tls_codec" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e78c9c330f8c85b2bae7c8368f2739157db9991235123aa1b15ef9502bfb6a" +dependencies = [ + "tls_codec_derive", + "zeroize", +] + +[[package]] +name = "tls_codec_derive" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9ef545650e79f30233c0003bcc2504d7efac6dad25fca40744de773fe2049c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.64", +] + [[package]] name = "tlvc" version = "0.3.1" @@ -9684,7 +9707,7 @@ dependencies = [ [[package]] name = "tlvc" version = "0.3.1" -source = "git+https://github.com/oxidecomputer/tlvc.git#e644a21a7ca973ed31499106ea926bd63ebccc6f" +source = "git+https://github.com/oxidecomputer/tlvc#e644a21a7ca973ed31499106ea926bd63ebccc6f" dependencies = [ "byteorder", "crc", @@ -9694,11 +9717,11 @@ dependencies = [ [[package]] name = "tlvc-text" version = "0.3.0" -source = "git+https://github.com/oxidecomputer/tlvc.git#e644a21a7ca973ed31499106ea926bd63ebccc6f" +source = "git+https://github.com/oxidecomputer/tlvc#e644a21a7ca973ed31499106ea926bd63ebccc6f" dependencies = [ "ron 0.8.1", "serde", - "tlvc 0.3.1 (git+https://github.com/oxidecomputer/tlvc.git)", + "tlvc 0.3.1 (git+https://github.com/oxidecomputer/tlvc)", "zerocopy 0.6.6", ] @@ -9723,10 +9746,10 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.6", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.48.0", ] @@ -9739,7 +9762,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -9765,14 +9788,14 @@ 
dependencies = [ "futures-channel", "futures-util", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "percent-encoding", "phf", "pin-project-lite", "postgres-protocol", "postgres-types", "rand 0.8.5", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tokio-util", "whoami", @@ -9784,7 +9807,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.9", + "rustls 0.21.12", "tokio", ] @@ -9836,16 +9859,15 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] @@ -9871,21 +9893,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.12" +version = "0.8.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" +checksum = "a4e43f8cc456c9704c851ae29c67e17ef65d2c30017c17a9765b89c382dc8bba" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.12", + "toml_edit 0.22.13", ] [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] @@ -9900,20 +9922,20 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.5.15", + "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.12" +version = "0.22.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" +checksum = "c127785850e8c20836d49732ae6abfa47616e60bf9d9f57c43c250361a9db96c" dependencies = [ "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.1", + "winnow 0.6.8", ] [[package]] @@ -10017,7 +10039,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -10066,7 +10088,7 @@ dependencies = [ "ipnet", "lazy_static", "rand 0.8.5", - "smallvec 1.13.1", + "smallvec 1.13.2", "thiserror", "tinyvec", "tokio", @@ -10085,9 +10107,9 @@ dependencies = [ "ipconfig", "lazy_static", "lru-cache", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "resolv-conf", - "smallvec 1.13.1", + "smallvec 1.13.2", "thiserror", "tokio", "tracing", @@ -10118,23 +10140,22 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.91" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad7eb6319ebadebca3dacf1f85a93bc54b73dd81b9036795f73de7ddfe27d5a" +checksum = "33a5f13f11071020bb12de7a16b925d2d58636175c20c11dc5f96cb64bb6c9b3" dependencies = [ "glob", - "once_cell", "serde", "serde_derive", 
"serde_json", "termcolor", - "toml 0.8.12", + "toml 0.8.13", ] [[package]] @@ -10194,7 +10215,7 @@ dependencies = [ "slog", "tar", "tokio", - "toml 0.8.12", + "toml 0.8.13", "tough", "url", "zip", @@ -10238,7 +10259,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http 1.0.0", + "http 1.1.0", "httparse", "log", "rand 0.8.5", @@ -10250,9 +10271,9 @@ dependencies = [ [[package]] name = "typed-path" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a90726108dab678edab76459751e1cc7c597c3484a6384d6423191255fa641b" +checksum = "668404597c2c687647f6f8934f97c280fd500db28557f52b07c56b92d3dc500a" [[package]] name = "typed-rng" @@ -10268,14 +10289,14 @@ dependencies = [ [[package]] name = "typenum" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "typify" -version = "0.0.16" -source = "git+https://github.com/oxidecomputer/typify#336a042b8242587f57c5e8ec3ac0e54d3610e59c" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/typify#ad1296f6ceb998ae8c247d999b7828703a232bdd" dependencies = [ "typify-impl", "typify-macro", @@ -10283,8 +10304,8 @@ dependencies = [ [[package]] name = "typify-impl" -version = "0.0.16" -source = "git+https://github.com/oxidecomputer/typify#336a042b8242587f57c5e8ec3ac0e54d3610e59c" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/typify#ad1296f6ceb998ae8c247d999b7828703a232bdd" dependencies = [ "heck 0.5.0", "log", @@ -10292,24 +10313,27 @@ dependencies = [ "quote", "regress", "schemars", + "semver 1.0.23", + "serde", "serde_json", - "syn 2.0.60", + "syn 2.0.64", "thiserror", "unicode-ident", ] [[package]] name = "typify-macro" -version = "0.0.16" -source = "git+https://github.com/oxidecomputer/typify#336a042b8242587f57c5e8ec3ac0e54d3610e59c" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/typify#ad1296f6ceb998ae8c247d999b7828703a232bdd" dependencies = [ "proc-macro2", "quote", "schemars", + "semver 1.0.23", "serde", "serde_json", "serde_tokenstream 0.2.0", - "syn 2.0.60", + "syn 2.0.64", "typify-impl", ] @@ -10354,24 +10378,24 @@ checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" +checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "68f5e5f3158ecfd4b8ff6fe086db7c8467a2dfdac97fe420f2b7c4aa97af66d6" [[package]] name = "unicode-xid" @@ -10397,9 +10421,9 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.10" +version = "0.2.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" [[package]] name = "untrusted" @@ -10543,7 +10567,7 @@ dependencies = [ "proc-macro2", "quote", "serde_tokenstream 0.2.0", - "syn 2.0.60", + "syn 2.0.64", "usdt-impl 0.5.0", ] @@ -10581,7 +10605,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.60", + "syn 2.0.64", "thiserror", "thread-id", "version_check", @@ -10611,7 +10635,7 @@ dependencies = [ "proc-macro2", "quote", "serde_tokenstream 0.2.0", - "syn 2.0.60", + "syn 2.0.64", "usdt-impl 0.5.0", ] @@ -10633,7 +10657,7 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.14", "serde", ] @@ -10771,9 +10795,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -10781,24 +10805,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", @@ -10808,9 +10832,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10818,22 +10842,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-streams" @@ -10850,9 +10874,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.69" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -10860,19 +10884,20 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.2" +version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "which" -version = "4.4.0" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ "either", - "libc", + "home", "once_cell", + "rustix", ] [[package]] @@ -10926,8 +10951,8 @@ dependencies = [ "textwrap", "tokio", "tokio-util", - "toml 0.8.12", - "toml_edit 0.22.12", + "toml 0.8.13", + "toml_edit 0.22.13", "tui-tree-widget", "unicode-width", "update-engine", @@ -10953,7 +10978,7 @@ dependencies = [ "sha2", "sled-hardware-types", "thiserror", - "toml 0.8.12", + "toml 0.8.13", "update-engine", ] @@ -11041,7 +11066,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "toml 0.8.12", + "toml 0.8.13", "tough", "trust-dns-resolver", "tufaceous", @@ -11078,9 +11103,9 @@ dependencies = [ [[package]] name = "widestring" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "winapi" @@ -11100,11 +11125,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -11113,15 +11138,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" -dependencies = [ - "windows-targets 0.48.5", -] - [[package]] name = "windows" version = "0.52.0" @@ -11129,7 +11145,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ "windows-core", - "windows-targets 0.52.0", + "windows-targets 0.52.5", ] [[package]] @@ -11138,7 +11154,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.5", ] [[package]] @@ -11156,7 +11172,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 
0.52.0", + "windows-targets 0.52.5", ] [[package]] @@ -11176,17 +11192,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -11197,9 +11214,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -11209,9 +11226,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -11221,9 +11238,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -11233,9 +11256,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -11245,9 +11268,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -11257,9 +11280,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -11269,24 +11292,24 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" -version = "0.5.15" +version = "0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" dependencies = [ "memchr", ] [[package]] name = "winnow" -version = "0.6.1" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d90f4e0f530c4c69f62b80d839e9ef3855edc9cba471a160c4d692deed62b401" +checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" dependencies = [ "memchr", ] @@ -11312,22 +11335,25 @@ dependencies = [ [[package]] name = "x509-cert" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25eefca1d99701da3a57feb07e5079fc62abba059fc139e98c13bbb250f3ef29" +checksum = "1301e935010a701ae5f8655edc0ad17c44bad3ac5ce8c39185f75453b720ae94" dependencies = [ "const-oid", "der", "spki", + "tls_codec", ] [[package]] name = "xattr" -version = "1.0.1" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4686009f71ff3e5c4dbcf1a282d0a44db3f021ba69350cd42086b3e5f1c6985" +checksum = "8da84f1a25939b27f6820d92aed108f83ff920fdf11a7b19366c27c4cda81d4f" dependencies = [ "libc", + "linux-raw-sys", + "rustix", ] [[package]] @@ -11343,7 +11369,7 @@ dependencies = [ "macaddr", "serde", "swrite", - "toml 0.8.12", + "toml 0.8.13", ] [[package]] @@ -11365,9 +11391,9 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.3.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6580539ad917b7c026220c4b3f2c08d52ce54d6ce0dc491e66002e35388fab46" +checksum = "da091bab2bd35db397c46f5b81748b56f28f8fda837087fab9b6b07b6d66e3f1" dependencies = [ "byteorder", "zerocopy-derive 0.2.0", @@ -11385,12 +11411,12 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ "byteorder", - "zerocopy-derive 0.7.32", + "zerocopy-derive 0.7.34", ] [[package]] @@ -11412,18 +11438,18 @@ checksum = "125139de3f6b9d625c39e2efdd73d41bdac468ccd556556440e322be0e1bbd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] 
[[package]] @@ -11443,7 +11469,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] diff --git a/bootstore/Cargo.toml b/bootstore/Cargo.toml index 6d1ca97e449..3dc62159170 100644 --- a/bootstore/Cargo.toml +++ b/bootstore/Cargo.toml @@ -30,7 +30,7 @@ slog.workspace = true thiserror.workspace = true tokio.workspace = true uuid.workspace = true -vsss-rs = { version = "3.3.4", features = ["std", "curve25519"] } +vsss-rs = { version = "=3.3.4", features = ["std", "curve25519"] } zeroize.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. diff --git a/internal-dns/tests/output/test-server.json b/internal-dns/tests/output/test-server.json index 5720dec19fd..5ed5d371612 100644 --- a/internal-dns/tests/output/test-server.json +++ b/internal-dns/tests/output/test-server.json @@ -67,4 +67,4 @@ } } } -} \ No newline at end of file +} diff --git a/openapi/bootstrap-agent.json b/openapi/bootstrap-agent.json index 6ac9c034e2d..f177c27f552 100644 --- a/openapi/bootstrap-agent.json +++ b/openapi/bootstrap-agent.json @@ -328,7 +328,6 @@ "checker": { "nullable": true, "description": "Checker to apply to incoming messages.", - "default": null, "type": "string" }, "originate": { @@ -341,7 +340,6 @@ "shaper": { "nullable": true, "description": "Shaper to apply to outgoing messages.", - "default": null, "type": "string" } }, @@ -439,7 +437,6 @@ "local_pref": { "nullable": true, "description": "Apply a local preference to routes received from this peer.", - "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -447,13 +444,11 @@ "md5_auth_key": { "nullable": true, "description": "Use the given key for TCP-MD5 authentication with the peer.", - "default": null, "type": "string" }, "min_ttl": { "nullable": true, "description": "Require messages from a peer have a minimum IP time to live field.", - "default": null, "type": "integer", "format": "uint8", "minimum": 0 @@ -461,7 +456,6 @@ "multi_exit_discriminator": { "nullable": true, "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", - "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -473,7 +467,6 @@ "remote_asn": { "nullable": true, "description": "Require that a peer has a specified ASN.", - "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -481,7 +474,6 @@ "vlan_id": { "nullable": true, "description": "Associate a VLAN ID with a BGP peer session.", - "default": null, "type": "integer", "format": "uint16", "minimum": 0 @@ -1216,7 +1208,6 @@ "vlan_id": { "nullable": true, "description": "The VLAN id associated with this route.", - "default": null, "type": "integer", "format": "uint16", "minimum": 0 @@ -1268,4 +1259,4 @@ } } } -} \ No newline at end of file +} diff --git a/openapi/dns-server.json b/openapi/dns-server.json index 41b351d4c1e..1b02199b765 100644 --- a/openapi/dns-server.json +++ b/openapi/dns-server.json @@ -253,4 +253,4 @@ } } } -} \ No newline at end of file +} diff --git a/openapi/gateway.json b/openapi/gateway.json index f3a5642b6ee..c5d0eab0b13 100644 --- a/openapi/gateway.json +++ b/openapi/gateway.json @@ -3285,4 +3285,4 @@ } } } -} \ No newline at end of file +} diff --git a/openapi/installinator-artifactd.json b/openapi/installinator-artifactd.json index 136e60a8c40..61f555e10de 100644 --- a/openapi/installinator-artifactd.json +++ b/openapi/installinator-artifactd.json @@ -2325,4 +2325,4 @@ } } } -} 
\ No newline at end of file +} diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 5ec8e584175..c7d476994dc 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -1567,7 +1567,6 @@ "checker": { "nullable": true, "description": "Checker to apply to incoming messages.", - "default": null, "type": "string" }, "originate": { @@ -1580,7 +1579,6 @@ "shaper": { "nullable": true, "description": "Shaper to apply to outgoing messages.", - "default": null, "type": "string" } }, @@ -1678,7 +1676,6 @@ "local_pref": { "nullable": true, "description": "Apply a local preference to routes received from this peer.", - "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -1686,13 +1683,11 @@ "md5_auth_key": { "nullable": true, "description": "Use the given key for TCP-MD5 authentication with the peer.", - "default": null, "type": "string" }, "min_ttl": { "nullable": true, "description": "Require messages from a peer have a minimum IP time to live field.", - "default": null, "type": "integer", "format": "uint8", "minimum": 0 @@ -1700,7 +1695,6 @@ "multi_exit_discriminator": { "nullable": true, "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", - "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -1712,7 +1706,6 @@ "remote_asn": { "nullable": true, "description": "Require that a peer has a specified ASN.", - "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -1720,7 +1713,6 @@ "vlan_id": { "nullable": true, "description": "Associate a VLAN ID with a BGP peer session.", - "default": null, "type": "integer", "format": "uint16", "minimum": 0 @@ -4267,7 +4259,6 @@ "vlan_id": { "nullable": true, "description": "The VLAN id associated with this route.", - "default": null, "type": "integer", "format": "uint16", "minimum": 0 @@ -5021,4 +5012,4 @@ } } } -} \ No newline at end of file +} diff --git a/openapi/nexus.json b/openapi/nexus.json index c50291cf38a..2bf6f0a6ffe 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -15970,7 +15970,6 @@ "signing_keypair": { "nullable": true, "description": "request signing key pair", - "default": null, "allOf": [ { "$ref": "#/components/schemas/DerEncodedKeyPair" @@ -19356,4 +19355,4 @@ } } ] -} \ No newline at end of file +} diff --git a/openapi/oximeter.json b/openapi/oximeter.json index c567e9421d9..d4a37957ab8 100644 --- a/openapi/oximeter.json +++ b/openapi/oximeter.json @@ -290,4 +290,4 @@ } } } -} \ No newline at end of file +} diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index fab597a7610..5da2b5c7976 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -1465,7 +1465,6 @@ "checker": { "nullable": true, "description": "Checker to apply to incoming messages.", - "default": null, "type": "string" }, "originate": { @@ -1478,7 +1477,6 @@ "shaper": { "nullable": true, "description": "Shaper to apply to outgoing messages.", - "default": null, "type": "string" } }, @@ -1576,7 +1574,6 @@ "local_pref": { "nullable": true, "description": "Apply a local preference to routes received from this peer.", - "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -1584,13 +1581,11 @@ "md5_auth_key": { "nullable": true, "description": "Use the given key for TCP-MD5 authentication with the peer.", - "default": null, "type": "string" }, "min_ttl": { "nullable": true, "description": "Require messages from a peer have a minimum IP time to live field.", - "default": null, "type": 
"integer", "format": "uint8", "minimum": 0 @@ -1598,7 +1593,6 @@ "multi_exit_discriminator": { "nullable": true, "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", - "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -1610,7 +1604,6 @@ "remote_asn": { "nullable": true, "description": "Require that a peer has a specified ASN.", - "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -1618,7 +1611,6 @@ "vlan_id": { "nullable": true, "description": "Associate a VLAN ID with a BGP peer session.", - "default": null, "type": "integer", "format": "uint16", "minimum": 0 @@ -4279,7 +4271,6 @@ "vlan_id": { "nullable": true, "description": "The VLAN id associated with this route.", - "default": null, "type": "integer", "format": "uint16", "minimum": 0 @@ -5008,4 +4999,4 @@ } } } -} \ No newline at end of file +} diff --git a/openapi/wicketd.json b/openapi/wicketd.json index 934069fb549..cb06c0cadf0 100644 --- a/openapi/wicketd.json +++ b/openapi/wicketd.json @@ -1049,7 +1049,6 @@ "checker": { "nullable": true, "description": "Checker to apply to incoming messages.", - "default": null, "type": "string" }, "originate": { @@ -1062,7 +1061,6 @@ "shaper": { "nullable": true, "description": "Shaper to apply to outgoing messages.", - "default": null, "type": "string" } }, @@ -2751,7 +2749,6 @@ "vlan_id": { "nullable": true, "description": "The VLAN id associated with this route.", - "default": null, "type": "integer", "format": "uint16", "minimum": 0 @@ -4908,7 +4905,6 @@ }, "allowed_export": { "description": "Apply export policy to this peer with an allow list.", - "default": null, "allOf": [ { "$ref": "#/components/schemas/UserSpecifiedImportExportPolicy" @@ -4917,7 +4913,6 @@ }, "allowed_import": { "description": "Apply import policy to this peer with an allow list.", - "default": null, "allOf": [ { "$ref": "#/components/schemas/UserSpecifiedImportExportPolicy" @@ -4933,7 +4928,6 @@ "auth_key_id": { "nullable": true, "description": "The key identifier for authentication to use with the peer.", - "default": null, "allOf": [ { "$ref": "#/components/schemas/BgpAuthKeyId" @@ -4993,7 +4987,6 @@ "local_pref": { "nullable": true, "description": "Apply a local preference to routes received from this peer.", - "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -5001,7 +4994,6 @@ "min_ttl": { "nullable": true, "description": "Require messages from a peer have a minimum IP time to live field.", - "default": null, "type": "integer", "format": "uint8", "minimum": 0 @@ -5009,7 +5001,6 @@ "multi_exit_discriminator": { "nullable": true, "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", - "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -5021,7 +5012,6 @@ "remote_asn": { "nullable": true, "description": "Require that a peer has a specified ASN.", - "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -5029,7 +5019,6 @@ "vlan_id": { "nullable": true, "description": "Associate a VLAN ID with a BGP peer session.", - "default": null, "type": "integer", "format": "uint16", "minimum": 0 @@ -5154,4 +5143,4 @@ } } } -} \ No newline at end of file +} diff --git a/oximeter/collector/tests/output/self-stat-schema.json b/oximeter/collector/tests/output/self-stat-schema.json index 4a56a81c867..111d7c0ed2c 100644 --- a/oximeter/collector/tests/output/self-stat-schema.json +++ b/oximeter/collector/tests/output/self-stat-schema.json @@ -39,7 +39,7 @@ 
} ], "datum_type": "cumulative_u64", - "created": "2024-05-11T09:41:23.361298682Z" + "created": "2024-05-17T01:26:16.797600385Z" }, "oximeter_collector:failed_collections": { "timeseries_name": "oximeter_collector:failed_collections", @@ -86,6 +86,6 @@ } ], "datum_type": "cumulative_u64", - "created": "2024-05-11T09:41:23.361907436Z" + "created": "2024-05-17T01:26:16.798713487Z" } -} +} \ No newline at end of file diff --git a/schema/all-zone-requests.json b/schema/all-zone-requests.json index 66792f52c44..7fe9b139eb1 100644 --- a/schema/all-zone-requests.json +++ b/schema/all-zone-requests.json @@ -668,7 +668,6 @@ } }, "dataset": { - "default": null, "anyOf": [ { "$ref": "#/definitions/DatasetRequest" diff --git a/schema/rss-sled-plan.json b/schema/rss-sled-plan.json index a349fbb6058..95ca5b90ba0 100644 --- a/schema/rss-sled-plan.json +++ b/schema/rss-sled-plan.json @@ -195,7 +195,6 @@ }, "checker": { "description": "Checker to apply to incoming messages.", - "default": null, "type": [ "string", "null" @@ -210,7 +209,6 @@ }, "shaper": { "description": "Shaper to apply to outgoing messages.", - "default": null, "type": [ "string", "null" @@ -321,7 +319,6 @@ }, "local_pref": { "description": "Apply a local preference to routes received from this peer.", - "default": null, "type": [ "integer", "null" @@ -331,7 +328,6 @@ }, "md5_auth_key": { "description": "Use the given key for TCP-MD5 authentication with the peer.", - "default": null, "type": [ "string", "null" @@ -339,7 +335,6 @@ }, "min_ttl": { "description": "Require messages from a peer have a minimum IP time to live field.", - "default": null, "type": [ "integer", "null" @@ -349,7 +344,6 @@ }, "multi_exit_discriminator": { "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", - "default": null, "type": [ "integer", "null" @@ -363,7 +357,6 @@ }, "remote_asn": { "description": "Require that a peer has a specified ASN.", - "default": null, "type": [ "integer", "null" @@ -373,7 +366,6 @@ }, "vlan_id": { "description": "Associate a VLAN ID with a BGP peer session.", - "default": null, "type": [ "integer", "null" @@ -918,7 +910,6 @@ }, "vlan_id": { "description": "The VLAN id associated with this route.", - "default": null, "type": [ "integer", "null" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 9bbbc28e3f1..d8c9e7c634d 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -17,16 +17,15 @@ workspace = true ### BEGIN HAKARI SECTION [dependencies] -ahash = { version = "0.8.8" } -aho-corasick = { version = "1.1.2" } +ahash = { version = "0.8.11" } +aho-corasick = { version = "1.1.3" } anyhow = { version = "1.0.83", features = ["backtrace"] } base16ct = { version = "0.2.0", default-features = false, features = ["alloc"] } bit-set = { version = "0.5.3" } bit-vec = { version = "0.6.3" } bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1.3.2" } -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["serde"] } -bstr-6f8ce4dd05d13bba = { package = "bstr", version = "0.2.17" } -bstr-dff4ba8e3ae991db = { package = "bstr", version = "1.9.0" } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.5.0", default-features = false, features = ["serde", "std"] } +bstr = { version = "1.9.1" } byteorder = { version = "1.5.0" } bytes = { version = "1.6.0", features = ["serde"] } chrono = { version = "0.4.38", features = ["serde"] } @@ -39,10 +38,10 @@ crossbeam-epoch = { version = "0.9.18" } 
crossbeam-utils = { version = "0.8.19" } crossterm = { version = "0.27.0", features = ["event-stream", "serde"] } crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } -der = { version = "0.7.8", default-features = false, features = ["derive", "flagset", "oid", "pem", "std"] } +der = { version = "0.7.9", default-features = false, features = ["derive", "flagset", "oid", "pem", "std"] } diesel = { version = "2.1.6", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } digest = { version = "0.10.7", features = ["mac", "oid", "std"] } -either = { version = "1.11.0" } +either = { version = "1.12.0" } elliptic-curve = { version = "0.13.8", features = ["ecdh", "hazmat", "pem", "std"] } ff = { version = "0.13.0", default-features = false, features = ["alloc"] } flate2 = { version = "1.0.30" } @@ -56,24 +55,25 @@ futures-task = { version = "0.3.30", default-features = false, features = ["std" futures-util = { version = "0.3.30", features = ["channel", "io", "sink"] } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "2739c18e80697aa6bc235c935176d14b4d757ee9", features = ["std"] } generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } -getrandom = { version = "0.2.12", default-features = false, features = ["js", "rdrand", "std"] } +getrandom = { version = "0.2.14", default-features = false, features = ["js", "rdrand", "std"] } group = { version = "0.13.0", default-features = false, features = ["alloc"] } -hashbrown = { version = "0.14.3", features = ["raw"] } +hashbrown = { version = "0.14.5", features = ["raw"] } hex = { version = "0.4.3", features = ["serde"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } hyper = { version = "0.14.28", features = ["full"] } indexmap = { version = "2.2.6", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } ipnetwork = { version = "0.20.0", features = ["schemars"] } -itertools = { version = "0.10.5" } +itertools-5ef9efb8ec2df382 = { package = "itertools", version = "0.12.1" } +itertools-93f6ce9d446188ac = { package = "itertools", version = "0.10.5" } lalrpop-util = { version = "0.19.12" } lazy_static = { version = "1.4.0", default-features = false, features = ["spin_no_std"] } libc = { version = "0.2.153", features = ["extra_traits"] } log = { version = "0.4.21", default-features = false, features = ["std"] } managed = { version = "0.8.0", default-features = false, features = ["alloc", "map"] } -memchr = { version = "2.7.1" } +memchr = { version = "2.7.2" } nom = { version = "7.1.3" } -num-bigint = { version = "0.4.4", features = ["rand"] } +num-bigint = { version = "0.4.5", features = ["rand"] } num-integer = { version = "0.1.46", features = ["i128"] } num-iter = { version = "0.1.45", default-features = false, features = ["i128"] } num-traits = { version = "0.2.19", features = ["i128", "libm"] } @@ -85,52 +85,52 @@ postgres-types = { version = "0.2.6", default-features = false, features = ["wit predicates = { version = "3.1.0" } proc-macro2 = { version = "1.0.82" } regex = { version = "1.10.4" } -regex-automata = { version = "0.4.5", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } -regex-syntax = { version = "0.8.2" } +regex-automata = { version = "0.4.6", default-features = false, features = ["dfa", "hybrid", 
"meta", "nfa", "perf", "unicode"] } +regex-syntax = { version = "0.8.3" } reqwest = { version = "0.11.27", features = ["blocking", "cookies", "json", "rustls-tls", "stream"] } ring = { version = "0.17.8", features = ["std"] } -schemars = { version = "0.8.17", features = ["bytes", "chrono", "uuid1"] } +schemars = { version = "0.8.19", features = ["bytes", "chrono", "uuid1"] } scopeguard = { version = "1.2.0" } semver = { version = "1.0.23", features = ["serde"] } -serde = { version = "1.0.201", features = ["alloc", "derive", "rc"] } +serde = { version = "1.0.202", features = ["alloc", "derive", "rc"] } serde_json = { version = "1.0.117", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } -similar = { version = "2.4.0", features = ["inline", "unicode"] } +similar = { version = "2.5.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } +smallvec = { version = "1.13.2", default-features = false, features = ["const_new"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } subtle = { version = "2.5.0" } syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } -syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.60", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } -time = { version = "0.3.34", features = ["formatting", "local-offset", "macros", "parsing"] } +syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.64", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +time = { version = "0.3.36", features = ["formatting", "local-offset", "macros", "parsing"] } tokio = { version = "1.37.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } tokio-stream = { version = "0.1.15", features = ["net"] } -tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } +tokio-util = { version = "0.7.11", features = ["codec", "io-util"] } toml = { version = "0.7.8" } -toml_edit-3c51e837cfc5589a = { package = "toml_edit", version = "0.22.12", features = ["serde"] } +toml_edit-3c51e837cfc5589a = { package = "toml_edit", version = "0.22.13", features = ["serde"] } tracing = { version = "0.1.40", features = ["log"] } trust-dns-proto = { version = "0.22.0" } unicode-bidi = { version = "0.3.15" } -unicode-normalization = { version = "0.1.22" } +unicode-normalization = { version = "0.1.23" } usdt = { version = "0.5.0" } usdt-impl = { version = "0.5.0", default-features = false, features = ["asm", "des"] } uuid = { version = "1.8.0", features = ["serde", "v4"] } yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } -zerocopy = { version = "0.7.32", features = ["derive", "simd"] } +zerocopy = { version = "0.7.34", features = ["derive", "simd"] } zeroize = { version = "1.7.0", features = ["std", "zeroize_derive"] } [build-dependencies] -ahash = { version = "0.8.8" } -aho-corasick = { version = "1.1.2" } +ahash = { version = "0.8.11" } +aho-corasick = { version = "1.1.3" } anyhow = { version = "1.0.83", features = ["backtrace"] } base16ct = { version = "0.2.0", default-features = false, features = ["alloc"] } bit-set = { version = "0.5.3" } bit-vec = { version = "0.6.3" } bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1.3.2" } -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", 
default-features = false, features = ["serde"] } -bstr-6f8ce4dd05d13bba = { package = "bstr", version = "0.2.17" } -bstr-dff4ba8e3ae991db = { package = "bstr", version = "1.9.0" } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.5.0", default-features = false, features = ["serde", "std"] } +bstr = { version = "1.9.1" } byteorder = { version = "1.5.0" } bytes = { version = "1.6.0", features = ["serde"] } chrono = { version = "0.4.38", features = ["serde"] } @@ -143,10 +143,10 @@ crossbeam-epoch = { version = "0.9.18" } crossbeam-utils = { version = "0.8.19" } crossterm = { version = "0.27.0", features = ["event-stream", "serde"] } crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } -der = { version = "0.7.8", default-features = false, features = ["derive", "flagset", "oid", "pem", "std"] } +der = { version = "0.7.9", default-features = false, features = ["derive", "flagset", "oid", "pem", "std"] } diesel = { version = "2.1.6", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } digest = { version = "0.10.7", features = ["mac", "oid", "std"] } -either = { version = "1.11.0" } +either = { version = "1.12.0" } elliptic-curve = { version = "0.13.8", features = ["ecdh", "hazmat", "pem", "std"] } ff = { version = "0.13.0", default-features = false, features = ["alloc"] } flate2 = { version = "1.0.30" } @@ -160,24 +160,25 @@ futures-task = { version = "0.3.30", default-features = false, features = ["std" futures-util = { version = "0.3.30", features = ["channel", "io", "sink"] } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "2739c18e80697aa6bc235c935176d14b4d757ee9", features = ["std"] } generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } -getrandom = { version = "0.2.12", default-features = false, features = ["js", "rdrand", "std"] } +getrandom = { version = "0.2.14", default-features = false, features = ["js", "rdrand", "std"] } group = { version = "0.13.0", default-features = false, features = ["alloc"] } -hashbrown = { version = "0.14.3", features = ["raw"] } +hashbrown = { version = "0.14.5", features = ["raw"] } hex = { version = "0.4.3", features = ["serde"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } hyper = { version = "0.14.28", features = ["full"] } indexmap = { version = "2.2.6", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } ipnetwork = { version = "0.20.0", features = ["schemars"] } -itertools = { version = "0.10.5" } +itertools-5ef9efb8ec2df382 = { package = "itertools", version = "0.12.1" } +itertools-93f6ce9d446188ac = { package = "itertools", version = "0.10.5" } lalrpop-util = { version = "0.19.12" } lazy_static = { version = "1.4.0", default-features = false, features = ["spin_no_std"] } libc = { version = "0.2.153", features = ["extra_traits"] } log = { version = "0.4.21", default-features = false, features = ["std"] } managed = { version = "0.8.0", default-features = false, features = ["alloc", "map"] } -memchr = { version = "2.7.1" } +memchr = { version = "2.7.2" } nom = { version = "7.1.3" } -num-bigint = { version = "0.4.4", features = ["rand"] } +num-bigint = { version = "0.4.5", features = ["rand"] } num-integer = { version = "0.1.46", features = ["i128"] } num-iter = { version = "0.1.45", default-features = false, features 
= ["i128"] } num-traits = { version = "0.2.19", features = ["i128", "libm"] } @@ -189,96 +190,91 @@ postgres-types = { version = "0.2.6", default-features = false, features = ["wit predicates = { version = "3.1.0" } proc-macro2 = { version = "1.0.82" } regex = { version = "1.10.4" } -regex-automata = { version = "0.4.5", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } -regex-syntax = { version = "0.8.2" } +regex-automata = { version = "0.4.6", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } +regex-syntax = { version = "0.8.3" } reqwest = { version = "0.11.27", features = ["blocking", "cookies", "json", "rustls-tls", "stream"] } ring = { version = "0.17.8", features = ["std"] } -schemars = { version = "0.8.17", features = ["bytes", "chrono", "uuid1"] } +schemars = { version = "0.8.19", features = ["bytes", "chrono", "uuid1"] } scopeguard = { version = "1.2.0" } semver = { version = "1.0.23", features = ["serde"] } -serde = { version = "1.0.201", features = ["alloc", "derive", "rc"] } +serde = { version = "1.0.202", features = ["alloc", "derive", "rc"] } serde_json = { version = "1.0.117", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } -similar = { version = "2.4.0", features = ["inline", "unicode"] } +similar = { version = "2.5.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } +smallvec = { version = "1.13.2", default-features = false, features = ["const_new"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } subtle = { version = "2.5.0" } syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } -syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.60", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } -time = { version = "0.3.34", features = ["formatting", "local-offset", "macros", "parsing"] } -time-macros = { version = "0.2.17", default-features = false, features = ["formatting", "parsing"] } +syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.64", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +time = { version = "0.3.36", features = ["formatting", "local-offset", "macros", "parsing"] } +time-macros = { version = "0.2.18", default-features = false, features = ["formatting", "parsing"] } tokio = { version = "1.37.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } tokio-stream = { version = "0.1.15", features = ["net"] } -tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } +tokio-util = { version = "0.7.11", features = ["codec", "io-util"] } toml = { version = "0.7.8" } -toml_edit-3c51e837cfc5589a = { package = "toml_edit", version = "0.22.12", features = ["serde"] } +toml_edit-3c51e837cfc5589a = { package = "toml_edit", version = "0.22.13", features = ["serde"] } tracing = { version = "0.1.40", features = ["log"] } trust-dns-proto = { version = "0.22.0" } unicode-bidi = { version = "0.3.15" } -unicode-normalization = { version = "0.1.22" } +unicode-normalization = { version = "0.1.23" } usdt = { version = "0.5.0" } usdt-impl = { version = "0.5.0", default-features = false, features = ["asm", "des"] } uuid = { version = "1.8.0", features = ["serde", "v4"] } yasna = { version = "0.5.2", features = 
["bit-vec", "num-bigint", "std", "time"] } -zerocopy = { version = "0.7.32", features = ["derive", "simd"] } +zerocopy = { version = "0.7.34", features = ["derive", "simd"] } zeroize = { version = "1.7.0", features = ["std", "zeroize_derive"] } [target.x86_64-unknown-linux-gnu.dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } +linux-raw-sys = { version = "0.4.13", default-features = false, features = ["elf", "errno", "general", "ioctl", "no_std", "std"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } +rustix = { version = "0.38.34", features = ["fs", "termios"] } [target.x86_64-unknown-linux-gnu.build-dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } +linux-raw-sys = { version = "0.4.13", default-features = false, features = ["elf", "errno", "general", "ioctl", "no_std", "std"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } +rustix = { version = "0.38.34", features = ["fs", "termios"] } [target.x86_64-apple-darwin.dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } +rustix = { version = "0.38.34", features = ["fs", "termios"] } [target.x86_64-apple-darwin.build-dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } +rustix = { version = "0.38.34", features = ["fs", "termios"] } [target.aarch64-apple-darwin.dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } +rustix = { version = "0.38.34", features = ["fs", "termios"] } [target.aarch64-apple-darwin.build-dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } +rustix = { version = "0.38.34", features = ["fs", "termios"] } [target.x86_64-unknown-illumos.dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } -toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } +rustix = { version = "0.38.34", features = ["fs", "termios"] } +toml_datetime = { 
version = "0.6.6", default-features = false, features = ["serde"] } toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } [target.x86_64-unknown-illumos.build-dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } -rustix = { version = "0.38.31", features = ["fs", "termios"] } -toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } +rustix = { version = "0.38.34", features = ["fs", "termios"] } +toml_datetime = { version = "0.6.6", default-features = false, features = ["serde"] } toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } ### END HAKARI SECTION From b79c1f8c99d785ba08f29bf7d3b1c9c9902ca37b Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Mon, 20 May 2024 14:48:01 -0700 Subject: [PATCH 29/37] ensure omicron-common can be built outside of omicron (#5795) --- .github/buildomat/jobs/omicron-common.sh | 27 ++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100755 .github/buildomat/jobs/omicron-common.sh diff --git a/.github/buildomat/jobs/omicron-common.sh b/.github/buildomat/jobs/omicron-common.sh new file mode 100755 index 00000000000..2d5f51f4328 --- /dev/null +++ b/.github/buildomat/jobs/omicron-common.sh @@ -0,0 +1,27 @@ +#!/bin/bash +#: +#: name = "omicron-common (helios)" +#: variety = "basic" +#: target = "helios-2.0" +#: rust_toolchain = "1.77.2" +#: output_rules = [] +#: skip_clone = true + +# Verify that omicron-common builds successfully when used as a dependency +# in an external project. It must not leak anything that requires an external +# dependency (apart from OpenSSL/pkg-config). + +set -o errexit +set -o pipefail +set -o xtrace + +cargo --version +rustc --version + +cargo new --lib test-project +cd test-project +cargo add omicron-common \ + --git https://github.com/oxidecomputer/omicron.git \ + --rev "$GITHUB_SHA" +cargo check +cargo build --release From a0db11b880f79273f3b4aadac7c02d821ea4c03d Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 04:13:43 +0000 Subject: [PATCH 30/37] Update actions/checkout digest to a5ac7e5 (#5800) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [actions/checkout](https://togithub.com/actions/checkout) | action | digest | [`0ad4b8f` -> `a5ac7e5`](https://togithub.com/actions/checkout/compare/0ad4b8f...a5ac7e5) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. â™» **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). 
Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index d6207dc0f06..04f8d73b666 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -17,7 +17,7 @@ jobs: env: RUSTFLAGS: -D warnings steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # v1 From b3fa3bc340e89d4b8ba83a562413b46563f51b7f Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 04:16:27 +0000 Subject: [PATCH 31/37] Update taiki-e/install-action digest to 0fc5600 (#5801) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`2f990e9` -> `0fc5600`](https://togithub.com/taiki-e/install-action/compare/2f990e9...0fc5600) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. â™» **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). 
Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 04f8d73b666..5c861c56bf2 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@2f990e9c484f0590cb76a07296e9677b417493e9 # v2 + uses: taiki-e/install-action@0fc560009ad92371154ca652dcf2620d19331eee # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From ebcc2acd27455ade7ddfa78b8ef49582adf8493f Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Tue, 21 May 2024 10:54:41 -0400 Subject: [PATCH 32/37] Automatic bump of permslip manifest to sidecar-v1.0.19 (#5804) Automated bump --- tools/permslip_staging | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/permslip_staging b/tools/permslip_staging index f5ed8e873ae..20a362ade09 100644 --- a/tools/permslip_staging +++ b/tools/permslip_staging @@ -1,4 +1,4 @@ 03df89d44ad8b653abbeb7fbb83821869f008733e9da946457e72a13cb11d6cc manifest-gimlet-v1.0.19.toml b973cc9feb20f7bba447e7f5291c4070387fa9992deab81301f67f0a3844cd0c manifest-oxide-rot-1-v1.0.11.toml aae829e02d79ec0fe19019c783b6426c6fcc1fe4427aea70b65afc2884f53db8 manifest-psc-v1.0.17.toml -9bd043382ad5c7cdb8f00a66e401a6c4b88e8d588915f304d2c261ea7df4d1b5 manifest-sidecar-v1.0.16.toml +ae00003c288ec4f520167c68de4999e1dfa15b63afe2f89e5ed1cfb8d707ebb9 manifest-sidecar-v1.0.19.toml From 279cb8cf89f8df9f7511006efefbb1d4d29bd90b Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Tue, 21 May 2024 15:28:47 -0700 Subject: [PATCH 33/37] ci: fix omicron-common job for PRs from forks (#5806) --- .github/buildomat/jobs/omicron-common.sh | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/buildomat/jobs/omicron-common.sh b/.github/buildomat/jobs/omicron-common.sh index 2d5f51f4328..b238eec7c6c 100755 --- a/.github/buildomat/jobs/omicron-common.sh +++ b/.github/buildomat/jobs/omicron-common.sh @@ -5,7 +5,6 @@ #: target = "helios-2.0" #: rust_toolchain = "1.77.2" #: output_rules = [] -#: skip_clone = true # Verify that omicron-common builds successfully when used as a dependency # in an external project. It must not leak anything that requires an external @@ -18,10 +17,9 @@ set -o xtrace cargo --version rustc --version +cd /tmp cargo new --lib test-project cd test-project -cargo add omicron-common \ - --git https://github.com/oxidecomputer/omicron.git \ - --rev "$GITHUB_SHA" +cargo add omicron-common --path /work/oxidecomputer/omicron/common cargo check cargo build --release From 82c77f2751c57f3dc9560eea79657871ba1bafc9 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Wed, 22 May 2024 10:15:48 -0400 Subject: [PATCH 34/37] Blueprints: extract discretionary zone placement into its own module (#5788) Previously, we chose a sled for new Nexus instances by taking the sled with the lowest number of current Nexus zones and tiebreaking by sled-id (arbitrary but deterministic). This PR moves the placement decisions into a new submodule and adds some additional requirements: 1. We need to know the number of zpools present on each sled. 2. We refuse to start a service if there are already more instances of that service than there are zpools on the sled. 
(This isn't required today for Nexus, but will be important for services with non-transient datasets, like CRDB, and will eventually be required for all zones once we track transient dataset assignment in blueprints.) 3. If multiple sleds are tied on "lowest count of current instances of this zone type", we tiebreak by "lowest total number of discretionary zones", and only if we're still tied do we tiebreak on sled-id. This module only supports Nexus at the moment, but adding additional zone kinds (assuming all the same requirements are valid for them) is nearly trivial - I'll add cockroachdb support in a subsequent PR. --- .../planner/omicron_zone_placement.txt | 7 + nexus/reconfigurator/planning/src/planner.rs | 124 ++--- .../src/planner/omicron_zone_placement.rs | 493 ++++++++++++++++++ 3 files changed, 564 insertions(+), 60 deletions(-) create mode 100644 nexus/reconfigurator/planning/proptest-regressions/planner/omicron_zone_placement.txt create mode 100644 nexus/reconfigurator/planning/src/planner/omicron_zone_placement.rs diff --git a/nexus/reconfigurator/planning/proptest-regressions/planner/omicron_zone_placement.txt b/nexus/reconfigurator/planning/proptest-regressions/planner/omicron_zone_placement.txt new file mode 100644 index 00000000000..bb2ad481bc8 --- /dev/null +++ b/nexus/reconfigurator/planning/proptest-regressions/planner/omicron_zone_placement.txt @@ -0,0 +1,7 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc 72b902d1405681df2dd46efc097da6840ff1234dc9d0d7c0ecf07bed0b0e7d8d # shrinks to input = _TestPlaceOmicronZonesArgs { input: ArbitraryTestInput { existing_sleds: {[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]: ExistingSled { zones: ZonesToPlace { zones: [] }, waiting_for_ntp: false, num_disks: 1 }}, zones_to_place: ZonesToPlace { zones: [Nexus] } } } diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index aca5f057d8a..3708d212ec7 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -10,6 +10,7 @@ use crate::blueprint_builder::BlueprintBuilder; use crate::blueprint_builder::Ensure; use crate::blueprint_builder::EnsureMultiple; use crate::blueprint_builder::Error; +use crate::planner::omicron_zone_placement::PlacementError; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::PlanningInput; @@ -25,6 +26,12 @@ use std::collections::BTreeMap; use std::collections::BTreeSet; use std::hash::Hash; +use self::omicron_zone_placement::DiscretionaryOmicronZone; +use self::omicron_zone_placement::OmicronZonePlacement; +use self::omicron_zone_placement::OmicronZonePlacementSledState; + +mod omicron_zone_placement; + pub struct Planner<'a> { log: Logger, input: &'a PlanningInput, @@ -214,7 +221,7 @@ impl<'a> Planner<'a> { // We will not mark sleds getting Crucible zones as ineligible; other // control plane service zones starting concurrently with Crucible zones // is fine. 
- let mut sleds_waiting_for_ntp_zones = BTreeSet::new(); + let mut sleds_waiting_for_ntp_zone = BTreeSet::new(); for (sled_id, sled_resources) in self.input.all_sled_resources(SledFilter::InService) @@ -252,7 +259,7 @@ impl<'a> Planner<'a> { // Don't make any other changes to this sled. However, this // change is compatible with any other changes to other sleds, // so we can "continue" here rather than "break". - sleds_waiting_for_ntp_zones.insert(sled_id); + sleds_waiting_for_ntp_zone.insert(sled_id); continue; } @@ -321,9 +328,7 @@ impl<'a> Planner<'a> { } } - self.ensure_correct_number_of_nexus_zones( - &sleds_waiting_for_ntp_zones, - )?; + self.ensure_correct_number_of_nexus_zones(&sleds_waiting_for_ntp_zone)?; Ok(()) } @@ -344,7 +349,7 @@ impl<'a> Planner<'a> { // TODO-correctness What should we do if we have _too many_ Nexus // instances? For now, just log it the number of zones any time we have // at least the minimum number. - let nexus_to_add = self + let mut nexus_to_add = self .input .target_nexus_zone_count() .saturating_sub(num_total_nexus); @@ -357,70 +362,69 @@ impl<'a> Planner<'a> { return Ok(()); } - // Now bin all the sleds which are eligible choices for a new Nexus zone - // by their current Nexus zone count. Skip sleds with a policy/state - // that should be eligible for Nexus but that don't yet have an NTP - // zone. - let mut sleds_by_num_nexus: BTreeMap<usize, Vec<SledUuid>> = - BTreeMap::new(); - for sled_id in self - .input - .all_sled_ids(SledFilter::Discretionary) - .filter(|sled_id| !sleds_waiting_for_ntp_zone.contains(sled_id)) - { - let num_nexus = self.blueprint.sled_num_nexus_zones(sled_id); - sleds_by_num_nexus.entry(num_nexus).or_default().push(sled_id); - } - - // Ensure we have at least one sled on which we can add Nexus zones. If - // we don't, we have nothing else to do. This isn't a hard error, - // because we might be waiting for NTP on all eligible sleds (although - // it would be weird, since we're presumably running from within Nexus - // on some sled). - if sleds_by_num_nexus.is_empty() { - warn!(self.log, "want to add Nexus zones, but no eligible sleds"); - return Ok(()); - } + let mut zone_placement = OmicronZonePlacement::new( + self.input + .all_sled_resources(SledFilter::Discretionary) + .filter(|(sled_id, _)| { + !sleds_waiting_for_ntp_zone.contains(&sled_id) + }) + .map(|(sled_id, sled_resources)| { + OmicronZonePlacementSledState { + sled_id, + num_zpools: sled_resources + .all_zpools(ZpoolFilter::InService) + .count(), + discretionary_zones: self + .blueprint + .current_sled_zones(sled_id) + .filter_map(|zone| { + DiscretionaryOmicronZone::from_zone_type( + &zone.zone_type, + ) + }) + .collect(), + } + }), + ); - // Build a map of sled -> new nexus zone count. + // Build a map of sled -> new nexus zones to add. let mut sleds_to_change: BTreeMap<SledUuid, usize> = BTreeMap::new(); - 'outer: for _ in 0..nexus_to_add { - // `sleds_by_num_nexus` is sorted by key already, and we want to - // pick from the lowest-numbered bin. We can just loop over its - // keys, expecting to stop on the first iteration, with the only - // exception being when we've removed all the sleds from a bin. - for (&num_nexus, sleds) in sleds_by_num_nexus.iter_mut() { - // `sleds` contains all sleds with the minimum number of Nexus - // zones. Pick one arbitrarily but deterministically. - let Some(sled_id) = sleds.pop() else { - // We already drained this bin; move on.
- continue; - }; - - // This insert might overwrite an old value for this sled (e.g., - // in the "we have 1 sled and need to add many Nexus instances - // to it" case). That's fine. - sleds_to_change.insert(sled_id, num_nexus + 1); + for i in 0..nexus_to_add { + match zone_placement.place_zone(DiscretionaryOmicronZone::Nexus) { + Ok(sled_id) => { + *sleds_to_change.entry(sled_id).or_default() += 1; + } + Err(PlacementError::NoSledsEligible { .. }) => { + // We won't treat this as a hard error; it's possible + // (albeit unlikely?) we're in a weird state where we need + // more sleds or disks to come online, and we may need to be + // able to produce blueprints to achieve that status. + warn!( + self.log, + "failed to place all new desired Nexus instances"; + "placed" => i, + "wanted_to_place" => nexus_to_add, + ); - // Put this sled back in our map, but now with one more Nexus. - sleds_by_num_nexus - .entry(num_nexus + 1) - .or_default() - .push(sled_id); + // Adjust `nexus_to_add` downward so it's consistent with + // the number of Nexuses we're actually adding. + nexus_to_add = i; - continue 'outer; + break; + } } - - // This should be unreachable: it's only possible if we fail to find - // a nonempty vec in `sleds_by_num_nexus`, and we checked above that - // `sleds_by_num_nexus` is not empty. - unreachable!("logic error finding sleds for Nexus"); } // For each sled we need to change, actually do so. let mut total_added = 0; - for (sled_id, new_nexus_count) in sleds_to_change { + for (sled_id, additional_nexus_count) in sleds_to_change { + // TODO-cleanup This is awkward: the builder wants to know how many + // total Nexus zones go on a given sled, but we have a count of how + // many we want to add. Construct a new target count. Maybe the + // builder should provide a different interface here? + let new_nexus_count = self.blueprint.sled_num_nexus_zones(sled_id) + + additional_nexus_count; match self .blueprint .sled_ensure_zone_multiple_nexus(sled_id, new_nexus_count)? diff --git a/nexus/reconfigurator/planning/src/planner/omicron_zone_placement.rs b/nexus/reconfigurator/planning/src/planner/omicron_zone_placement.rs new file mode 100644 index 00000000000..26e72db4347 --- /dev/null +++ b/nexus/reconfigurator/planning/src/planner/omicron_zone_placement.rs @@ -0,0 +1,493 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Omicron zone placement decisions + +use nexus_types::deployment::BlueprintZoneType; +use omicron_uuid_kinds::SledUuid; +use sled_agent_client::ZoneKind; +use std::cmp::Ordering; +use std::collections::BinaryHeap; +use std::mem; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[cfg_attr(test, derive(test_strategy::Arbitrary))] +pub(super) enum DiscretionaryOmicronZone { + Nexus, + // TODO expand this enum as we start to place more services +} + +impl DiscretionaryOmicronZone { + pub(super) fn from_zone_type( + zone_type: &BlueprintZoneType, + ) -> Option { + match zone_type { + BlueprintZoneType::Nexus(_) => Some(Self::Nexus), + // Zones that we should place but don't yet. 
+ BlueprintZoneType::BoundaryNtp(_) + | BlueprintZoneType::Clickhouse(_) + | BlueprintZoneType::ClickhouseKeeper(_) + | BlueprintZoneType::CockroachDb(_) + | BlueprintZoneType::CruciblePantry(_) + | BlueprintZoneType::ExternalDns(_) + | BlueprintZoneType::InternalDns(_) + | BlueprintZoneType::Oximeter(_) => None, + // Zones that get special handling for placement (all sleds get + // them, although internal NTP has some interactions with boundary + // NTP that we don't yet handle, so this may change). + BlueprintZoneType::Crucible(_) + | BlueprintZoneType::InternalNtp(_) => None, + } + } +} + +impl From<DiscretionaryOmicronZone> for ZoneKind { + fn from(zone: DiscretionaryOmicronZone) -> Self { + match zone { + DiscretionaryOmicronZone::Nexus => Self::Nexus, + } + } +} + +#[derive(Debug, thiserror::Error)] +pub(super) enum PlacementError { + #[error( + "no sleds eligible for placement of new {} zone", + ZoneKind::from(*zone_kind) + )] + NoSledsEligible { zone_kind: DiscretionaryOmicronZone }, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(super) struct OmicronZonePlacementSledState { + pub sled_id: SledUuid, + pub num_zpools: usize, + pub discretionary_zones: Vec<DiscretionaryOmicronZone>, +} + +/// `OmicronZonePlacement` keeps an internal heap of sleds and their current +/// discretionary zones and chooses sleds for placement of additional +/// discretionary zones. +#[derive(Debug, Clone)] +pub(super) struct OmicronZonePlacement { + sleds: OrderedSleds, +} + +impl OmicronZonePlacement { + /// Construct a new `OmicronZonePlacement` with a given set of eligible + /// sleds. + /// + /// Sleds which are not eligible for discretionary services for reasons + /// outside the knowledge of `OmicronZonePlacement` (e.g., sleds with a + /// policy or state that makes them ineligible) should be omitted from this + /// list of sleds. For now, sleds that are waiting for an NTP zone should be + /// omitted as well, although that may change in the future when we add + /// support for boundary NTP zone placement. + pub(super) fn new( + sleds: impl Iterator<Item = OmicronZonePlacementSledState>, + ) -> Self { + // We rebuild our heap whenever the zone type we're placing changes. We + // need to pick _something_ to start; this only matters for performance, + // not correctness (we don't have to rebuild the heap if `place_zone` is + // called with a zone kind that matches the current sorting). + let ordered_by = DiscretionaryOmicronZone::Nexus; + Self { sleds: OrderedSleds::new(ordered_by, sleds) } + } + + /// Attempt to place a new zone of kind `zone_kind` on one of the sleds + /// provided when this `OmicronZonePlacement` was created. + /// + /// On success, the internal heap held by `self` is updated assuming that a + /// new zone of the given kind was added to the sled returned by + /// `place_zone()`. This allows one `OmicronZonePlacement` to be reused + /// across multiple zone placement decisions, but requires the caller to + /// accept its decisions. If the caller decides not to add a zone to the + /// returned sled, the `OmicronZonePlacement` instance should be discarded + /// and a new one should be created for future placement decisions. + /// + /// Placement is currently minimal. The only hard requirement we enforce is + /// that a sled may only run one instance of any given zone kind per + /// zpool it has (e.g., a sled with 5 zpools could run 5 Nexus instances and + /// 5 CockroachDb instances concurrently, but could not run 6 Nexus + /// instances). If there is at least one sled that satisfies this + /// requirement, this method will return `Ok(_)`.
If there are multiple + /// sleds that satisfy this requirement, this method will return a sled + /// which has the fewest instances of `zone_kind`; if multiple sleds are + /// tied, it will pick the one with the fewest total discretionary zones; if + /// multiple sleds are still tied, it will pick deterministically (e.g., + /// choosing the lowest or highest sled ID). + /// + /// `OmicronZonePlacement` currently does not track _which_ zpools are + /// assigned to services. This could lead to it being overly conservative if + /// zpools that are not in service are hosting relevant zones. For example, + /// imagine a sled with two zpools: zpool-a and zpool-b. The sled has a + /// single Nexus instance with a transitory dataset on zpool-a. If zpool-a + /// is in a degraded state and considered not-in-service, + /// `OmicronZonePlacement` will be told by the planner that the sled has 1 + /// zpool. Our simple check of "at most one Nexus per zpool" would + /// erroneously fail to realize we could still add a Nexus (backed by + /// zpool-b), and would claim that the sled already has a Nexus for each + /// zpool. + /// + /// We punt on this problem for multiple reasons: + /// + /// 1. It's overly conservative; if we get into this state, we may refuse to + /// start services when we ought to be able to, but this isn't the worst + /// failure mode. In practice we should have far more options for + /// placement than we need for any of our control plane services, so + /// skipping a sled in this state should be fine. + /// 2. We don't yet track transitory datasets, so even if we wanted to know + /// which zpool Nexus was using (in the above example), we can't. + /// 3. We don't (yet?) have a way for a zpool to be present, backing a zone, + /// and not considered to be in service. The only zpools that aren't in + /// service belong to expunged disks, which can't be backing live + /// services. + pub(super) fn place_zone( + &mut self, + zone_kind: DiscretionaryOmicronZone, + ) -> Result<SledUuid, PlacementError> { + self.sleds.ensure_ordered_by(zone_kind); + + let mut sleds_skipped = Vec::new(); + let mut chosen_sled = None; + while let Some(sled) = self.sleds.pop() { + // Ensure we have at least one zpool more than the number of + // `zone_kind` zones already placed on this sled. If we don't, we + // already have a zone of this kind on each zpool, so we'll skip + // this sled. + if sled + .discretionary_zones + .iter() + .filter(|&&z| z == zone_kind) + .count() + < sled.num_zpools + { + chosen_sled = Some(sled); + break; + } else { + sleds_skipped.push(sled); + } + } + + // Push any skipped sleds back onto our heap. + for sled in sleds_skipped { + self.sleds.push(sled); + } + + let mut sled = + chosen_sled.ok_or(PlacementError::NoSledsEligible { zone_kind })?; + let sled_id = sled.sled_id; + + // Update our internal state so future `place_zone` calls take the new + // zone we just placed into account. + sled.discretionary_zones.push(zone_kind); + self.sleds.push(sled); + + Ok(sled_id) + } +} + +// Wrapper around a binary heap that allows us to change the ordering at runtime +// (so we can sort for particular types of zones to place). +#[derive(Debug, Clone)] +struct OrderedSleds { + // The current zone type we're sorted to place. We maintain the invariant + // that every element of `heap` has the same `ordered_by` value as this + // field's current value.
+ ordered_by: DiscretionaryOmicronZone, + heap: BinaryHeap<OrderedSledState>, +} + +impl OrderedSleds { + fn new( + ordered_by: DiscretionaryOmicronZone, + sleds: impl Iterator<Item = OmicronZonePlacementSledState>, + ) -> Self { + Self { + ordered_by, + heap: sleds + .map(|sled| OrderedSledState { ordered_by, sled }) + .collect(), + } + } + + fn ensure_ordered_by(&mut self, ordered_by: DiscretionaryOmicronZone) { + if self.ordered_by == ordered_by { + return; + } + + // Rebuild our heap, sorting by a new zone kind, and maintaining the + // invariant that all our heap members have the same `ordered_by` value + // as we do. + let mut sleds = mem::take(&mut self.heap).into_vec(); + for s in &mut sleds { + s.ordered_by = ordered_by; + } + self.heap = BinaryHeap::from(sleds); + self.ordered_by = ordered_by; + } + + fn pop(&mut self) -> Option<OmicronZonePlacementSledState> { + self.heap.pop().map(|ordered| ordered.sled) + } + + fn push(&mut self, sled: OmicronZonePlacementSledState) { + self.heap.push(OrderedSledState { ordered_by: self.ordered_by, sled }) + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +struct OrderedSledState { + ordered_by: DiscretionaryOmicronZone, + sled: OmicronZonePlacementSledState, +} + +impl Ord for OrderedSledState { + fn cmp(&self, other: &Self) -> Ordering { + // Invariant: We should never compare other entries with a different + // `ordered_by`. This is enforced by `OrderedSleds`. + assert_eq!(self.ordered_by, other.ordered_by); + + // Count how many zones of our ordering type are in each side. + let our_zones_of_interest = self + .sled + .discretionary_zones + .iter() + .filter(|&&z| z == self.ordered_by) + .count(); + let other_zones_of_interest = other + .sled + .discretionary_zones + .iter() + .filter(|&&z| z == self.ordered_by) + .count(); + + // BinaryHeap is a max heap, and we want to be on the top of the heap if + // we have fewer zones of interest, so reverse the comparisons below. + our_zones_of_interest + .cmp(&other_zones_of_interest) + .reverse() + // If the zones of interest count is equal, we tiebreak by total + // discretionary zones, again reversing the order for our max heap + // to prioritize sleds with fewer total discretionary zones. + .then_with(|| { + self.sled + .discretionary_zones + .len() + .cmp(&other.sled.discretionary_zones.len()) + .reverse() + }) + // If we're still tied, tiebreak by sorting on sled ID for + // determinism.
+ .then_with(|| self.sled.sled_id.cmp(&other.sled.sled_id)) + } +} + +impl PartialOrd for OrderedSledState { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +#[cfg(test)] +pub mod test { + use super::*; + use omicron_uuid_kinds::GenericUuid; + use proptest::arbitrary::any; + use proptest::collection::btree_map; + use proptest::sample::size_range; + use std::collections::BTreeMap; + use test_strategy::proptest; + use test_strategy::Arbitrary; + use uuid::Uuid; + + #[derive(Debug, Clone, Arbitrary)] + struct ZonesToPlace { + #[any(size_range(0..8).lift())] + zones: Vec<DiscretionaryOmicronZone>, + } + + #[derive(Debug, Clone, Arbitrary)] + struct ExistingSled { + zones: ZonesToPlace, + #[strategy(0_usize..8)] + num_zpools: usize, + } + + #[derive(Debug, Arbitrary)] + struct ArbitraryTestInput { + #[strategy(btree_map(any::<[u8; 16]>(), any::<ExistingSled>(), 1..8))] + existing_sleds: BTreeMap<[u8; 16], ExistingSled>, + zones_to_place: ZonesToPlace, + } + + #[derive(Debug)] + struct TestInput { + state: TestState, + zones_to_place: Vec<DiscretionaryOmicronZone>, + } + + impl From<ArbitraryTestInput> for TestInput { + fn from(input: ArbitraryTestInput) -> Self { + let mut sleds = BTreeMap::new(); + for (&raw_id, existing_sled) in input.existing_sleds.iter() { + let sled_id = + SledUuid::from_untyped_uuid(Uuid::from_bytes(raw_id)); + sleds.insert( + sled_id, + TestSledState { + zones: existing_sled.zones.zones.clone(), + num_zpools: existing_sled.num_zpools, + }, + ); + } + let state = TestState { sleds }; + Self { state, zones_to_place: input.zones_to_place.zones } + } + } + + #[derive(Debug)] + struct TestSledState { + zones: Vec<DiscretionaryOmicronZone>, + num_zpools: usize, + } + + impl TestSledState { + fn count_zones_of_kind(&self, kind: DiscretionaryOmicronZone) -> usize { + self.zones.iter().filter(|&&k| k == kind).count() + } + } + + #[derive(Debug)] + struct TestState { + sleds: BTreeMap<SledUuid, TestSledState>, + } + + impl TestState { + fn validate_sled_can_support_another_zone_of_kind( + &self, + sled_id: SledUuid, + kind: DiscretionaryOmicronZone, + ) -> Result<(), String> { + let sled_state = self.sleds.get(&sled_id).expect("valid sled_id"); + let existing_zones = sled_state.count_zones_of_kind(kind); + if existing_zones < sled_state.num_zpools { + Ok(()) + } else { + Err(format!( + "already have {existing_zones} \ + {kind:?} instances but only {} zpools", + sled_state.num_zpools + )) + } + } + + fn validate_placement( + &mut self, + sled_id: SledUuid, + kind: DiscretionaryOmicronZone, + ) -> Result<(), String> { + // Ensure this sled is eligible for this kind at all: We have to + // have at least one disk on which we can put the dataset for this + // zone that isn't already holding another zone of this same kind + // (i.e., at most one zone of any given kind per disk per sled). + self.validate_sled_can_support_another_zone_of_kind(sled_id, kind)?; + + let sled_state = self.sleds.get(&sled_id).expect("valid sled_id"); + let existing_zones = sled_state.count_zones_of_kind(kind); + + // Ensure this sled is (at least tied for) the best choice for this + // kind: it should have the minimum number of existing zones of this + // kind, and of all sleds tied for the minimum, it should have the + // fewest total discretionary services. + for (&other_sled_id, other_sled_state) in &self.sleds { + // Ignore other sleds that can't run another zone of `kind`.
+ if self + .validate_sled_can_support_another_zone_of_kind( + other_sled_id, + kind, + ) + .is_err() + { + continue; + } + + let other_zone_count = + other_sled_state.count_zones_of_kind(kind); + if other_zone_count < existing_zones { + return Err(format!( + "sled {other_sled_id} would be a better choice \ + (fewer existing {kind:?} instances: \ + {other_zone_count} < {existing_zones})" + )); + } + if other_zone_count == existing_zones + && other_sled_state.zones.len() < sled_state.zones.len() + { + return Err(format!( + "sled {other_sled_id} would be a better choice \ + (same number of existing {kind:?} instances, but \ + fewer total discretionary services: {} < {})", + other_sled_state.zones.len(), + sled_state.zones.len(), + )); + } + } + + // This placement is valid: update our state. + self.sleds.get_mut(&sled_id).unwrap().zones.push(kind); + Ok(()) + } + + fn validate_no_placement_possible( + &self, + kind: DiscretionaryOmicronZone, + ) -> Result<(), String> { + // Zones should be placeable unless every sled already has a zone of + // this kind on every disk. + for (sled_id, sled_state) in self.sleds.iter() { + if sled_state.count_zones_of_kind(kind) < sled_state.num_zpools + { + return Err(format!( + "sled {sled_id} is eligible for {kind:?} placement" + )); + } + } + Ok(()) + } + } + + #[proptest] + fn test_place_omicron_zones(input: ArbitraryTestInput) { + let mut input = TestInput::from(input); + + let mut placer = + OmicronZonePlacement::new(input.state.sleds.iter().map( + |(&sled_id, sled_state)| OmicronZonePlacementSledState { + sled_id, + num_zpools: sled_state.num_zpools, + discretionary_zones: sled_state.zones.clone(), + }, + )); + + for z in input.zones_to_place { + println!("placing {z:?}"); + match placer.place_zone(z) { + Ok(sled_id) => { + input + .state + .validate_placement(sled_id, z) + .expect("valid placement"); + } + Err(PlacementError::NoSledsEligible { zone_kind }) => { + assert_eq!(zone_kind, z); + input + .state + .validate_no_placement_possible(z) + .expect("no placement possible"); + } + } + } + } +} From 2082942d287bb8d890c39d624a45a8ca15ddda73 Mon Sep 17 00:00:00 2001 From: Levon Tarver <11586085+internet-diglett@users.noreply.github.com> Date: Wed, 22 May 2024 13:52:44 -0500 Subject: [PATCH 35/37] RPW for OPTE v2p Mappings (#5568) TODO --- - [x] Extend db view to include probe v2p mappings - [x] Update sagas to trigger rpw activation instead of directly configuring v2p mappings - [x] Test that the `delete` functionality cleans up v2p mappings Related --- Resolves #5214 Resolves #4259 Resolves #3107 - [x] Depends on https://github.com/oxidecomputer/opte/pull/494 - [x] Depends on https://github.com/oxidecomputer/meta/issues/409 - [x] Depends on https://github.com/oxidecomputer/maghemite/pull/244 --------- Co-authored-by: Levon Tarver --- .github/buildomat/jobs/deploy.sh | 2 +- Cargo.lock | 14 +- Cargo.toml | 4 +- clients/sled-agent-client/src/lib.rs | 3 +- dev-tools/omdb/tests/env.out | 12 + dev-tools/omdb/tests/successes.out | 11 + dev-tools/oxlog/src/bin/oxlog.rs | 2 +- illumos-utils/src/opte/params.rs | 18 +- illumos-utils/src/opte/port_manager.rs | 90 ++++- nexus-config/src/nexus_config.rs | 17 +- nexus/db-model/src/lib.rs | 2 + nexus/db-model/src/schema.rs | 11 + nexus/db-model/src/schema_versions.rs | 3 +- nexus/db-model/src/v2p_mapping.rs | 16 + nexus/db-queries/src/db/datastore/mod.rs | 1 + .../src/db/datastore/network_interface.rs | 56 +++ .../src/db/datastore/v2p_mapping.rs | 45 +++ nexus/examples/config.toml | 1 + 
nexus/src/app/background/init.rs | 21 ++ nexus/src/app/background/instance_watcher.rs | 6 +- nexus/src/app/background/mod.rs | 1 + nexus/src/app/background/v2p_mappings.rs | 165 +++++++++ nexus/src/app/instance.rs | 6 +- nexus/src/app/instance_network.rs | 328 ++---------------- nexus/src/app/mod.rs | 7 + nexus/src/app/sagas/instance_create.rs | 4 +- nexus/src/app/sagas/instance_delete.rs | 2 + nexus/src/app/sagas/instance_start.rs | 40 +-- nexus/tests/config.test.toml | 1 + nexus/tests/integration_tests/instances.rs | 100 +++--- openapi/sled-agent.json | 129 +++---- .../tests/output/self-stat-schema.json | 4 +- package-manifest.toml | 10 +- .../crdb/add-view-for-v2p-mappings/up01.sql | 41 +++ .../crdb/add-view-for-v2p-mappings/up02.sql | 3 + .../crdb/add-view-for-v2p-mappings/up03.sql | 2 + .../crdb/add-view-for-v2p-mappings/up04.sql | 2 + .../crdb/add-view-for-v2p-mappings/up05.sql | 4 + .../crdb/add-view-for-v2p-mappings/up06.sql | 2 + .../crdb/add-view-for-v2p-mappings/up07.sql | 2 + schema/crdb/dbinit.sql | 69 +++- sled-agent/src/http_entrypoints.rs | 38 +- sled-agent/src/sim/http_entrypoints.rs | 41 ++- sled-agent/src/sim/sled_agent.rs | 37 +- sled-agent/src/sled_agent.rs | 14 +- smf/nexus/multi-sled/config-partial.toml | 1 + smf/nexus/single-sled/config-partial.toml | 1 + tools/maghemite_mg_openapi_version | 4 +- tools/opte_version | 2 +- 49 files changed, 805 insertions(+), 590 deletions(-) create mode 100644 nexus/db-model/src/v2p_mapping.rs create mode 100644 nexus/db-queries/src/db/datastore/v2p_mapping.rs create mode 100644 nexus/src/app/background/v2p_mappings.rs create mode 100644 schema/crdb/add-view-for-v2p-mappings/up01.sql create mode 100644 schema/crdb/add-view-for-v2p-mappings/up02.sql create mode 100644 schema/crdb/add-view-for-v2p-mappings/up03.sql create mode 100644 schema/crdb/add-view-for-v2p-mappings/up04.sql create mode 100644 schema/crdb/add-view-for-v2p-mappings/up05.sql create mode 100644 schema/crdb/add-view-for-v2p-mappings/up06.sql create mode 100644 schema/crdb/add-view-for-v2p-mappings/up07.sql diff --git a/.github/buildomat/jobs/deploy.sh b/.github/buildomat/jobs/deploy.sh index c947a05e10a..31733f0dc00 100755 --- a/.github/buildomat/jobs/deploy.sh +++ b/.github/buildomat/jobs/deploy.sh @@ -2,7 +2,7 @@ #: #: name = "helios / deploy" #: variety = "basic" -#: target = "lab-2.0-opte-0.28" +#: target = "lab-2.0-opte-0.29" #: output_rules = [ #: "%/var/svc/log/oxide-sled-agent:default.log*", #: "%/zone/oxz_*/root/var/svc/log/oxide-*.log*", diff --git a/Cargo.lock b/Cargo.lock index 5cf0c5aa73c..0d534a3c2f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1732,7 +1732,7 @@ dependencies = [ [[package]] name = "derror-macro" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" +source = "git+https://github.com/oxidecomputer/opte?rev=4cc823b50d3e4a629cdfaab2b3d3382514174ba9#4cc823b50d3e4a629cdfaab2b3d3382514174ba9" dependencies = [ "darling", "proc-macro2", @@ -3481,7 +3481,7 @@ dependencies = [ [[package]] name = "illumos-sys-hdrs" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" +source = "git+https://github.com/oxidecomputer/opte?rev=4cc823b50d3e4a629cdfaab2b3d3382514174ba9#4cc823b50d3e4a629cdfaab2b3d3382514174ba9" [[package]] name = "illumos-utils" @@ -3894,7 +3894,7 @@ dependencies = [ [[package]] name = "kstat-macro" version = "0.1.0" -source = 
"git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" +source = "git+https://github.com/oxidecomputer/opte?rev=4cc823b50d3e4a629cdfaab2b3d3382514174ba9#4cc823b50d3e4a629cdfaab2b3d3382514174ba9" dependencies = [ "quote", "syn 2.0.64", @@ -6019,7 +6019,7 @@ dependencies = [ [[package]] name = "opte" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" +source = "git+https://github.com/oxidecomputer/opte?rev=4cc823b50d3e4a629cdfaab2b3d3382514174ba9#4cc823b50d3e4a629cdfaab2b3d3382514174ba9" dependencies = [ "cfg-if", "derror-macro", @@ -6037,7 +6037,7 @@ dependencies = [ [[package]] name = "opte-api" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" +source = "git+https://github.com/oxidecomputer/opte?rev=4cc823b50d3e4a629cdfaab2b3d3382514174ba9#4cc823b50d3e4a629cdfaab2b3d3382514174ba9" dependencies = [ "illumos-sys-hdrs", "ipnetwork", @@ -6049,7 +6049,7 @@ dependencies = [ [[package]] name = "opte-ioctl" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" +source = "git+https://github.com/oxidecomputer/opte?rev=4cc823b50d3e4a629cdfaab2b3d3382514174ba9#4cc823b50d3e4a629cdfaab2b3d3382514174ba9" dependencies = [ "libc", "libnet 0.1.0 (git+https://github.com/oxidecomputer/netadm-sys)", @@ -6123,7 +6123,7 @@ dependencies = [ [[package]] name = "oxide-vpc" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" +source = "git+https://github.com/oxidecomputer/opte?rev=4cc823b50d3e4a629cdfaab2b3d3382514174ba9#4cc823b50d3e4a629cdfaab2b3d3382514174ba9" dependencies = [ "cfg-if", "illumos-sys-hdrs", diff --git a/Cargo.toml b/Cargo.toml index 29e2a8cbd00..16207d2f310 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -347,14 +347,14 @@ omicron-sled-agent = { path = "sled-agent" } omicron-test-utils = { path = "test-utils" } omicron-zone-package = "0.11.0" oxide-client = { path = "clients/oxide-client" } -oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "7ee353a470ea59529ee1b34729681da887aa88ce", features = [ "api", "std" ] } +oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "4cc823b50d3e4a629cdfaab2b3d3382514174ba9", features = [ "api", "std" ] } once_cell = "1.19.0" openapi-lint = { git = "https://github.com/oxidecomputer/openapi-lint", branch = "main" } openapiv3 = "2.0.0" # must match samael's crate! 
openssl = "0.10" openssl-sys = "0.9" -opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "7ee353a470ea59529ee1b34729681da887aa88ce" } +opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "4cc823b50d3e4a629cdfaab2b3d3382514174ba9" } oso = "0.27" owo-colors = "4.0.0" oximeter = { path = "oximeter/oximeter" } diff --git a/clients/sled-agent-client/src/lib.rs b/clients/sled-agent-client/src/lib.rs index a0145af9106..4ac7eed27c2 100644 --- a/clients/sled-agent-client/src/lib.rs +++ b/clients/sled-agent-client/src/lib.rs @@ -35,7 +35,8 @@ progenitor::generate_api!( PortConfigV1 = { derives = [PartialEq, Eq, Hash, Serialize, Deserialize] }, RouteConfig = { derives = [PartialEq, Eq, Hash, Serialize, Deserialize] }, IpNet = { derives = [PartialEq, Eq, Hash, Serialize, Deserialize] }, - OmicronPhysicalDiskConfig = { derives = [Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash, PartialOrd, Ord] } + VirtualNetworkInterfaceHost = { derives = [PartialEq, Eq, Hash, Serialize, Deserialize] }, + OmicronPhysicalDiskConfig = { derives = [Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash, PartialOrd, Ord] }, }, //TODO trade the manual transformations later in this file for the // replace directives below? diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out index 57165106026..d187c47d18d 100644 --- a/dev-tools/omdb/tests/env.out +++ b/dev-tools/omdb/tests/env.out @@ -114,6 +114,10 @@ task: "switch_port_config_manager" manages switch port settings for rack switches +task: "v2p_manager" + manages opte v2p mappings for vpc networking + + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT @@ -225,6 +229,10 @@ task: "switch_port_config_manager" manages switch port settings for rack switches +task: "v2p_manager" + manages opte v2p mappings for vpc networking + + --------------------------------------------- stderr: note: Nexus URL not specified. Will pick one from DNS. @@ -323,6 +331,10 @@ task: "switch_port_config_manager" manages switch port settings for rack switches +task: "v2p_manager" + manages opte v2p mappings for vpc networking + + --------------------------------------------- stderr: note: Nexus URL not specified. Will pick one from DNS. 
diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index c4c28460b88..db6e5fde87d 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -291,6 +291,10 @@ task: "switch_port_config_manager" manages switch port settings for rack switches +task: "v2p_manager" + manages opte v2p mappings for vpc networking + + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ @@ -471,6 +475,13 @@ task: "switch_port_config_manager" started at (s ago) and ran for ms warning: unknown background task: "switch_port_config_manager" (don't know how to interpret details: Object {}) +task: "v2p_manager" + configured period: every 30s + currently executing: no + last completed activation: , triggered by an explicit signal + started at (s ago) and ran for ms +warning: unknown background task: "v2p_manager" (don't know how to interpret details: Object {}) + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ diff --git a/dev-tools/oxlog/src/bin/oxlog.rs b/dev-tools/oxlog/src/bin/oxlog.rs index ceeb98b3bdf..ed1c1a1fc84 100644 --- a/dev-tools/oxlog/src/bin/oxlog.rs +++ b/dev-tools/oxlog/src/bin/oxlog.rs @@ -47,7 +47,7 @@ struct FilterArgs { #[arg(short, long)] archived: bool, - // Print only the extra log files + /// Print only the extra log files #[arg(short, long)] extra: bool, diff --git a/illumos-utils/src/opte/params.rs b/illumos-utils/src/opte/params.rs index df1f33cb92c..17c61d680f1 100644 --- a/illumos-utils/src/opte/params.rs +++ b/illumos-utils/src/opte/params.rs @@ -31,26 +31,16 @@ pub struct VpcFirewallRule { } /// A mapping from a virtual NIC to a physical host -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] -pub struct SetVirtualNetworkInterfaceHost { +#[derive( + Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq, Eq, Hash, +)] +pub struct VirtualNetworkInterfaceHost { pub virtual_ip: IpAddr, pub virtual_mac: external::MacAddr, pub physical_host_ip: Ipv6Addr, pub vni: external::Vni, } -/// The data needed to identify a virtual IP for which a sled maintains an OPTE -/// virtual-to-physical mapping such that that mapping can be deleted. -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] -pub struct DeleteVirtualNetworkInterfaceHost { - /// The virtual IP whose mapping should be deleted. - pub virtual_ip: IpAddr, - - /// The VNI for the network containing the virtual IP whose mapping should - /// be deleted. - pub vni: external::Vni, -} - /// DHCP configuration for a port /// /// Not present here: Hostname (DHCPv4 option 12; used in DHCPv6 option 39); we diff --git a/illumos-utils/src/opte/port_manager.rs b/illumos-utils/src/opte/port_manager.rs index 03c51c321df..726aa01a2aa 100644 --- a/illumos-utils/src/opte/port_manager.rs +++ b/illumos-utils/src/opte/port_manager.rs @@ -5,8 +5,7 @@ //! 
Manager for all OPTE ports on a Helios system use crate::opte::opte_firewall_rules; -use crate::opte::params::DeleteVirtualNetworkInterfaceHost; -use crate::opte::params::SetVirtualNetworkInterfaceHost; +use crate::opte::params::VirtualNetworkInterfaceHost; use crate::opte::params::VpcFirewallRule; use crate::opte::Error; use crate::opte::Gateway; @@ -570,10 +569,62 @@ impl PortManager { Ok(()) } + #[cfg(target_os = "illumos")] + pub fn list_virtual_nics( + &self, + ) -> Result, Error> { + use macaddr::MacAddr6; + use opte_ioctl::OpteHdl; + + let hdl = OpteHdl::open(OpteHdl::XDE_CTL)?; + let v2p = + hdl.dump_v2p(&oxide_vpc::api::DumpVirt2PhysReq { unused: 99 })?; + let mut mappings: Vec<_> = vec![]; + + for mapping in v2p.mappings { + let vni = mapping + .vni + .as_u32() + .try_into() + .expect("opte VNI should be 24 bits"); + + for entry in mapping.ip4 { + mappings.push(VirtualNetworkInterfaceHost { + virtual_ip: IpAddr::V4(entry.0.into()), + virtual_mac: MacAddr6::from(entry.1.ether.bytes()).into(), + physical_host_ip: entry.1.ip.into(), + vni, + }); + } + + for entry in mapping.ip6 { + mappings.push(VirtualNetworkInterfaceHost { + virtual_ip: IpAddr::V6(entry.0.into()), + virtual_mac: MacAddr6::from(entry.1.ether.bytes()).into(), + physical_host_ip: entry.1.ip.into(), + vni, + }); + } + } + + Ok(mappings) + } + + #[cfg(not(target_os = "illumos"))] + pub fn list_virtual_nics( + &self, + ) -> Result, Error> { + info!( + self.inner.log, + "Listing virtual nics (ignored)"; + ); + Ok(vec![]) + } + #[cfg(target_os = "illumos")] pub fn set_virtual_nic_host( &self, - mapping: &SetVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { use opte_ioctl::OpteHdl; @@ -600,7 +651,7 @@ impl PortManager { #[cfg(not(target_os = "illumos"))] pub fn set_virtual_nic_host( &self, - mapping: &SetVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { info!( self.inner.log, @@ -613,20 +664,41 @@ impl PortManager { #[cfg(target_os = "illumos")] pub fn unset_virtual_nic_host( &self, - _mapping: &DeleteVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { - // TODO requires https://github.com/oxidecomputer/opte/issues/332 + use opte_ioctl::OpteHdl; + + info!( + self.inner.log, + "Clearing mapping of virtual NIC to physical host"; + "mapping" => ?&mapping, + ); + + let hdl = OpteHdl::open(OpteHdl::XDE_CTL)?; + hdl.clear_v2p(&oxide_vpc::api::ClearVirt2PhysReq { + vip: mapping.virtual_ip.into(), + phys: oxide_vpc::api::PhysNet { + ether: oxide_vpc::api::MacAddr::from( + (*mapping.virtual_mac).into_array(), + ), + ip: mapping.physical_host_ip.into(), + vni: Vni::new(mapping.vni).unwrap(), + }, + })?; - slog::warn!(self.inner.log, "unset_virtual_nic_host unimplmented"); Ok(()) } #[cfg(not(target_os = "illumos"))] pub fn unset_virtual_nic_host( &self, - _mapping: &DeleteVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { - info!(self.inner.log, "Ignoring unset of virtual NIC mapping"); + info!( + self.inner.log, + "Ignoring unset of virtual NIC mapping"; + "mapping" => ?&mapping, + ); Ok(()) } } diff --git a/nexus-config/src/nexus_config.rs b/nexus-config/src/nexus_config.rs index 01f642a36be..08517026ef2 100644 --- a/nexus-config/src/nexus_config.rs +++ b/nexus-config/src/nexus_config.rs @@ -379,6 +379,8 @@ pub struct BackgroundTaskConfig { pub instance_watcher: InstanceWatcherConfig, /// configuration for service VPC firewall propagation task pub 
service_firewall_propagation: ServiceFirewallPropagationConfig, + /// configuration for v2p mapping propagation task + pub v2p_mapping_propagation: V2PMappingPropagationConfig, } #[serde_as] @@ -539,6 +541,14 @@ pub struct ServiceFirewallPropagationConfig { pub period_secs: Duration, } +#[serde_as] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct V2PMappingPropagationConfig { + /// period (in seconds) for periodic activations of this background task + #[serde_as(as = "DurationSeconds")] + pub period_secs: Duration, +} + /// Configuration for a nexus server #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub struct PackageConfig { @@ -777,6 +787,7 @@ mod test { region_replacement.period_secs = 30 instance_watcher.period_secs = 30 service_firewall_propagation.period_secs = 300 + v2p_mapping_propagation.period_secs = 30 [default_region_allocation_strategy] type = "random" seed = 0 @@ -911,7 +922,10 @@ mod test { service_firewall_propagation: ServiceFirewallPropagationConfig { period_secs: Duration::from_secs(300), - } + }, + v2p_mapping_propagation: V2PMappingPropagationConfig { + period_secs: Duration::from_secs(30) + }, }, default_region_allocation_strategy: crate::nexus_config::RegionAllocationStrategy::Random { @@ -980,6 +994,7 @@ mod test { region_replacement.period_secs = 30 instance_watcher.period_secs = 30 service_firewall_propagation.period_secs = 300 + v2p_mapping_propagation.period_secs = 30 [default_region_allocation_strategy] type = "random" "##, diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index c7b495b0948..205885cfd86 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -55,6 +55,7 @@ mod project; mod semver_version; mod switch_interface; mod switch_port; +mod v2p_mapping; // These actually represent subqueries, not real table. // However, they must be defined in the same crate as our tables // for join-based marker trait generation. @@ -188,6 +189,7 @@ pub use typed_uuid::to_db_typed_uuid; pub use upstairs_repair::*; pub use user_builtin::*; pub use utilization::*; +pub use v2p_mapping::*; pub use virtual_provisioning_collection::*; pub use virtual_provisioning_resource::*; pub use vmm::*; diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 224c461da0a..423388de300 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -285,6 +285,17 @@ table! { } } +table! { + v2p_mapping_view (nic_id) { + nic_id -> Uuid, + sled_id -> Uuid, + sled_ip -> Inet, + vni -> Int4, + mac -> Int8, + ip -> Inet, + } +} + table! { bgp_announce_set (id) { id -> Uuid, diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index cb229274fea..ed4b762e68d 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -17,7 +17,7 @@ use std::collections::BTreeMap; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(63, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(64, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -29,6 +29,7 @@ static KNOWN_VERSIONS: Lazy> = Lazy::new(|| { // | leaving the first copy as an example for the next person. 
// v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(64, "add-view-for-v2p-mappings"), KnownVersion::new(63, "remove-producer-base-route-column"), KnownVersion::new(62, "allocate-subnet-decommissioned-sleds"), KnownVersion::new(61, "blueprint-add-sled-state"), diff --git a/nexus/db-model/src/v2p_mapping.rs b/nexus/db-model/src/v2p_mapping.rs new file mode 100644 index 00000000000..43831f75030 --- /dev/null +++ b/nexus/db-model/src/v2p_mapping.rs @@ -0,0 +1,16 @@ +use crate::schema::v2p_mapping_view; +use crate::{MacAddr, Vni}; +use ipnetwork::IpNetwork; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +#[derive(Queryable, Selectable, Clone, Debug, Serialize, Deserialize)] +#[diesel(table_name = v2p_mapping_view)] +pub struct V2PMappingView { + pub nic_id: Uuid, + pub sled_id: Uuid, + pub sled_ip: IpNetwork, + pub vni: Vni, + pub mac: MacAddr, + pub ip: IpNetwork, +} diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 9f2d2d02dbc..7c2cf8cf817 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -94,6 +94,7 @@ mod switch_port; pub(crate) mod test_utils; mod update; mod utilization; +mod v2p_mapping; mod virtual_provisioning_collection; mod vmm; mod volume; diff --git a/nexus/db-queries/src/db/datastore/network_interface.rs b/nexus/db-queries/src/db/datastore/network_interface.rs index 733e4ef32b0..f552e845c63 100644 --- a/nexus/db-queries/src/db/datastore/network_interface.rs +++ b/nexus/db-queries/src/db/datastore/network_interface.rs @@ -792,6 +792,62 @@ impl DataStore { public_error_from_diesel(e, ErrorHandler::Server) }) } + + /// List all network interfaces associated with all instances, making as + /// many queries as needed to get them all + /// + /// This should generally not be used in API handlers or other + /// latency-sensitive contexts, but it can make sense in saga actions or + /// background tasks. + /// + /// This particular method was added for propagating v2p mappings via RPWs + pub async fn instance_network_interfaces_all_list_batched( + &self, + opctx: &OpContext, + ) -> ListResultVec { + opctx.check_complex_operations_allowed()?; + + let mut all_interfaces = Vec::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = self + .instance_network_interfaces_all_list( + opctx, + &p.current_pagparams(), + ) + .await?; + paginator = p + .found_batch(&batch, &|nic: &InstanceNetworkInterface| { + nic.id() + }); + all_interfaces.extend(batch); + } + Ok(all_interfaces) + } + + /// List one page of all network interfaces associated with instances + pub async fn instance_network_interfaces_all_list( + &self, + opctx: &OpContext, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec { + use db::schema::instance_network_interface::dsl; + + // See the comment in `service_create_network_interface`. There's no + // obvious parent for a service network interface (as opposed to + // instance network interfaces, which require ListChildren on the + // instance to list). As a logical proxy, we check for listing children + // of the service IP pool. 
+ let (authz_pool, _pool) = self.ip_pools_service_lookup(opctx).await?; + opctx.authorize(authz::Action::ListChildren, &authz_pool).await?; + + paginated(dsl::instance_network_interface, dsl::id, pagparams) + .filter(dsl::time_deleted.is_null()) + .select(InstanceNetworkInterface::as_select()) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } } #[cfg(test)] diff --git a/nexus/db-queries/src/db/datastore/v2p_mapping.rs b/nexus/db-queries/src/db/datastore/v2p_mapping.rs new file mode 100644 index 00000000000..6c00957e7d1 --- /dev/null +++ b/nexus/db-queries/src/db/datastore/v2p_mapping.rs @@ -0,0 +1,45 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use super::DataStore; +use crate::context::OpContext; +use crate::db; +use crate::db::datastore::SQL_BATCH_SIZE; +use crate::db::error::{public_error_from_diesel, ErrorHandler}; +use crate::db::model::V2PMappingView; +use crate::db::pagination::paginated; +use crate::db::pagination::Paginator; +use async_bb8_diesel::AsyncRunQueryDsl; +use diesel::{QueryDsl, SelectableHelper}; +use omicron_common::api::external::ListResultVec; + +impl DataStore { + pub async fn v2p_mappings( + &self, + opctx: &OpContext, + ) -> ListResultVec { + use db::schema::v2p_mapping_view::dsl; + + opctx.check_complex_operations_allowed()?; + + let mut mappings = Vec::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = paginated( + dsl::v2p_mapping_view, + dsl::nic_id, + &p.current_pagparams(), + ) + .select(V2PMappingView::as_select()) + .load_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + paginator = p.found_batch(&batch, &|mapping| mapping.nic_id); + mappings.extend(batch); + } + + Ok(mappings) + } +} diff --git a/nexus/examples/config.toml b/nexus/examples/config.toml index d3faf2459c4..cba2edb7e66 100644 --- a/nexus/examples/config.toml +++ b/nexus/examples/config.toml @@ -116,6 +116,7 @@ region_replacement.period_secs = 30 # How frequently to query the status of active instances. instance_watcher.period_secs = 30 service_firewall_propagation.period_secs = 300 +v2p_mapping_propagation.period_secs = 30 [default_region_allocation_strategy] # allocate region on 3 random distinct zpools, on 3 random distinct sleds. 
diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index d2f940018d3..f7b7291c591 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -22,6 +22,7 @@ use super::region_replacement; use super::service_firewall_rules; use super::sync_service_zone_nat::ServiceZoneNatTracker; use super::sync_switch_configuration::SwitchPortSettingsManager; +use super::v2p_mappings::V2PManager; use crate::app::oximeter::PRODUCER_LEASE_DURATION; use crate::app::sagas::SagaRequest; use nexus_config::BackgroundTaskConfig; @@ -90,6 +91,9 @@ pub struct BackgroundTasks { /// task handle for the switch port settings manager pub task_switch_port_settings_manager: common::TaskHandle, + /// task handle for the opte v2p manager + pub task_v2p_manager: common::TaskHandle, + /// task handle for the task that detects if regions need replacement and /// begins the process pub task_region_replacement: common::TaskHandle, @@ -113,6 +117,10 @@ impl BackgroundTasks { nexus_id: Uuid, resolver: internal_dns::resolver::Resolver, saga_request: Sender, + v2p_watcher: ( + tokio::sync::watch::Sender<()>, + tokio::sync::watch::Receiver<()>, + ), producer_registry: &ProducerRegistry, ) -> BackgroundTasks { let mut driver = common::Driver::new(); @@ -332,6 +340,17 @@ impl BackgroundTasks { ) }; + let task_v2p_manager = { + driver.register( + "v2p_manager".to_string(), + String::from("manages opte v2p mappings for vpc networking"), + config.v2p_mapping_propagation.period_secs, + Box::new(V2PManager::new(datastore.clone())), + opctx.child(BTreeMap::new()), + vec![Box::new(v2p_watcher.1)], + ) + }; + // Background task: detect if a region needs replacement and begin the // process let task_region_replacement = { @@ -358,6 +377,7 @@ impl BackgroundTasks { resolver.clone(), producer_registry, instance_watcher::WatcherIdentity { nexus_id, rack_id }, + v2p_watcher.0, ); driver.register( "instance_watcher".to_string(), @@ -401,6 +421,7 @@ impl BackgroundTasks { task_blueprint_executor, task_service_zone_nat_tracker, task_switch_port_settings_manager, + task_v2p_manager, task_region_replacement, task_instance_watcher, task_service_firewall_propagation, diff --git a/nexus/src/app/background/instance_watcher.rs b/nexus/src/app/background/instance_watcher.rs index 4cdca3c4b7d..d473ea8e994 100644 --- a/nexus/src/app/background/instance_watcher.rs +++ b/nexus/src/app/background/instance_watcher.rs @@ -35,6 +35,7 @@ pub(crate) struct InstanceWatcher { resolver: internal_dns::resolver::Resolver, metrics: Arc>, id: WatcherIdentity, + v2p_notification_tx: tokio::sync::watch::Sender<()>, } const MAX_SLED_AGENTS: NonZeroU32 = unsafe { @@ -48,12 +49,13 @@ impl InstanceWatcher { resolver: internal_dns::resolver::Resolver, producer_registry: &ProducerRegistry, id: WatcherIdentity, + v2p_notification_tx: tokio::sync::watch::Sender<()>, ) -> Self { let metrics = Arc::new(Mutex::new(metrics::Metrics::default())); producer_registry .register_producer(metrics::Producer(metrics.clone())) .unwrap(); - Self { datastore, resolver, metrics, id } + Self { datastore, resolver, metrics, id, v2p_notification_tx } } fn check_instance( @@ -73,6 +75,7 @@ impl InstanceWatcher { .collect(), ); let client = client.clone(); + let v2p_notification_tx = self.v2p_notification_tx.clone(); async move { slog::trace!(opctx.log, "checking on instance..."); @@ -153,6 +156,7 @@ impl InstanceWatcher { &opctx.log, &target.instance_id, &new_runtime_state, + v2p_notification_tx, ) .await .map_err(|e| { diff --git 
a/nexus/src/app/background/mod.rs b/nexus/src/app/background/mod.rs index 512c782b2ed..38bde3c0483 100644 --- a/nexus/src/app/background/mod.rs +++ b/nexus/src/app/background/mod.rs @@ -25,5 +25,6 @@ mod service_firewall_rules; mod status; mod sync_service_zone_nat; mod sync_switch_configuration; +mod v2p_mappings; pub use init::BackgroundTasks; diff --git a/nexus/src/app/background/v2p_mappings.rs b/nexus/src/app/background/v2p_mappings.rs new file mode 100644 index 00000000000..a53ac3442f6 --- /dev/null +++ b/nexus/src/app/background/v2p_mappings.rs @@ -0,0 +1,165 @@ +use std::{collections::HashSet, sync::Arc}; + +use futures::future::BoxFuture; +use futures::FutureExt; +use nexus_db_model::{Sled, SledState}; +use nexus_db_queries::{context::OpContext, db::DataStore}; +use nexus_networking::sled_client_from_address; +use nexus_types::{ + deployment::SledFilter, external_api::views::SledPolicy, identity::Asset, +}; +use omicron_common::api::external::Vni; +use serde_json::json; +use sled_agent_client::types::VirtualNetworkInterfaceHost; + +use super::common::BackgroundTask; + +pub struct V2PManager { + datastore: Arc, +} + +impl V2PManager { + pub fn new(datastore: Arc) -> Self { + Self { datastore } + } +} + +impl BackgroundTask for V2PManager { + fn activate<'a>( + &'a mut self, + opctx: &'a OpContext, + ) -> BoxFuture<'a, serde_json::Value> { + let log = opctx.log.clone(); + + async move { + // Get the v2p mappings + let v2p_mappings = match self.datastore.v2p_mappings(opctx).await { + Ok(v) => v, + Err(e) => { + let msg = format!("failed to list v2p mappings: {:#}", e); + error!(&log, "{msg}"); + return json!({"error": msg}); + } + }; + + // Get sleds + // we only care about sleds that are active && inservice + let sleds = match self.datastore.sled_list_all_batched(opctx, SledFilter::InService).await + { + Ok(v) => v, + Err(e) => { + let msg = format!("failed to enumerate sleds: {:#}", e); + error!(&log, "{msg}"); + return json!({"error": msg}); + } + } + .into_iter() + .filter(|sled| { + matches!(sled.state(), SledState::Active) + && matches!(sled.policy(), SledPolicy::InService { .. }) + }); + + // Map sled db records to sled-agent clients + let sled_clients: Vec<(Sled, sled_agent_client::Client)> = sleds + .map(|sled| { + let client = sled_client_from_address( + sled.id(), + sled.address(), + &log, + ); + (sled, client) + }) + .collect(); + + // create a set of updates from the v2p mappings + let desired_v2p: HashSet<_> = v2p_mappings + .into_iter() + .filter_map(|mapping| { + let physical_host_ip = match mapping.sled_ip.ip() { + std::net::IpAddr::V4(v) => { + // sled ip should never be ipv4 + error!( + &log, + "sled ip should be ipv6 but is ipv4: {v}" + ); + return None; + } + std::net::IpAddr::V6(v) => v, + }; + + let vni = mapping.vni.0; + + let mapping = VirtualNetworkInterfaceHost { + virtual_ip: mapping.ip.ip(), + virtual_mac: *mapping.mac, + physical_host_ip, + vni, + }; + Some(mapping) + }) + .collect(); + + for (sled, client) in sled_clients { + // + // Get the current mappings on each sled + // Ignore vopte interfaces that are used for services. Service zones only need + // an opte interface for external communication. For services zones, intra-sled + // communication is facilitated via zone underlay interfaces / addresses, + // not opte interfaces / v2p mappings. 
+ // + let found_v2p: HashSet = match client.list_v2p().await { + Ok(v) => v.into_inner(), + Err(e) => { + error!( + &log, + "unable to list opte v2p mappings for sled"; + "sled" => sled.serial_number(), + "error" => ?e + ); + continue; + } + }.into_iter().filter(|vnic| vnic.vni != Vni::SERVICES_VNI).collect(); + + info!(&log, "found opte v2p mappings"; "sled" => sled.serial_number(), "interfaces" => ?found_v2p); + + let v2p_to_add: Vec<_> = desired_v2p.difference(&found_v2p).collect(); + + let v2p_to_del: Vec<_> = found_v2p.difference(&desired_v2p).collect(); + + // + // Generally, we delete stale entries before adding new entries in RPWs to prevent stale entries + // from causing a conflict with an incoming entry. In the case of opte it doesn't matter which + // order we perform the next two steps in, since conflicting stale entries are overwritten by the + // incoming entries. + // + info!(&log, "v2p mappings to delete"; "sled" => sled.serial_number(), "mappings" => ?v2p_to_del); + for mapping in v2p_to_del { + if let Err(e) = client.del_v2p(&mapping).await { + error!( + &log, + "failed to delete v2p mapping from sled"; + "sled" => sled.serial_number(), + "mapping" => ?mapping, + "error" => ?e, + ); + } + } + + info!(&log, "v2p mappings to add"; "sled" => sled.serial_number(), "mappings" => ?v2p_to_add); + for mapping in v2p_to_add { + if let Err(e) = client.set_v2p(mapping).await { + error!( + &log, + "failed to add v2p mapping to sled"; + "sled" => sled.serial_number(), + "mapping" => ?mapping, + "error" => ?e, + ); + } + } + } + json!({}) + } + .boxed() + } +} diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 50b46c8e8d3..63b080b436e 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -1515,13 +1515,14 @@ impl super::Nexus { new_runtime_state: &nexus::SledInstanceState, ) -> Result<(), Error> { notify_instance_updated( - &self.db_datastore, + &self.datastore(), &self.resolver().await, &self.opctx_alloc, opctx, &self.log, instance_id, new_runtime_state, + self.v2p_notification_tx.clone(), ) .await?; Ok(()) @@ -1965,6 +1966,7 @@ pub(crate) struct InstanceUpdated { /// Invoked by a sled agent to publish an updated runtime state for an /// Instance. 
+#[allow(clippy::too_many_arguments)] // :( pub(crate) async fn notify_instance_updated( datastore: &DataStore, resolver: &internal_dns::resolver::Resolver, @@ -1973,6 +1975,7 @@ pub(crate) async fn notify_instance_updated( log: &slog::Logger, instance_id: &Uuid, new_runtime_state: &nexus::SledInstanceState, + v2p_notification_tx: tokio::sync::watch::Sender<()>, ) -> Result, Error> { let propolis_id = new_runtime_state.propolis_id; @@ -2011,6 +2014,7 @@ pub(crate) async fn notify_instance_updated( &authz_instance, db_instance.runtime(), &new_runtime_state.instance_state, + v2p_notification_tx.clone(), ) .await?; diff --git a/nexus/src/app/instance_network.rs b/nexus/src/app/instance_network.rs index 30bea98cc62..de4de492e00 100644 --- a/nexus/src/app/instance_network.rs +++ b/nexus/src/app/instance_network.rs @@ -15,24 +15,20 @@ use nexus_db_model::Vni as DbVni; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; -use nexus_db_queries::db::identity::Asset; use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::DataStore; -use nexus_types::deployment::SledFilter; -use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; use omicron_common::api::external::Ipv4Net; use omicron_common::api::external::Ipv6Net; use omicron_common::api::internal::nexus; use omicron_common::api::internal::shared::NetworkInterface; use omicron_common::api::internal::shared::SwitchLocation; -use omicron_common::retry_until_known_result; -use sled_agent_client::types::DeleteVirtualNetworkInterfaceHost; -use sled_agent_client::types::SetVirtualNetworkInterfaceHost; use std::collections::HashSet; use std::str::FromStr; use uuid::Uuid; +use super::background::BackgroundTasks; + impl super::Nexus { /// Returns the set of switches with uplinks configured and boundary /// services enabled. @@ -43,41 +39,6 @@ impl super::Nexus { boundary_switches(&self.db_datastore, opctx).await } - /// Ensures that V2P mappings exist that indicate that the instance with ID - /// `instance_id` is resident on the sled with ID `sled_id`. - pub(crate) async fn create_instance_v2p_mappings( - &self, - opctx: &OpContext, - instance_id: Uuid, - sled_id: Uuid, - ) -> Result<(), Error> { - create_instance_v2p_mappings( - &self.db_datastore, - &self.log, - opctx, - &self.opctx_alloc, - instance_id, - sled_id, - ) - .await - } - - /// Ensure that the necessary v2p mappings for an instance are deleted - pub(crate) async fn delete_instance_v2p_mappings( - &self, - opctx: &OpContext, - instance_id: Uuid, - ) -> Result<(), Error> { - delete_instance_v2p_mappings( - &self.db_datastore, - &self.log, - opctx, - &self.opctx_alloc, - instance_id, - ) - .await - } - /// Ensures that the Dendrite configuration for the supplied instance is /// up-to-date. 
/// @@ -239,6 +200,7 @@ impl super::Nexus { opctx, &self.opctx_alloc, probe_id, + &self.background_tasks, ) .await } @@ -303,6 +265,7 @@ pub(crate) async fn ensure_updated_instance_network_config( authz_instance: &authz::Instance, prev_instance_state: &db::model::InstanceRuntimeState, new_instance_state: &nexus::InstanceRuntimeState, + v2p_notification_tx: tokio::sync::watch::Sender<()>, ) -> Result<(), Error> { let instance_id = authz_instance.id(); @@ -333,6 +296,7 @@ pub(crate) async fn ensure_updated_instance_network_config( opctx, opctx_alloc, authz_instance, + v2p_notification_tx, ) .await?; return Ok(()); @@ -412,15 +376,13 @@ pub(crate) async fn ensure_updated_instance_network_config( Err(e) => return Err(e), }; - create_instance_v2p_mappings( - datastore, - log, - opctx, - opctx_alloc, - instance_id, - new_sled_id, - ) - .await?; + if let Err(e) = v2p_notification_tx.send(()) { + error!( + log, + "error notifying background task of v2p change"; + "error" => ?e + ) + }; let (.., sled) = LookupPath::new(opctx, datastore).sled_id(new_sled_id).fetch().await?; @@ -735,20 +697,19 @@ pub(crate) async fn probe_ensure_dpd_config( async fn clear_instance_networking_state( datastore: &DataStore, log: &slog::Logger, - resolver: &internal_dns::resolver::Resolver, opctx: &OpContext, opctx_alloc: &OpContext, authz_instance: &authz::Instance, + v2p_notification_tx: tokio::sync::watch::Sender<()>, ) -> Result<(), Error> { - delete_instance_v2p_mappings( - datastore, - log, - opctx, - opctx_alloc, - authz_instance.id(), - ) - .await?; + if let Err(e) = v2p_notification_tx.send(()) { + error!( + log, + "error notifying background task of v2p change"; + "error" => ?e + ) + }; instance_delete_dpd_config( datastore, @@ -771,253 +732,6 @@ async fn clear_instance_networking_state( .await } -/// Ensures that V2P mappings exist that indicate that the instance with ID -/// `instance_id` is resident on the sled with ID `sled_id`. -pub(crate) async fn create_instance_v2p_mappings( - datastore: &DataStore, - log: &slog::Logger, - opctx: &OpContext, - opctx_alloc: &OpContext, - instance_id: Uuid, - sled_id: Uuid, -) -> Result<(), Error> { - info!(log, "creating V2P mappings for instance"; - "instance_id" => %instance_id, - "sled_id" => %sled_id); - - // For every sled that isn't the sled this instance was allocated to, create - // a virtual to physical mapping for each of this instance's NICs. - // - // For the mappings to be correct, a few invariants must hold: - // - // - mappings must be set whenever an instance's sled changes (eg. - // during instance creation, migration, stop + start) - // - // - an instances' sled must not change while its corresponding mappings - // are being created - // - // - the same mapping creation must be broadcast to all sleds - // - // A more targeted approach would be to see what other instances share - // the VPC this instance is in (or more generally, what instances should - // have connectivity to this one), see what sleds those are allocated - // to, and only create V2P mappings for those sleds. - // - // There's additional work with this approach: - // - // - it means that delete calls are required as well as set calls, - // meaning that now the ordering of those matters (this may also - // necessitate a generation number for V2P mappings) - // - // - V2P mappings have to be bidirectional in order for both instances's - // packets to make a round trip. 
This isn't a problem with the - // broadcast approach because one of the sides will exist already, but - // it is something to orchestrate with a more targeted approach. - // - // TODO-correctness Default firewall rules currently will block - // instances in different VPCs from connecting to each other. If it ever - // stops doing this, the broadcast approach will create V2P mappings - // that shouldn't exist. - let (.., authz_instance) = LookupPath::new(&opctx, &datastore) - .instance_id(instance_id) - .lookup_for(authz::Action::Read) - .await?; - - let instance_nics = datastore - .derive_guest_network_interface_info(&opctx, &authz_instance) - .await?; - - // Look up the supplied sled's physical host IP. - let physical_host_ip = - nexus_networking::sled_lookup(&datastore, &opctx_alloc, sled_id)? - .fetch() - .await? - .1 - .ip - .into(); - - let mut last_sled_id: Option = None; - loop { - let pagparams = DataPageParams { - marker: last_sled_id.as_ref(), - direction: dropshot::PaginationOrder::Ascending, - limit: std::num::NonZeroU32::new(10).unwrap(), - }; - - let sleds_page = datastore - // XXX: InService might not be exactly correct - .sled_list(&opctx_alloc, &pagparams, SledFilter::InService) - .await?; - let mut join_handles = - Vec::with_capacity(sleds_page.len() * instance_nics.len()); - - for sled in &sleds_page { - // set_v2p not required for sled instance was allocated to, OPTE - // currently does that automatically - // - // TODO(#3107): Remove this when XDE stops creating mappings - // implicitly. - if sled.id() == sled_id { - continue; - } - - for nic in &instance_nics { - let client = nexus_networking::sled_client( - datastore, - opctx_alloc, - sled.id(), - log, - ) - .await?; - let nic_id = nic.id; - let mapping = SetVirtualNetworkInterfaceHost { - virtual_ip: nic.ip, - virtual_mac: nic.mac, - physical_host_ip, - vni: nic.vni, - }; - - let log = log.clone(); - - // This function is idempotent: calling the set_v2p ioctl with - // the same information is a no-op. - join_handles.push(tokio::spawn(futures::future::lazy( - move |_ctx| async move { - retry_until_known_result(&log, || async { - client.set_v2p(&nic_id, &mapping).await - }) - .await - }, - ))); - } - } - - // Concurrently run each future to completion, but return the last - // error seen. - let mut error = None; - for join_handle in join_handles { - let result = join_handle - .await - .map_err(|e| Error::internal_error(&e.to_string()))? - .await; - - if result.is_err() { - error!(log, "{:?}", result); - error = Some(result); - } - } - if let Some(e) = error { - return e.map(|_| ()).map_err(|e| e.into()); - } - - if sleds_page.len() < 10 { - break; - } - - if let Some(last) = sleds_page.last() { - last_sled_id = Some(last.id()); - } - } - - Ok(()) -} - -/// Ensure that the necessary v2p mappings for an instance are deleted -pub(crate) async fn delete_instance_v2p_mappings( - datastore: &DataStore, - log: &slog::Logger, - opctx: &OpContext, - opctx_alloc: &OpContext, - instance_id: Uuid, -) -> Result<(), Error> { - // For every sled that isn't the sled this instance was allocated to, delete - // the virtual to physical mapping for each of this instance's NICs. If - // there isn't a V2P mapping, del_v2p should be a no-op. 
- let (.., authz_instance) = LookupPath::new(&opctx, datastore) - .instance_id(instance_id) - .lookup_for(authz::Action::Read) - .await?; - - let instance_nics = datastore - .derive_guest_network_interface_info(&opctx, &authz_instance) - .await?; - - let mut last_sled_id: Option = None; - - loop { - let pagparams = DataPageParams { - marker: last_sled_id.as_ref(), - direction: dropshot::PaginationOrder::Ascending, - limit: std::num::NonZeroU32::new(10).unwrap(), - }; - - let sleds_page = datastore - // XXX: InService might not be exactly correct - .sled_list(&opctx_alloc, &pagparams, SledFilter::InService) - .await?; - let mut join_handles = - Vec::with_capacity(sleds_page.len() * instance_nics.len()); - - for sled in &sleds_page { - for nic in &instance_nics { - let client = nexus_networking::sled_client( - &datastore, - &opctx_alloc, - sled.id(), - &log, - ) - .await?; - let nic_id = nic.id; - let mapping = DeleteVirtualNetworkInterfaceHost { - virtual_ip: nic.ip, - vni: nic.vni, - }; - - let log = log.clone(); - - // This function is idempotent: calling the set_v2p ioctl with - // the same information is a no-op. - join_handles.push(tokio::spawn(futures::future::lazy( - move |_ctx| async move { - retry_until_known_result(&log, || async { - client.del_v2p(&nic_id, &mapping).await - }) - .await - }, - ))); - } - } - - // Concurrently run each future to completion, but return the last - // error seen. - let mut error = None; - for join_handle in join_handles { - let result = join_handle - .await - .map_err(|e| Error::internal_error(&e.to_string()))? - .await; - - if result.is_err() { - error!(log, "{:?}", result); - error = Some(result); - } - } - if let Some(e) = error { - return e.map(|_| ()).map_err(|e| e.into()); - } - - if sleds_page.len() < 10 { - break; - } - - if let Some(last) = sleds_page.last() { - last_sled_id = Some(last.id()); - } - } - - Ok(()) -} - /// Attempts to delete all of the Dendrite NAT configuration for the /// instance identified by `authz_instance`. /// @@ -1083,6 +797,7 @@ pub(crate) async fn probe_delete_dpd_config( opctx: &OpContext, opctx_alloc: &OpContext, probe_id: Uuid, + background_tasks: &BackgroundTasks, ) -> Result<(), Error> { info!(log, "deleting probe dpd configuration"; "probe_id" => %probe_id); @@ -1139,6 +854,7 @@ pub(crate) async fn probe_delete_dpd_config( } }; + background_tasks.activate(&background_tasks.task_v2p_manager); // Notify dendrite that there are changes for it to reconcile. // In the event of a failure to notify dendrite, we'll log an error // and rely on dendrite's RPW timer to catch it up. 
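The hunks above replace Nexus's direct per-sled `set_v2p`/`del_v2p` calls with a nudge over a `tokio::sync::watch` channel plus an explicit activation of the v2p background task. As a rough, self-contained sketch of that notification pattern using plain tokio (the task body, interval, and log lines here are illustrative assumptions, not the actual reconciler):

    use std::time::Duration;

    #[tokio::main]
    async fn main() {
        // One watch channel shared by the writers and a single background
        // reconciler, mirroring `v2p_notification_tx` and its receiver.
        let (tx, mut rx) = tokio::sync::watch::channel(());

        let reconciler = tokio::spawn(async move {
            // Wake on an explicit notification or on a periodic timer, the
            // way an RPW also runs on its configured `period_secs`.
            let mut ticker = tokio::time::interval(Duration::from_secs(30));
            loop {
                tokio::select! {
                    changed = rx.changed() => {
                        if changed.is_err() {
                            break; // every sender dropped; shut down
                        }
                        println!("notified: reconcile desired vs. actual mappings");
                    }
                    _ = ticker.tick() => {
                        println!("periodic pass: reconcile desired vs. actual mappings");
                    }
                }
            }
        });

        // Writer side, analogous to `v2p_notification_tx.send(())` above: a
        // failure only means the receiver is gone, so it is logged instead
        // of being propagated to the caller.
        if let Err(e) = tx.send(()) {
            eprintln!("error notifying background task: {e:?}");
        }

        tokio::time::sleep(Duration::from_millis(50)).await;
        drop(tx);
        let _ = reconciler.await;
    }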
diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 4b77788c965..3083a8e761a 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -200,6 +200,9 @@ pub struct Nexus { /// Default Crucible region allocation strategy default_region_allocation_strategy: RegionAllocationStrategy, + + /// Channel for notifying background task of change to opte v2p state + v2p_notification_tx: tokio::sync::watch::Sender<()>, } impl Nexus { @@ -390,6 +393,8 @@ impl Nexus { Arc::clone(&db_datastore), ); + let v2p_watcher_channel = tokio::sync::watch::channel(()); + let (saga_request, mut saga_request_recv) = SagaRequest::channel(); let background_tasks = background::BackgroundTasks::start( @@ -400,6 +405,7 @@ impl Nexus { config.deployment.id, resolver.clone(), saga_request, + v2p_watcher_channel.clone(), producer_registry, ); @@ -453,6 +459,7 @@ impl Nexus { .pkg .default_region_allocation_strategy .clone(), + v2p_notification_tx: v2p_watcher_channel.0, }; // TODO-cleanup all the extra Arcs here seems wrong diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index a6df7183d19..a6771f65a0e 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -1287,9 +1287,7 @@ pub mod test { assert!(no_instances_or_disks_on_sled(&sled_agent).await); let v2p_mappings = &*sled_agent.v2p_mappings.lock().await; - for (_nic_id, mappings) in v2p_mappings { - assert!(mappings.is_empty()); - } + assert!(v2p_mappings.is_empty()); } #[nexus_test(server = crate::Server)] diff --git a/nexus/src/app/sagas/instance_delete.rs b/nexus/src/app/sagas/instance_delete.rs index d93c1455adc..b6fedc175d8 100644 --- a/nexus/src/app/sagas/instance_delete.rs +++ b/nexus/src/app/sagas/instance_delete.rs @@ -102,6 +102,7 @@ async fn sid_delete_network_interfaces( sagactx: NexusActionContext, ) -> Result<(), ActionError> { let osagactx = sagactx.user_data(); + let nexus = osagactx.nexus(); let params = sagactx.saga_params::()?; let opctx = crate::context::op_context_for_saga_action( &sagactx, @@ -112,6 +113,7 @@ async fn sid_delete_network_interfaces( .instance_delete_all_network_interfaces(&opctx, ¶ms.authz_instance) .await .map_err(ActionError::action_failed)?; + nexus.background_tasks.activate(&nexus.background_tasks.task_v2p_manager); Ok(()) } diff --git a/nexus/src/app/sagas/instance_start.rs b/nexus/src/app/sagas/instance_start.rs index b76bc2e37d3..e7caedfc9c1 100644 --- a/nexus/src/app/sagas/instance_start.rs +++ b/nexus/src/app/sagas/instance_start.rs @@ -447,50 +447,18 @@ async fn sis_dpd_ensure_undo( async fn sis_v2p_ensure( sagactx: NexusActionContext, ) -> Result<(), ActionError> { - let params = sagactx.saga_params::()?; let osagactx = sagactx.user_data(); - let instance_id = params.db_instance.id(); - - info!(osagactx.log(), "start saga: ensuring v2p mappings are configured"; - "instance_id" => %instance_id); - - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - - let sled_uuid = sagactx.lookup::("sled_id")?; - osagactx - .nexus() - .create_instance_v2p_mappings(&opctx, instance_id, sled_uuid) - .await - .map_err(ActionError::action_failed)?; - + let nexus = osagactx.nexus(); + nexus.background_tasks.activate(&nexus.background_tasks.task_v2p_manager); Ok(()) } async fn sis_v2p_ensure_undo( sagactx: NexusActionContext, ) -> Result<(), anyhow::Error> { - let params = sagactx.saga_params::()?; let osagactx = sagactx.user_data(); - let instance_id = params.db_instance.id(); - let sled_id = 
sagactx.lookup::("sled_id")?; - info!(osagactx.log(), "start saga: undoing v2p configuration"; - "instance_id" => %instance_id, - "sled_id" => %sled_id); - - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - - osagactx - .nexus() - .delete_instance_v2p_mappings(&opctx, instance_id) - .await - .map_err(ActionError::action_failed)?; - + let nexus = osagactx.nexus(); + nexus.background_tasks.activate(&nexus.background_tasks.task_v2p_manager); Ok(()) } diff --git a/nexus/tests/config.test.toml b/nexus/tests/config.test.toml index 25a6d97efcc..49a61cfa362 100644 --- a/nexus/tests/config.test.toml +++ b/nexus/tests/config.test.toml @@ -111,6 +111,7 @@ switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 instance_watcher.period_secs = 30 service_firewall_propagation.period_secs = 300 +v2p_mapping_propagation.period_secs = 30 [default_region_allocation_strategy] # we only have one sled in the test environment, so we need to use the diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 7ad52b99191..51e2552e85b 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -66,6 +66,7 @@ use omicron_nexus::app::MIN_MEMORY_BYTES_PER_INSTANCE; use omicron_nexus::Nexus; use omicron_nexus::TestInterfaces as _; use omicron_sled_agent::sim::SledAgent; +use omicron_test_utils::dev::poll::wait_for_condition; use sled_agent_client::TestInterfaces as _; use std::convert::TryFrom; use std::net::Ipv4Addr; @@ -660,14 +661,6 @@ async fn test_instance_start_creates_networking_state( .await .unwrap(); - let instance_state = datastore - .instance_fetch_with_vmm(&opctx, &authz_instance) - .await - .unwrap(); - - let sled_id = - instance_state.sled_id().expect("running instance should have a sled"); - let guest_nics = datastore .derive_guest_network_interface_info(&opctx, &authz_instance) .await @@ -675,13 +668,7 @@ async fn test_instance_start_creates_networking_state( assert_eq!(guest_nics.len(), 1); for agent in &sled_agents { - // TODO(#3107) Remove this bifurcation when Nexus programs all mappings - // itself. - if agent.id != sled_id { - assert_sled_v2p_mappings(agent, &nics[0], guest_nics[0].vni).await; - } else { - assert!(agent.v2p_mappings.lock().await.is_empty()); - } + assert_sled_v2p_mappings(agent, &nics[0], guest_nics[0].vni).await; } } @@ -861,24 +848,7 @@ async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { let mut sled_agents = vec![cptestctx.sled_agent.sled_agent.clone()]; sled_agents.extend(other_sleds.iter().map(|tup| tup.1.sled_agent.clone())); for sled_agent in &sled_agents { - // Starting the instance should have programmed V2P mappings to all the - // sleds except the one where the instance is running. - // - // TODO(#3107): In practice, the instance's sled also has V2P mappings, but - // these are established during VMM setup (i.e. as part of creating the - // instance's OPTE ports) instead of being established by explicit calls - // from Nexus. Simulated sled agent handles the latter calls but does - // not currently update any mappings during simulated instance creation, - // so the check below verifies that no mappings exist on the instance's - // own sled instead of checking for a real mapping. Once Nexus programs - // all mappings explicitly (without skipping the instance's current - // sled) this bifurcation should be removed. 
- if sled_agent.id != original_sled_id { - assert_sled_v2p_mappings(sled_agent, &nics[0], guest_nics[0].vni) - .await; - } else { - assert!(sled_agent.v2p_mappings.lock().await.is_empty()); - } + assert_sled_v2p_mappings(sled_agent, &nics[0], guest_nics[0].vni).await; } let dst_sled_id = if original_sled_id == cptestctx.sled_agent.sled_agent.id @@ -4545,14 +4515,6 @@ async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - let instance_state = datastore - .instance_fetch_with_vmm(&opctx, &authz_instance) - .await - .unwrap(); - - let sled_id = - instance_state.sled_id().expect("running instance should have a sled"); - let guest_nics = datastore .derive_guest_network_interface_info(&opctx, &authz_instance) .await @@ -4565,14 +4527,7 @@ async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { sled_agents.push(&cptestctx.sled_agent.sled_agent); for sled_agent in &sled_agents { - // TODO(#3107) Remove this bifurcation when Nexus programs all mappings - // itself. - if sled_agent.id != sled_id { - assert_sled_v2p_mappings(sled_agent, &nics[0], guest_nics[0].vni) - .await; - } else { - assert!(sled_agent.v2p_mappings.lock().await.is_empty()); - } + assert_sled_v2p_mappings(sled_agent, &nics[0], guest_nics[0].vni).await; } // Delete the instance @@ -4589,8 +4544,21 @@ async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { // Validate that every sled no longer has the V2P mapping for this instance for sled_agent in &sled_agents { - let v2p_mappings = sled_agent.v2p_mappings.lock().await; - assert!(v2p_mappings.is_empty()); + let condition = || async { + let v2p_mappings = sled_agent.v2p_mappings.lock().await; + if v2p_mappings.is_empty() { + Ok(()) + } else { + Err(CondCheckError::NotYet::<()>) + } + }; + wait_for_condition( + condition, + &Duration::from_secs(1), + &Duration::from_secs(30), + ) + .await + .expect("v2p mappings should be empty"); } } @@ -4687,14 +4655,28 @@ async fn assert_sled_v2p_mappings( nic: &InstanceNetworkInterface, vni: Vni, ) { - let v2p_mappings = sled_agent.v2p_mappings.lock().await; - assert!(!v2p_mappings.is_empty()); - - let mapping = v2p_mappings.get(&nic.identity.id).unwrap().last().unwrap(); - assert_eq!(mapping.virtual_ip, nic.ip); - assert_eq!(mapping.virtual_mac, nic.mac); - assert_eq!(mapping.physical_host_ip, sled_agent.ip); - assert_eq!(mapping.vni, vni); + let condition = || async { + let v2p_mappings = sled_agent.v2p_mappings.lock().await; + let mapping = v2p_mappings.iter().find(|mapping| { + mapping.virtual_ip == nic.ip + && mapping.virtual_mac == nic.mac + && mapping.physical_host_ip == sled_agent.ip + && mapping.vni == vni + }); + + if mapping.is_some() { + Ok(()) + } else { + Err(CondCheckError::NotYet::<()>) + } + }; + wait_for_condition( + condition, + &Duration::from_secs(1), + &Duration::from_secs(30), + ) + .await + .expect("matching v2p mapping should be present"); } /// Simulate completion of an ongoing instance state transition. 
To do this, we diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index 5da2b5c7976..7a951a6d159 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -842,26 +842,41 @@ } } }, - "/v2p/{interface_id}": { + "/v2p": { + "get": { + "summary": "List v2p mappings present on sled", + "operationId": "list_v2p", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_VirtualNetworkInterfaceHost", + "type": "array", + "items": { + "$ref": "#/components/schemas/VirtualNetworkInterfaceHost" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, "put": { "summary": "Create a mapping from a virtual NIC to a physical host", "operationId": "set_v2p", - "parameters": [ - { - "in": "path", - "name": "interface_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SetVirtualNetworkInterfaceHost" + "$ref": "#/components/schemas/VirtualNetworkInterfaceHost" } } }, @@ -882,22 +897,11 @@ "delete": { "summary": "Delete a mapping from a virtual NIC to a physical host", "operationId": "del_v2p", - "parameters": [ - { - "in": "path", - "name": "interface_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/DeleteVirtualNetworkInterfaceHost" + "$ref": "#/components/schemas/VirtualNetworkInterfaceHost" } } }, @@ -2016,29 +2020,6 @@ "target" ] }, - "DeleteVirtualNetworkInterfaceHost": { - "description": "The data needed to identify a virtual IP for which a sled maintains an OPTE virtual-to-physical mapping such that that mapping can be deleted.", - "type": "object", - "properties": { - "virtual_ip": { - "description": "The virtual IP whose mapping should be deleted.", - "type": "string", - "format": "ip" - }, - "vni": { - "description": "The VNI for the network containing the virtual IP whose mapping should be deleted.", - "allOf": [ - { - "$ref": "#/components/schemas/Vni" - } - ] - } - }, - "required": [ - "virtual_ip", - "vni" - ] - }, "DhcpConfig": { "description": "DHCP configuration for a port\n\nNot present here: Hostname (DHCPv4 option 12; used in DHCPv6 option 39); we use `InstanceRuntimeState::hostname` for this value.", "type": "object", @@ -4285,32 +4266,6 @@ "type": "string", "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" }, - "SetVirtualNetworkInterfaceHost": { - "description": "A mapping from a virtual NIC to a physical host", - "type": "object", - "properties": { - "physical_host_ip": { - "type": "string", - "format": "ipv6" - }, - "virtual_ip": { - "type": "string", - "format": "ip" - }, - "virtual_mac": { - "$ref": "#/components/schemas/MacAddr" - }, - "vni": { - "$ref": "#/components/schemas/Vni" - } - }, - "required": [ - "physical_host_ip", - "virtual_ip", - "virtual_mac", - "vni" - ] - }, "SledInstanceState": { "description": "A wrapper type containing a sled's total knowledge of the state of a specific VMM and the instance it incarnates.", "type": "object", @@ -4579,6 +4534,32 @@ "version" ] }, + "VirtualNetworkInterfaceHost": { + "description": "A mapping from a virtual NIC to a 
physical host", + "type": "object", + "properties": { + "physical_host_ip": { + "type": "string", + "format": "ipv6" + }, + "virtual_ip": { + "type": "string", + "format": "ip" + }, + "virtual_mac": { + "$ref": "#/components/schemas/MacAddr" + }, + "vni": { + "$ref": "#/components/schemas/Vni" + } + }, + "required": [ + "physical_host_ip", + "virtual_ip", + "virtual_mac", + "vni" + ] + }, "VmmRuntimeState": { "description": "The dynamic runtime properties of an individual VMM process.", "type": "object", diff --git a/oximeter/collector/tests/output/self-stat-schema.json b/oximeter/collector/tests/output/self-stat-schema.json index 111d7c0ed2c..286ac63405f 100644 --- a/oximeter/collector/tests/output/self-stat-schema.json +++ b/oximeter/collector/tests/output/self-stat-schema.json @@ -39,7 +39,7 @@ } ], "datum_type": "cumulative_u64", - "created": "2024-05-17T01:26:16.797600385Z" + "created": "2024-05-21T18:32:24.199619581Z" }, "oximeter_collector:failed_collections": { "timeseries_name": "oximeter_collector:failed_collections", @@ -86,6 +86,6 @@ } ], "datum_type": "cumulative_u64", - "created": "2024-05-17T01:26:16.798713487Z" + "created": "2024-05-21T18:32:24.200514936Z" } } \ No newline at end of file diff --git a/package-manifest.toml b/package-manifest.toml index 2bfc51d533b..7f80dacf7c7 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -533,10 +533,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "025389ff39d594bf2b815377e2c1dc4dd23b1f96" +source.commit = "23b0cf439f9f62b9a4933e55cc72bcaddc9596cd" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//maghemite.sha256.txt -source.sha256 = "f2ee54b6a654daa1c1f817440317e9b11c5ddc71249df261bb5cfa0e6057dc24" +source.sha256 = "1ea0e73e149a68bf91b5ce2e0db2a8a1af50dcdbbf381b672aa9ac7e36a3a181" output.type = "tarball" [package.mg-ddm] @@ -549,10 +549,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "025389ff39d594bf2b815377e2c1dc4dd23b1f96" +source.commit = "23b0cf439f9f62b9a4933e55cc72bcaddc9596cd" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt -source.sha256 = "bb98815f759f38abee9f5aea0978cd33e66e75079cc8c171036be21bf9049c96" +source.sha256 = "3334b0a9d5956e3117a6b493b9a5a31220391fab1ecbfb3a4bd8e94d7030771a" output.type = "zone" output.intermediate_only = true @@ -564,7 +564,7 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). 
-source.commit = "025389ff39d594bf2b815377e2c1dc4dd23b1f96" +source.commit = "23b0cf439f9f62b9a4933e55cc72bcaddc9596cd" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt source.sha256 = "e0907de39ca9f8ab45d40d361a1dbeed4bd8e9b157f8d3d8fe0a4bc259d933bd" diff --git a/schema/crdb/add-view-for-v2p-mappings/up01.sql b/schema/crdb/add-view-for-v2p-mappings/up01.sql new file mode 100644 index 00000000000..96d5723c003 --- /dev/null +++ b/schema/crdb/add-view-for-v2p-mappings/up01.sql @@ -0,0 +1,41 @@ +CREATE VIEW IF NOT EXISTS omicron.public.v2p_mapping_view +AS +WITH VmV2pMappings AS ( + SELECT + n.id as nic_id, + s.id as sled_id, + s.ip as sled_ip, + v.vni, + n.mac, + n.ip + FROM omicron.public.network_interface n + JOIN omicron.public.vpc_subnet vs ON vs.id = n.subnet_id + JOIN omicron.public.vpc v ON v.id = n.vpc_id + JOIN omicron.public.vmm vmm ON n.parent_id = vmm.instance_id + JOIN omicron.public.sled s ON vmm.sled_id = s.id + WHERE n.time_deleted IS NULL + AND n.kind = 'instance' + AND s.sled_policy = 'in_service' + AND s.sled_state = 'active' +), +ProbeV2pMapping AS ( + SELECT + n.id as nic_id, + s.id as sled_id, + s.ip as sled_ip, + v.vni, + n.mac, + n.ip + FROM omicron.public.network_interface n + JOIN omicron.public.vpc_subnet vs ON vs.id = n.subnet_id + JOIN omicron.public.vpc v ON v.id = n.vpc_id + JOIN omicron.public.probe p ON n.parent_id = p.id + JOIN omicron.public.sled s ON p.sled = s.id + WHERE n.time_deleted IS NULL + AND n.kind = 'probe' + AND s.sled_policy = 'in_service' + AND s.sled_state = 'active' +) +SELECT nic_id, sled_id, sled_ip, vni, mac, ip FROM VmV2pMappings +UNION +SELECT nic_id, sled_id, sled_ip, vni, mac, ip FROM ProbeV2pMapping; diff --git a/schema/crdb/add-view-for-v2p-mappings/up02.sql b/schema/crdb/add-view-for-v2p-mappings/up02.sql new file mode 100644 index 00000000000..5ab1075fbe2 --- /dev/null +++ b/schema/crdb/add-view-for-v2p-mappings/up02.sql @@ -0,0 +1,3 @@ +CREATE INDEX IF NOT EXISTS network_interface_by_parent +ON omicron.public.network_interface (parent_id) +STORING (name, kind, vpc_id, subnet_id, mac, ip, slot); diff --git a/schema/crdb/add-view-for-v2p-mappings/up03.sql b/schema/crdb/add-view-for-v2p-mappings/up03.sql new file mode 100644 index 00000000000..86cef026a14 --- /dev/null +++ b/schema/crdb/add-view-for-v2p-mappings/up03.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS sled_by_policy_and_state +ON omicron.public.sled (sled_policy, sled_state, id) STORING (ip); diff --git a/schema/crdb/add-view-for-v2p-mappings/up04.sql b/schema/crdb/add-view-for-v2p-mappings/up04.sql new file mode 100644 index 00000000000..809146b809c --- /dev/null +++ b/schema/crdb/add-view-for-v2p-mappings/up04.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS active_vmm +on omicron.public.vmm (time_deleted, sled_id, instance_id); diff --git a/schema/crdb/add-view-for-v2p-mappings/up05.sql b/schema/crdb/add-view-for-v2p-mappings/up05.sql new file mode 100644 index 00000000000..cdabdc6a96b --- /dev/null +++ b/schema/crdb/add-view-for-v2p-mappings/up05.sql @@ -0,0 +1,4 @@ +CREATE INDEX IF NOT EXISTS v2p_mapping_details +ON omicron.public.network_interface ( + time_deleted, kind, subnet_id, vpc_id, parent_id +) STORING (mac, ip); diff --git a/schema/crdb/add-view-for-v2p-mappings/up06.sql b/schema/crdb/add-view-for-v2p-mappings/up06.sql new file mode 100644 index 00000000000..afd10ed13f8 --- /dev/null +++ b/schema/crdb/add-view-for-v2p-mappings/up06.sql @@ -0,0 +1,2 @@ +CREATE 
INDEX IF NOT EXISTS sled_by_policy +ON omicron.public.sled (sled_policy) STORING (ip, sled_state); diff --git a/schema/crdb/add-view-for-v2p-mappings/up07.sql b/schema/crdb/add-view-for-v2p-mappings/up07.sql new file mode 100644 index 00000000000..defe411f966 --- /dev/null +++ b/schema/crdb/add-view-for-v2p-mappings/up07.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS vmm_by_instance_id +ON omicron.public.vmm (instance_id) STORING (sled_id); diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index cc298e4565a..2cf9e1100fd 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3799,6 +3799,73 @@ ON omicron.public.switch_port (port_settings_id, port_name) STORING (switch_loca CREATE INDEX IF NOT EXISTS switch_port_name ON omicron.public.switch_port (port_name); +COMMIT; +BEGIN; + +-- view for v2p mapping rpw +CREATE VIEW IF NOT EXISTS omicron.public.v2p_mapping_view +AS +WITH VmV2pMappings AS ( + SELECT + n.id as nic_id, + s.id as sled_id, + s.ip as sled_ip, + v.vni, + n.mac, + n.ip + FROM omicron.public.network_interface n + JOIN omicron.public.vpc_subnet vs ON vs.id = n.subnet_id + JOIN omicron.public.vpc v ON v.id = n.vpc_id + JOIN omicron.public.vmm vmm ON n.parent_id = vmm.instance_id + JOIN omicron.public.sled s ON vmm.sled_id = s.id + WHERE n.time_deleted IS NULL + AND n.kind = 'instance' + AND s.sled_policy = 'in_service' + AND s.sled_state = 'active' +), +ProbeV2pMapping AS ( + SELECT + n.id as nic_id, + s.id as sled_id, + s.ip as sled_ip, + v.vni, + n.mac, + n.ip + FROM omicron.public.network_interface n + JOIN omicron.public.vpc_subnet vs ON vs.id = n.subnet_id + JOIN omicron.public.vpc v ON v.id = n.vpc_id + JOIN omicron.public.probe p ON n.parent_id = p.id + JOIN omicron.public.sled s ON p.sled = s.id + WHERE n.time_deleted IS NULL + AND n.kind = 'probe' + AND s.sled_policy = 'in_service' + AND s.sled_state = 'active' +) +SELECT nic_id, sled_id, sled_ip, vni, mac, ip FROM VmV2pMappings +UNION +SELECT nic_id, sled_id, sled_ip, vni, mac, ip FROM ProbeV2pMapping; + +CREATE INDEX IF NOT EXISTS network_interface_by_parent +ON omicron.public.network_interface (parent_id) +STORING (name, kind, vpc_id, subnet_id, mac, ip, slot); + +CREATE INDEX IF NOT EXISTS sled_by_policy_and_state +ON omicron.public.sled (sled_policy, sled_state, id) STORING (ip); + +CREATE INDEX IF NOT EXISTS active_vmm +ON omicron.public.vmm (time_deleted, sled_id, instance_id); + +CREATE INDEX IF NOT EXISTS v2p_mapping_details +ON omicron.public.network_interface ( + time_deleted, kind, subnet_id, vpc_id, parent_id +) STORING (mac, ip); + +CREATE INDEX IF NOT EXISTS sled_by_policy +ON omicron.public.sled (sled_policy) STORING (ip, sled_state); + +CREATE INDEX IF NOT EXISTS vmm_by_instance_id +ON omicron.public.vmm (instance_id) STORING (sled_id); + /* * Metadata for the schema itself. This version number isn't great, as there's * nothing to ensure it gets bumped when it should be, but it's a start. 
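The view and indexes above supply the "desired" half of the comparison that the new v2p background task makes against what each sled reports from `list_v2p`. A self-contained illustration of that set arithmetic follows; the `Mapping` struct and the addresses in it are made up for the example, while the real task uses the sled-agent client types shown earlier in this patch:

    use std::collections::HashSet;

    // Hypothetical stand-in for a v2p_mapping_view row / OPTE mapping.
    #[derive(Clone, Debug, PartialEq, Eq, Hash)]
    struct Mapping {
        virtual_ip: String,
        physical_host_ip: String,
        vni: u32,
    }

    fn main() {
        // What the control plane wants (rows from the view) ...
        let desired: HashSet<Mapping> = HashSet::from([
            Mapping { virtual_ip: "172.30.0.5".into(), physical_host_ip: "fd00::1".into(), vni: 1234 },
            Mapping { virtual_ip: "172.30.0.6".into(), physical_host_ip: "fd00::2".into(), vni: 1234 },
        ]);
        // ... versus what one sled actually has (its list_v2p response).
        let found: HashSet<Mapping> = HashSet::from([
            Mapping { virtual_ip: "172.30.0.5".into(), physical_host_ip: "fd00::1".into(), vni: 1234 },
            Mapping { virtual_ip: "172.30.0.9".into(), physical_host_ip: "fd00::3".into(), vni: 1234 },
        ]);

        // Same set arithmetic as the background task: add what is missing
        // from the sled, delete what is stale on it.
        let to_add: Vec<_> = desired.difference(&found).collect();
        let to_del: Vec<_> = found.difference(&desired).collect();

        println!("to add: {to_add:?}");
        println!("to delete: {to_del:?}");
    }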
@@ -3859,7 +3926,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '63.0.0', NULL) + (TRUE, NOW(), NOW(), '64.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/sled-agent/src/http_entrypoints.rs b/sled-agent/src/http_entrypoints.rs index 99c7725fe3f..c5cd88619f7 100644 --- a/sled-agent/src/http_entrypoints.rs +++ b/sled-agent/src/http_entrypoints.rs @@ -25,9 +25,7 @@ use dropshot::{ HttpResponseUpdatedNoContent, Path, Query, RequestContext, StreamingBody, TypedBody, }; -use illumos_utils::opte::params::{ - DeleteVirtualNetworkInterfaceHost, SetVirtualNetworkInterfaceHost, -}; +use illumos_utils::opte::params::VirtualNetworkInterfaceHost; use installinator_common::M2Slot; use omicron_common::api::external::Error; use omicron_common::api::internal::nexus::{ @@ -71,6 +69,7 @@ pub fn api() -> SledApiDescription { api.register(zone_bundle_cleanup_context_update)?; api.register(zone_bundle_cleanup)?; api.register(sled_role_get)?; + api.register(list_v2p)?; api.register(set_v2p)?; api.register(del_v2p)?; api.register(timesync_get)?; @@ -652,24 +651,16 @@ async fn vpc_firewall_rules_put( Ok(HttpResponseUpdatedNoContent()) } -/// Path parameters for V2P mapping related requests (sled agent API) -#[allow(dead_code)] -#[derive(Deserialize, JsonSchema)] -struct V2pPathParam { - interface_id: Uuid, -} - /// Create a mapping from a virtual NIC to a physical host // Keep interface_id to maintain parity with the simulated sled agent, which // requires interface_id on the path. #[endpoint { method = PUT, - path = "/v2p/{interface_id}", + path = "/v2p/", }] async fn set_v2p( rqctx: RequestContext<SledAgent>, - _path_params: Path<V2pPathParam>, - body: TypedBody<SetVirtualNetworkInterfaceHost>, + body: TypedBody<VirtualNetworkInterfaceHost>, ) -> Result<HttpResponseUpdatedNoContent, HttpError> { let sa = rqctx.context(); let body_args = body.into_inner(); @@ -684,12 +675,11 @@ async fn set_v2p( // requires interface_id on the path.
#[endpoint { method = DELETE, - path = "/v2p/{interface_id}", + path = "/v2p/", }] async fn del_v2p( rqctx: RequestContext<SledAgent>, - _path_params: Path<V2pPathParam>, - body: TypedBody<DeleteVirtualNetworkInterfaceHost>, + body: TypedBody<VirtualNetworkInterfaceHost>, ) -> Result<HttpResponseUpdatedNoContent, HttpError> { let sa = rqctx.context(); let body_args = body.into_inner(); @@ -699,6 +689,22 @@ async fn del_v2p( Ok(HttpResponseUpdatedNoContent()) } +/// List v2p mappings present on sled +// Used by nexus background task +#[endpoint { + method = GET, + path = "/v2p/", +}] +async fn list_v2p( + rqctx: RequestContext<SledAgent>, +) -> Result<HttpResponseOk<Vec<VirtualNetworkInterfaceHost>>, HttpError> { + let sa = rqctx.context(); + + let vnics = sa.list_virtual_nics().await.map_err(Error::from)?; + + Ok(HttpResponseOk(vnics)) +} + #[endpoint { method = GET, path = "/timesync", diff --git a/sled-agent/src/sim/http_entrypoints.rs b/sled-agent/src/sim/http_entrypoints.rs index 6cddac6fb85..ae1318a8b17 100644 --- a/sled-agent/src/sim/http_entrypoints.rs +++ b/sled-agent/src/sim/http_entrypoints.rs @@ -20,8 +20,7 @@ use dropshot::HttpResponseUpdatedNoContent; use dropshot::Path; use dropshot::RequestContext; use dropshot::TypedBody; -use illumos_utils::opte::params::DeleteVirtualNetworkInterfaceHost; -use illumos_utils::opte::params::SetVirtualNetworkInterfaceHost; +use illumos_utils::opte::params::VirtualNetworkInterfaceHost; use omicron_common::api::internal::nexus::DiskRuntimeState; use omicron_common::api::internal::nexus::SledInstanceState; use omicron_common::api::internal::nexus::UpdateArtifactId; @@ -54,6 +53,7 @@ pub fn api() -> SledApiDescription { api.register(vpc_firewall_rules_put)?; api.register(set_v2p)?; api.register(del_v2p)?; + api.register(list_v2p)?; api.register(uplink_ensure)?; api.register(read_network_bootstore_config)?; api.register(write_network_bootstore_config)?; @@ -343,27 +343,19 @@ async fn vpc_firewall_rules_put( Ok(HttpResponseUpdatedNoContent()) } -/// Path parameters for V2P mapping related requests (sled agent API) -#[derive(Deserialize, JsonSchema)] -struct V2pPathParam { - interface_id: Uuid, -} - /// Create a mapping from a virtual NIC to a physical host #[endpoint { method = PUT, - path = "/v2p/{interface_id}", + path = "/v2p/", }] async fn set_v2p( rqctx: RequestContext<Arc<SledAgent>>, - path_params: Path<V2pPathParam>, - body: TypedBody<SetVirtualNetworkInterfaceHost>, + body: TypedBody<VirtualNetworkInterfaceHost>, ) -> Result<HttpResponseUpdatedNoContent, HttpError> { let sa = rqctx.context(); - let interface_id = path_params.into_inner().interface_id; let body_args = body.into_inner(); - sa.set_virtual_nic_host(interface_id, &body_args) + sa.set_virtual_nic_host(&body_args) .await .map_err(|e| HttpError::for_internal_error(e.to_string()))?; @@ -373,24 +365,37 @@ async fn set_v2p( /// Delete a mapping from a virtual NIC to a physical host #[endpoint { method = DELETE, - path = "/v2p/{interface_id}", + path = "/v2p/", }] async fn del_v2p( rqctx: RequestContext<Arc<SledAgent>>, - path_params: Path<V2pPathParam>, - body: TypedBody<DeleteVirtualNetworkInterfaceHost>, + body: TypedBody<VirtualNetworkInterfaceHost>, ) -> Result<HttpResponseUpdatedNoContent, HttpError> { let sa = rqctx.context(); - let interface_id = path_params.into_inner().interface_id; let body_args = body.into_inner(); - sa.unset_virtual_nic_host(interface_id, &body_args) + sa.unset_virtual_nic_host(&body_args) .await .map_err(|e| HttpError::for_internal_error(e.to_string()))?; Ok(HttpResponseUpdatedNoContent()) } +/// List v2p mappings present on sled +#[endpoint { + method = GET, + path = "/v2p/", +}] +async fn list_v2p( + rqctx: RequestContext<Arc<SledAgent>>, +) -> Result<HttpResponseOk<Vec<VirtualNetworkInterfaceHost>>, HttpError> { + let sa = rqctx.context(); + + let vnics = sa.list_virtual_nics().await.map_err(HttpError::from)?; + + Ok(HttpResponseOk(vnics)) +} + #[endpoint { method = POST, path = "/switch-ports", diff --git a/sled-agent/src/sim/sled_agent.rs
b/sled-agent/src/sim/sled_agent.rs index 298a8adc34b..d9308bf769c 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -26,9 +26,7 @@ use anyhow::bail; use anyhow::Context; use dropshot::{HttpError, HttpServer}; use futures::lock::Mutex; -use illumos_utils::opte::params::{ - DeleteVirtualNetworkInterfaceHost, SetVirtualNetworkInterfaceHost, -}; +use illumos_utils::opte::params::VirtualNetworkInterfaceHost; use ipnetwork::Ipv6Network; use omicron_common::api::external::{ ByteCount, DiskState, Error, Generation, ResourceType, @@ -74,7 +72,7 @@ pub struct SledAgent { nexus_address: SocketAddr, pub nexus_client: Arc, disk_id_to_region_ids: Mutex>>, - pub v2p_mappings: Mutex>>, + pub v2p_mappings: Mutex>, mock_propolis: Mutex>, PropolisClient)>>, /// lists of external IPs assigned to instances @@ -189,7 +187,7 @@ impl SledAgent { nexus_address, nexus_client, disk_id_to_region_ids: Mutex::new(HashMap::new()), - v2p_mappings: Mutex::new(HashMap::new()), + v2p_mappings: Mutex::new(HashSet::new()), external_ips: Mutex::new(HashMap::new()), mock_propolis: Mutex::new(None), config: config.clone(), @@ -672,36 +670,29 @@ impl SledAgent { pub async fn set_virtual_nic_host( &self, - interface_id: Uuid, - mapping: &SetVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { let mut v2p_mappings = self.v2p_mappings.lock().await; - let vec = v2p_mappings.entry(interface_id).or_default(); - vec.push(mapping.clone()); + v2p_mappings.insert(mapping.clone()); Ok(()) } pub async fn unset_virtual_nic_host( &self, - interface_id: Uuid, - mapping: &DeleteVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { let mut v2p_mappings = self.v2p_mappings.lock().await; - let vec = v2p_mappings.entry(interface_id).or_default(); - vec.retain(|x| { - x.virtual_ip != mapping.virtual_ip || x.vni != mapping.vni - }); - - // If the last entry was removed, remove the entire interface ID so that - // tests don't have to distinguish never-created entries from - // previously-extant-but-now-empty entries. 
- if vec.is_empty() { - v2p_mappings.remove(&interface_id); - } - + v2p_mappings.remove(mapping); Ok(()) } + pub async fn list_virtual_nics( + &self, + ) -> Result, Error> { + let v2p_mappings = self.v2p_mappings.lock().await; + Ok(Vec::from_iter(v2p_mappings.clone())) + } + pub async fn instance_put_external_ip( &self, instance_id: Uuid, diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index 39a56474204..670d486686c 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -37,9 +37,7 @@ use derive_more::From; use dropshot::HttpError; use futures::stream::FuturesUnordered; use futures::StreamExt; -use illumos_utils::opte::params::{ - DeleteVirtualNetworkInterfaceHost, SetVirtualNetworkInterfaceHost, -}; +use illumos_utils::opte::params::VirtualNetworkInterfaceHost; use illumos_utils::opte::PortManager; use illumos_utils::zone::PROPOLIS_ZONE_PREFIX; use illumos_utils::zone::ZONE_PREFIX; @@ -1051,9 +1049,15 @@ impl SledAgent { .map_err(Error::from) } + pub async fn list_virtual_nics( + &self, + ) -> Result, Error> { + self.inner.port_manager.list_virtual_nics().map_err(Error::from) + } + pub async fn set_virtual_nic_host( &self, - mapping: &SetVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { self.inner .port_manager @@ -1063,7 +1067,7 @@ impl SledAgent { pub async fn unset_virtual_nic_host( &self, - mapping: &DeleteVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { self.inner .port_manager diff --git a/smf/nexus/multi-sled/config-partial.toml b/smf/nexus/multi-sled/config-partial.toml index 696411966b4..0ed7a0562b9 100644 --- a/smf/nexus/multi-sled/config-partial.toml +++ b/smf/nexus/multi-sled/config-partial.toml @@ -56,6 +56,7 @@ sync_service_zone_nat.period_secs = 30 switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 service_firewall_propagation.period_secs = 300 +v2p_mapping_propagation.period_secs = 30 instance_watcher.period_secs = 30 [default_region_allocation_strategy] diff --git a/smf/nexus/single-sled/config-partial.toml b/smf/nexus/single-sled/config-partial.toml index 206f716fa71..c57d2d3ba20 100644 --- a/smf/nexus/single-sled/config-partial.toml +++ b/smf/nexus/single-sled/config-partial.toml @@ -56,6 +56,7 @@ sync_service_zone_nat.period_secs = 30 switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 service_firewall_propagation.period_secs = 300 +v2p_mapping_propagation.period_secs = 30 instance_watcher.period_secs = 30 [default_region_allocation_strategy] diff --git a/tools/maghemite_mg_openapi_version b/tools/maghemite_mg_openapi_version index 966e4de7fe0..73095bd42d4 100644 --- a/tools/maghemite_mg_openapi_version +++ b/tools/maghemite_mg_openapi_version @@ -1,2 +1,2 @@ -COMMIT="025389ff39d594bf2b815377e2c1dc4dd23b1f96" -SHA2="a5d2f275c99152711dec1df58fd49d459d3fcb8fbfc7a7f48f432be248d74639" +COMMIT="23b0cf439f9f62b9a4933e55cc72bcaddc9596cd" +SHA2="fdb33ee7425923560534672264008ef8948d227afce948ab704de092ad72157c" diff --git a/tools/opte_version b/tools/opte_version index e1b3e114995..41d9666b049 100644 --- a/tools/opte_version +++ b/tools/opte_version @@ -1 +1 @@ -0.28.233 +0.29.248 From e2d9575cf76cf0554bac2ad48391da023e0ee877 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 22 May 2024 14:16:51 -0500 Subject: [PATCH 36/37] Bump web console (#5807) Highlights: soft image validation, logout button on error pages to help deal with auth-related errors. 
https://github.com/oxidecomputer/console/compare/078d1711...a228b75b * [a228b75b](https://github.com/oxidecomputer/console/commit/a228b75b) bump omicron (only one tiny diff in validators) * [fc91ec1e](https://github.com/oxidecomputer/console/commit/fc91ec1e) oxidecomputer/console#2256 * [39b4491e](https://github.com/oxidecomputer/console/commit/39b4491e) oxidecomputer/console#2230 * [e4e912ca](https://github.com/oxidecomputer/console/commit/e4e912ca) oxidecomputer/console#2247 * [dcf09ec9](https://github.com/oxidecomputer/console/commit/dcf09ec9) oxidecomputer/console#2217 * [c36b3d63](https://github.com/oxidecomputer/console/commit/c36b3d63) oxidecomputer/console#2238 * [a8eb7745](https://github.com/oxidecomputer/console/commit/a8eb7745) oxidecomputer/console#2251 * [9b20b7c9](https://github.com/oxidecomputer/console/commit/9b20b7c9) oxidecomputer/console#2248 * [f20a5bcb](https://github.com/oxidecomputer/console/commit/f20a5bcb) oxidecomputer/console#2245 * [b815dd8f](https://github.com/oxidecomputer/console/commit/b815dd8f) oxidecomputer/console#2244 * [8c7b2946](https://github.com/oxidecomputer/console/commit/8c7b2946) add node_modules to eslint ignore patterns * [90e78dbb](https://github.com/oxidecomputer/console/commit/90e78dbb) oxidecomputer/console#2237 * [b603d2dd](https://github.com/oxidecomputer/console/commit/b603d2dd) oxidecomputer/console#2242 * [bfce37c7](https://github.com/oxidecomputer/console/commit/bfce37c7) upgrade @oxide/openapi-gen-ts to 0.2.2 * [efceb17d](https://github.com/oxidecomputer/console/commit/efceb17d) oxidecomputer/console#2236 * [1aa46459](https://github.com/oxidecomputer/console/commit/1aa46459) oxidecomputer/console#2235 * [b400ae78](https://github.com/oxidecomputer/console/commit/b400ae78) oxidecomputer/console#2225 * [7bb3bbf7](https://github.com/oxidecomputer/console/commit/7bb3bbf7) oxidecomputer/console#2229 * [c56a9ec5](https://github.com/oxidecomputer/console/commit/c56a9ec5) oxidecomputer/console#2228 * [cd9d1f99](https://github.com/oxidecomputer/console/commit/cd9d1f99) oxidecomputer/console#2227 * [ee269bd9](https://github.com/oxidecomputer/console/commit/ee269bd9) oxidecomputer/console#2223 --- tools/console_version | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/console_version b/tools/console_version index 182ef5e8186..07cf4cc9125 100644 --- a/tools/console_version +++ b/tools/console_version @@ -1,2 +1,2 @@ -COMMIT="078d17117a3937d571bb5535f9791db65be7afc5" -SHA2="f3bc51a9ddf5356ecda85ff11eec032da880be162358bb7ee676ab823e59476c" +COMMIT="a228b75ba35952b68c0b8b0892c452d4fc29467a" +SHA2="8d5b06680e5986b633b3f97e46d7823ea2dddf2b98930d8c6a4f7dc1eb382048" From c2f35151c7ac345305f396bc01f088083ccae7e0 Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Wed, 22 May 2024 21:22:46 -0700 Subject: [PATCH 37/37] update to Rust 1.78.0 / bump OPTE to 0.29.250 (#5722) --- .cargo/{config => config.toml} | 0 .github/buildomat/jobs/a4x2-deploy.sh | 2 +- .github/buildomat/jobs/a4x2-prepare.sh | 2 +- .../buildomat/jobs/build-and-test-helios.sh | 2 +- .../buildomat/jobs/build-and-test-linux.sh | 2 +- .github/buildomat/jobs/clippy.sh | 2 +- .github/buildomat/jobs/omicron-common.sh | 2 +- .github/buildomat/jobs/package.sh | 2 +- .github/buildomat/jobs/tuf-repo.sh | 2 +- Cargo.lock | 14 ++++---- Cargo.toml | 4 +-- certificates/src/lib.rs | 4 +-- dev-tools/omdb/src/bin/omdb/db.rs | 19 ---------- dev-tools/omdb/src/bin/omdb/mgs/dashboard.rs | 10 ------ dev-tools/omdb/src/bin/omdb/mgs/sensors.rs | 4 +-- 
nexus/db-model/src/omicron_zone_config.rs | 4 +-- nexus/db-queries/src/db/datastore/mod.rs | 4 +-- nexus/db-queries/src/db/explain.rs | 35 ++++--------------- nexus/db-queries/src/db/raw_query_builder.rs | 12 ------- .../background/sync_switch_configuration.rs | 3 +- nexus/src/app/sagas/snapshot_create.rs | 9 ++--- .../db/src/oxql/ast/table_ops/group_by.rs | 2 +- oximeter/db/src/oxql/ast/table_ops/limit.rs | 4 +-- rust-toolchain.toml | 2 +- sled-agent/src/bootstrap/rss_handle.rs | 8 ----- sled-hardware/src/illumos/mod.rs | 2 +- sled-storage/src/resources.rs | 2 +- tools/opte_version | 2 +- wicket/src/ui/defaults/dimensions.rs | 14 -------- wicketd/src/update_tracker.rs | 2 +- 30 files changed, 44 insertions(+), 132 deletions(-) rename .cargo/{config => config.toml} (100%) diff --git a/.cargo/config b/.cargo/config.toml similarity index 100% rename from .cargo/config rename to .cargo/config.toml diff --git a/.github/buildomat/jobs/a4x2-deploy.sh b/.github/buildomat/jobs/a4x2-deploy.sh index 323b3e2e28a..c8eb998b35f 100755 --- a/.github/buildomat/jobs/a4x2-deploy.sh +++ b/.github/buildomat/jobs/a4x2-deploy.sh @@ -2,7 +2,7 @@ #: #: name = "a4x2-deploy" #: variety = "basic" -#: target = "lab-2.0-opte-0.27" +#: target = "lab-2.0-opte-0.29" #: output_rules = [ #: "/out/falcon/*.log", #: "/out/falcon/*.err", diff --git a/.github/buildomat/jobs/a4x2-prepare.sh b/.github/buildomat/jobs/a4x2-prepare.sh index 1e603fc7d9b..1438ec06dee 100755 --- a/.github/buildomat/jobs/a4x2-prepare.sh +++ b/.github/buildomat/jobs/a4x2-prepare.sh @@ -3,7 +3,7 @@ #: name = "a4x2-prepare" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.77.2" +#: rust_toolchain = "1.78.0" #: output_rules = [ #: "=/out/cargo-bay-ce.tgz", #: "=/out/cargo-bay-cr1.tgz", diff --git a/.github/buildomat/jobs/build-and-test-helios.sh b/.github/buildomat/jobs/build-and-test-helios.sh index a4cbd978a99..b63d2e783f7 100755 --- a/.github/buildomat/jobs/build-and-test-helios.sh +++ b/.github/buildomat/jobs/build-and-test-helios.sh @@ -3,7 +3,7 @@ #: name = "build-and-test (helios)" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.77.2" +#: rust_toolchain = "1.78.0" #: output_rules = [ #: "%/work/*", #: "%/var/tmp/omicron_tmp/*", diff --git a/.github/buildomat/jobs/build-and-test-linux.sh b/.github/buildomat/jobs/build-and-test-linux.sh index f10f07ff7ad..4a1f86c3e16 100755 --- a/.github/buildomat/jobs/build-and-test-linux.sh +++ b/.github/buildomat/jobs/build-and-test-linux.sh @@ -3,7 +3,7 @@ #: name = "build-and-test (ubuntu-22.04)" #: variety = "basic" #: target = "ubuntu-22.04" -#: rust_toolchain = "1.77.2" +#: rust_toolchain = "1.78.0" #: output_rules = [ #: "%/work/*", #: "%/var/tmp/omicron_tmp/*", diff --git a/.github/buildomat/jobs/clippy.sh b/.github/buildomat/jobs/clippy.sh index a5007694ab6..1f4c578e47c 100755 --- a/.github/buildomat/jobs/clippy.sh +++ b/.github/buildomat/jobs/clippy.sh @@ -3,7 +3,7 @@ #: name = "clippy (helios)" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.77.2" +#: rust_toolchain = "1.78.0" #: output_rules = [] # Run clippy on illumos (not just other systems) because a bunch of our code diff --git a/.github/buildomat/jobs/omicron-common.sh b/.github/buildomat/jobs/omicron-common.sh index b238eec7c6c..345d99f4058 100755 --- a/.github/buildomat/jobs/omicron-common.sh +++ b/.github/buildomat/jobs/omicron-common.sh @@ -3,7 +3,7 @@ #: name = "omicron-common (helios)" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.77.2" +#: rust_toolchain 
= "1.78.0" #: output_rules = [] # Verify that omicron-common builds successfully when used as a dependency diff --git a/.github/buildomat/jobs/package.sh b/.github/buildomat/jobs/package.sh index 63e5e1ce716..81ed41a961b 100755 --- a/.github/buildomat/jobs/package.sh +++ b/.github/buildomat/jobs/package.sh @@ -3,7 +3,7 @@ #: name = "helios / package" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.77.2" +#: rust_toolchain = "1.78.0" #: output_rules = [ #: "=/work/package.tar.gz", #: ] diff --git a/.github/buildomat/jobs/tuf-repo.sh b/.github/buildomat/jobs/tuf-repo.sh index 2e3050b4892..5b2d1bd4057 100755 --- a/.github/buildomat/jobs/tuf-repo.sh +++ b/.github/buildomat/jobs/tuf-repo.sh @@ -3,7 +3,7 @@ #: name = "helios / build TUF repo" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.77.2" +#: rust_toolchain = "1.78.0" #: output_rules = [ #: "=/work/manifest.toml", #: "=/work/repo.zip", diff --git a/Cargo.lock b/Cargo.lock index 0d534a3c2f0..1dfaff0d779 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1732,7 +1732,7 @@ dependencies = [ [[package]] name = "derror-macro" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=4cc823b50d3e4a629cdfaab2b3d3382514174ba9#4cc823b50d3e4a629cdfaab2b3d3382514174ba9" +source = "git+https://github.com/oxidecomputer/opte?rev=194a8d1d6443f78d59702a25849607dba33db732#194a8d1d6443f78d59702a25849607dba33db732" dependencies = [ "darling", "proc-macro2", @@ -3481,7 +3481,7 @@ dependencies = [ [[package]] name = "illumos-sys-hdrs" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=4cc823b50d3e4a629cdfaab2b3d3382514174ba9#4cc823b50d3e4a629cdfaab2b3d3382514174ba9" +source = "git+https://github.com/oxidecomputer/opte?rev=194a8d1d6443f78d59702a25849607dba33db732#194a8d1d6443f78d59702a25849607dba33db732" [[package]] name = "illumos-utils" @@ -3894,7 +3894,7 @@ dependencies = [ [[package]] name = "kstat-macro" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=4cc823b50d3e4a629cdfaab2b3d3382514174ba9#4cc823b50d3e4a629cdfaab2b3d3382514174ba9" +source = "git+https://github.com/oxidecomputer/opte?rev=194a8d1d6443f78d59702a25849607dba33db732#194a8d1d6443f78d59702a25849607dba33db732" dependencies = [ "quote", "syn 2.0.64", @@ -6019,7 +6019,7 @@ dependencies = [ [[package]] name = "opte" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=4cc823b50d3e4a629cdfaab2b3d3382514174ba9#4cc823b50d3e4a629cdfaab2b3d3382514174ba9" +source = "git+https://github.com/oxidecomputer/opte?rev=194a8d1d6443f78d59702a25849607dba33db732#194a8d1d6443f78d59702a25849607dba33db732" dependencies = [ "cfg-if", "derror-macro", @@ -6037,7 +6037,7 @@ dependencies = [ [[package]] name = "opte-api" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=4cc823b50d3e4a629cdfaab2b3d3382514174ba9#4cc823b50d3e4a629cdfaab2b3d3382514174ba9" +source = "git+https://github.com/oxidecomputer/opte?rev=194a8d1d6443f78d59702a25849607dba33db732#194a8d1d6443f78d59702a25849607dba33db732" dependencies = [ "illumos-sys-hdrs", "ipnetwork", @@ -6049,7 +6049,7 @@ dependencies = [ [[package]] name = "opte-ioctl" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=4cc823b50d3e4a629cdfaab2b3d3382514174ba9#4cc823b50d3e4a629cdfaab2b3d3382514174ba9" +source = "git+https://github.com/oxidecomputer/opte?rev=194a8d1d6443f78d59702a25849607dba33db732#194a8d1d6443f78d59702a25849607dba33db732" dependencies = [ "libc", "libnet 0.1.0 
(git+https://github.com/oxidecomputer/netadm-sys)", @@ -6123,7 +6123,7 @@ dependencies = [ [[package]] name = "oxide-vpc" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=4cc823b50d3e4a629cdfaab2b3d3382514174ba9#4cc823b50d3e4a629cdfaab2b3d3382514174ba9" +source = "git+https://github.com/oxidecomputer/opte?rev=194a8d1d6443f78d59702a25849607dba33db732#194a8d1d6443f78d59702a25849607dba33db732" dependencies = [ "cfg-if", "illumos-sys-hdrs", diff --git a/Cargo.toml b/Cargo.toml index 16207d2f310..ed2b7cdcfe1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -347,14 +347,14 @@ omicron-sled-agent = { path = "sled-agent" } omicron-test-utils = { path = "test-utils" } omicron-zone-package = "0.11.0" oxide-client = { path = "clients/oxide-client" } -oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "4cc823b50d3e4a629cdfaab2b3d3382514174ba9", features = [ "api", "std" ] } +oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "194a8d1d6443f78d59702a25849607dba33db732", features = [ "api", "std" ] } once_cell = "1.19.0" openapi-lint = { git = "https://github.com/oxidecomputer/openapi-lint", branch = "main" } openapiv3 = "2.0.0" # must match samael's crate! openssl = "0.10" openssl-sys = "0.9" -opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "4cc823b50d3e4a629cdfaab2b3d3382514174ba9" } +opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "194a8d1d6443f78d59702a25849607dba33db732" } oso = "0.27" owo-colors = "4.0.0" oximeter = { path = "oximeter/oximeter" } diff --git a/certificates/src/lib.rs b/certificates/src/lib.rs index 442a9cfdd59..ee4ab4a6bd4 100644 --- a/certificates/src/lib.rs +++ b/certificates/src/lib.rs @@ -412,7 +412,7 @@ mod tests { // Valid certs: either no key usage values, or valid ones. 
for ext_key_usage in &valid_ext_key_usage { let mut params = CertificateParams::new(vec![HOST.to_string()]); - params.extended_key_usages = ext_key_usage.clone(); + params.extended_key_usages.clone_from(ext_key_usage); assert!( validate_cert_with_params(params, &[HOST]).is_ok(), @@ -431,7 +431,7 @@ mod tests { for ext_key_usage in &invalid_ext_key_usage { let mut params = CertificateParams::new(vec![HOST.to_string()]); - params.extended_key_usages = ext_key_usage.clone(); + params.extended_key_usages.clone_from(ext_key_usage); assert!( matches!( diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 5b029b09082..549f289ad04 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -1273,16 +1273,6 @@ async fn cmd_db_disk_physical( // SERVICES -#[derive(Tabled)] -#[tabled(rename_all = "SCREAMING_SNAKE_CASE")] -struct ServiceInstanceRow { - #[tabled(rename = "SERVICE")] - kind: String, - instance_id: Uuid, - addr: String, - sled_serial: String, -} - // Snapshots fn format_snapshot(state: &SnapshotState) -> impl Display { match state { @@ -1438,15 +1428,6 @@ async fn cmd_db_snapshot_info( // SLEDS -#[derive(Tabled)] -#[tabled(rename_all = "SCREAMING_SNAKE_CASE")] -struct ServiceInstanceSledRow { - #[tabled(rename = "SERVICE")] - kind: String, - instance_id: Uuid, - addr: String, -} - #[derive(Tabled)] #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] struct SledRow { diff --git a/dev-tools/omdb/src/bin/omdb/mgs/dashboard.rs b/dev-tools/omdb/src/bin/omdb/mgs/dashboard.rs index 08ecaf31011..cd7628a8409 100644 --- a/dev-tools/omdb/src/bin/omdb/mgs/dashboard.rs +++ b/dev-tools/omdb/src/bin/omdb/mgs/dashboard.rs @@ -101,16 +101,6 @@ trait Attributes: DynClone { fn y_axis_label(&self) -> String; fn axis_value(&self, val: f64) -> String; fn legend_value(&self, val: f64) -> String; - - fn increase(&mut self, _ndx: usize) -> Option { - None - } - - fn decrease(&mut self, _ndx: usize) -> Option { - None - } - - fn clear(&mut self) {} } dyn_clone::clone_trait_object!(Attributes); diff --git a/dev-tools/omdb/src/bin/omdb/mgs/sensors.rs b/dev-tools/omdb/src/bin/omdb/mgs/sensors.rs index d00bebd96c1..f36e8633f96 100644 --- a/dev-tools/omdb/src/bin/omdb/mgs/sensors.rs +++ b/dev-tools/omdb/src/bin/omdb/mgs/sensors.rs @@ -480,12 +480,10 @@ fn sp_info_csv( } if let Some(sensor) = Sensor::from_string(&record[1], &record[2]) { - if sensors.get(&sensor).is_some() { + if !sensors.insert(sensor.clone()) { break; } - sensors.insert(sensor.clone()); - for (ndx, sp) in sps.iter().enumerate() { if let Some(sp) = sp { let value = match record[ndx + len].parse::() { diff --git a/nexus/db-model/src/omicron_zone_config.rs b/nexus/db-model/src/omicron_zone_config.rs index 05383cd0560..c2258dba6cd 100644 --- a/nexus/db-model/src/omicron_zone_config.rs +++ b/nexus/db-model/src/omicron_zone_config.rs @@ -92,7 +92,7 @@ impl OmicronZone { let (first_port, last_port) = snat_cfg.port_range_raw(); ntp_ntp_servers = Some(ntp_servers.clone()); ntp_dns_servers = Some(dns_servers.clone()); - ntp_ntp_domain = domain.clone(); + ntp_ntp_domain.clone_from(domain); snat_ip = Some(IpNetwork::from(snat_cfg.ip)); snat_first_port = Some(SqlU16::from(first_port)); snat_last_port = Some(SqlU16::from(last_port)); @@ -162,7 +162,7 @@ impl OmicronZone { } => { ntp_ntp_servers = Some(ntp_servers.clone()); ntp_dns_servers = Some(dns_servers.clone()); - ntp_ntp_domain = domain.clone(); + ntp_ntp_domain.clone_from(domain); (ZoneType::InternalNtp, address, None) } OmicronZoneType::Nexus { 
diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 7c2cf8cf817..16183958008 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -984,8 +984,8 @@ mod test { // This is a little goofy, but it catches a bug that has // happened before. The returned columns share names (like // "id"), so we need to process them in-order. - assert!(regions.get(&dataset.id()).is_none()); - assert!(disk_datasets.get(&region.id()).is_none()); + assert!(!regions.contains(&dataset.id())); + assert!(!disk_datasets.contains(&region.id())); // Dataset must not be eligible for provisioning. if let Some(kind) = diff --git a/nexus/db-queries/src/db/explain.rs b/nexus/db-queries/src/db/explain.rs index 3de5b4f2809..24fd9930407 100644 --- a/nexus/db-queries/src/db/explain.rs +++ b/nexus/db-queries/src/db/explain.rs @@ -4,6 +4,11 @@ //! Utility allowing Diesel to EXPLAIN queries. +// These utilities can be useful during development, so we don't want to +// `#[cfg(test)]` the module, but it's likely they won't be used outside of +// tests. +#![cfg_attr(not(test), allow(dead_code))] + use super::pool::DbConnection; use async_bb8_diesel::AsyncRunQueryDsl; use async_trait::async_trait; @@ -17,33 +22,6 @@ use diesel::result::Error as DieselError; /// Q: The Query we're explaining. /// /// EXPLAIN: -pub trait Explainable { - /// Syncronously issues an explain statement. - fn explain( - self, - conn: &mut DbConnection, - ) -> Result; -} - -impl Explainable for Q -where - Q: QueryFragment - + QueryId - + RunQueryDsl - + Sized - + 'static, -{ - fn explain( - self, - conn: &mut DbConnection, - ) -> Result { - Ok(ExplainStatement { query: self } - .get_results::(conn)? - .join("\n")) - } -} - -/// An async variant of [`Explainable`]. #[async_trait] pub trait ExplainableAsync { /// Asynchronously issues an explain statement. @@ -185,7 +163,8 @@ mod test { logctx.cleanup_successful(); } - // Tests that ".explain()" can tell us when we're doing full table scans. + // Tests that ".explain_async()" can tell us when we're doing full table + // scans.
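The `#![cfg_attr(not(test), allow(dead_code))]` attribute added to explain.rs keeps development-only helpers compiled in every profile while silencing dead-code warnings outside the test profile. A hypothetical lib.rs sketch of the same shape (the helper and test are made up for illustration):

    // lib.rs of some crate; the inner attribute applies crate-wide.
    #![cfg_attr(not(test), allow(dead_code))]

    // A helper that is only exercised from tests today, but is handy to keep
    // around for ad-hoc debugging.
    fn double(x: u32) -> u32 {
        x * 2
    }

    #[cfg(test)]
    mod tests {
        use super::double;

        #[test]
        fn double_works() {
            assert_eq!(double(21), 42);
        }
    }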
#[tokio::test] async fn test_explain_full_table_scan() { let logctx = dev::test_setup_log("test_explain_full_table_scan"); diff --git a/nexus/db-queries/src/db/raw_query_builder.rs b/nexus/db-queries/src/db/raw_query_builder.rs index 5c803e20ac9..c7215417c58 100644 --- a/nexus/db-queries/src/db/raw_query_builder.rs +++ b/nexus/db-queries/src/db/raw_query_builder.rs @@ -69,18 +69,6 @@ enum TrustedStrVariants { ValidatedExplicitly(String), } -trait SqlQueryBinds { - fn add_bind(self, bind_counter: &BindParamCounter) -> Self; -} - -impl<'a, Query> SqlQueryBinds - for diesel::query_builder::BoxedSqlQuery<'a, Pg, Query> -{ - fn add_bind(self, bind_counter: &BindParamCounter) -> Self { - self.sql("$").sql(bind_counter.next().to_string()) - } -} - type BoxedQuery = diesel::query_builder::BoxedSqlQuery< 'static, Pg, diff --git a/nexus/src/app/background/sync_switch_configuration.rs b/nexus/src/app/background/sync_switch_configuration.rs index dc7aa745760..7efe9ef92b6 100644 --- a/nexus/src/app/background/sync_switch_configuration.rs +++ b/nexus/src/app/background/sync_switch_configuration.rs @@ -551,7 +551,8 @@ impl BackgroundTask for SwitchPortSettingsManager { // Same thing as above, check to see if we've already built the announce set, // if so we'll skip this step - if bgp_announce_prefixes.get(&bgp_config.bgp_announce_set_id).is_none() { + #[allow(clippy::map_entry)] + if !bgp_announce_prefixes.contains_key(&bgp_config.bgp_announce_set_id) { let announcements = match self .datastore .bgp_announce_list( diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 2a5deeff510..287571cfd50 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -1572,12 +1572,9 @@ fn create_snapshot_from_disk( if let Some(socket_map) = socket_map { for target in &mut opts.target { - *target = socket_map - .get(target) - .ok_or_else(|| { - anyhow!("target {} not found in map!", target) - })? 
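The `#[allow(clippy::map_entry)]` above is worth a note: clippy normally suggests rewriting a `contains_key` check followed by `insert` into the entry API, but that suggestion is awkward when the value is produced by fallible or asynchronous work between the check and the insert, as in the background task here. A small sketch of both shapes, with hypothetical names:

    use std::collections::BTreeMap;

    // Entry-API form clippy prefers when the value is cheap to compute inline.
    fn cache_simple(cache: &mut BTreeMap<String, usize>, key: &str) {
        cache.entry(key.to_string()).or_insert_with(|| key.len());
    }

    // Check-then-insert form, kept (with the lint allowed) when producing the
    // value can fail or must await something.
    fn cache_fallible(
        cache: &mut BTreeMap<String, usize>,
        key: &str,
    ) -> Result<(), String> {
        if !cache.contains_key(key) {
            let value = expensive_lookup(key)?;
            cache.insert(key.to_string(), value);
        }
        Ok(())
    }

    fn expensive_lookup(key: &str) -> Result<usize, String> {
        if key.is_empty() {
            return Err("empty key".to_string());
        }
        Ok(key.len())
    }

    fn main() {
        let mut cache = BTreeMap::new();
        cache_simple(&mut cache, "announce-set");
        cache_fallible(&mut cache, "bgp-config").unwrap();
        assert_eq!(cache.len(), 2);
    }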
- .clone(); + target.clone_from(socket_map.get(target).ok_or_else( + || anyhow!("target {} not found in map!", target), + )?); } } diff --git a/oximeter/db/src/oxql/ast/table_ops/group_by.rs b/oximeter/db/src/oxql/ast/table_ops/group_by.rs index 3284c70c1fa..f40572d762c 100644 --- a/oximeter/db/src/oxql/ast/table_ops/group_by.rs +++ b/oximeter/db/src/oxql/ast/table_ops/group_by.rs @@ -496,7 +496,7 @@ mod tests { ) .unwrap(); ts0.points.start_times = None; - ts0.points.timestamps = timestamps.clone(); + ts0.points.timestamps.clone_from(&timestamps); *ts0.points.values_mut(0).unwrap() = ValueArray::Double(vec![ Some(1.0), if matches!( diff --git a/oximeter/db/src/oxql/ast/table_ops/limit.rs b/oximeter/db/src/oxql/ast/table_ops/limit.rs index 46d19b9cdc6..0205868f5c9 100644 --- a/oximeter/db/src/oxql/ast/table_ops/limit.rs +++ b/oximeter/db/src/oxql/ast/table_ops/limit.rs @@ -150,7 +150,7 @@ mod tests { MetricType::Gauge, ) .unwrap(); - timeseries.points.timestamps = timestamps.clone(); + timeseries.points.timestamps.clone_from(&timestamps); timeseries.points.values[0].values.as_integer_mut().unwrap().extend([ Some(1), Some(2), @@ -166,7 +166,7 @@ mod tests { MetricType::Gauge, ) .unwrap(); - timeseries.points.timestamps = timestamps.clone(); + timeseries.points.timestamps.clone_from(&timestamps); timeseries.points.values[0].values.as_integer_mut().unwrap().extend([ Some(4), Some(5), diff --git a/rust-toolchain.toml b/rust-toolchain.toml index a2ed3895ecd..7c513cfbad3 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -4,5 +4,5 @@ # # We choose a specific toolchain (rather than "stable") for repeatability. The # intent is to keep this up-to-date with recently-released stable Rust. -channel = "1.77.2" +channel = "1.78.0" profile = "default" diff --git a/sled-agent/src/bootstrap/rss_handle.rs b/sled-agent/src/bootstrap/rss_handle.rs index 5d9c01e7f27..9baf0e7ef31 100644 --- a/sled-agent/src/bootstrap/rss_handle.rs +++ b/sled-agent/src/bootstrap/rss_handle.rs @@ -299,11 +299,3 @@ impl BootstrapAgentHandleReceiver { tx.send(Ok(())).unwrap(); } } - -struct AbortOnDrop(JoinHandle); - -impl Drop for AbortOnDrop { - fn drop(&mut self) { - self.0.abort(); - } -} diff --git a/sled-hardware/src/illumos/mod.rs b/sled-hardware/src/illumos/mod.rs index 0bf2fa6e533..e9a47de29e4 100644 --- a/sled-hardware/src/illumos/mod.rs +++ b/sled-hardware/src/illumos/mod.rs @@ -263,7 +263,7 @@ impl HardwareView { updates.push(DiskAdded(disk.clone())); } - self.disks = polled_hw.disks.clone(); + self.disks.clone_from(&polled_hw.disks); } } diff --git a/sled-storage/src/resources.rs b/sled-storage/src/resources.rs index a2e75249b3a..b44c8e5b53a 100644 --- a/sled-storage/src/resources.rs +++ b/sled-storage/src/resources.rs @@ -347,7 +347,7 @@ impl StorageResources { // This leaves the presence of the disk still in "Self", but // downgrades the disk to an unmanaged status.
ManagedDisk::ExplicitlyManaged(disk) => { - if self.control_plane_disks.get(identity).is_none() { + if !self.control_plane_disks.contains_key(identity) { *managed_disk = ManagedDisk::Unmanaged(RawDisk::from(disk.clone())); updated = true; diff --git a/tools/opte_version b/tools/opte_version index 41d9666b049..2de18d2d9b0 100644 --- a/tools/opte_version +++ b/tools/opte_version @@ -1 +1 @@ -0.29.248 +0.29.250 diff --git a/wicket/src/ui/defaults/dimensions.rs b/wicket/src/ui/defaults/dimensions.rs index ca768077867..2961400aa7f 100644 --- a/wicket/src/ui/defaults/dimensions.rs +++ b/wicket/src/ui/defaults/dimensions.rs @@ -18,14 +18,6 @@ pub trait RectExt { /// /// Panics if `height > self.height`. fn center_vertically(self, height: u16) -> Self; - - /// Create a new maximally sized `Rect` that is bounded by `self`, and - /// shifted down by `y` columns. In order to maintain the bounding, the - /// new `Rect` is originally sized to `self` and then shrunk by the same - /// amount it is shifted downwards: namely `y` columns. - /// - /// Panics if `y > self.height`. - fn move_down_within_bounds(self, y: u16) -> Self; } impl RectExt for Rect { @@ -42,10 +34,4 @@ impl RectExt for Rect { self.height = height; self } - - fn move_down_within_bounds(mut self, y: u16) -> Self { - self.y = self.y + y; - self.height -= y; - self - } } diff --git a/wicketd/src/update_tracker.rs b/wicketd/src/update_tracker.rs index 42853a40764..10253bc2f77 100644 --- a/wicketd/src/update_tracker.rs +++ b/wicketd/src/update_tracker.rs @@ -269,7 +269,7 @@ impl UpdateTracker { // This used to check that the task was finished, but we changed // that in favor of forcing users to clear update state before // starting a new one. - update_data.sp_update_data.get(sp).is_some() + update_data.sp_update_data.contains_key(sp) }) .copied() .collect();
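Several hunks in this patch (the datastore tests, StorageResources, the update tracker, the sensors CSV reader) replace `.get(x).is_some()` / `.is_none()` with direct membership checks, or fold a set lookup and insert into the boolean that `insert` returns. A self-contained sketch of both idioms with made-up data:

    use std::collections::{BTreeMap, BTreeSet};

    fn main() {
        let mut sp_update_data: BTreeMap<&str, u32> = BTreeMap::new();
        sp_update_data.insert("sled-0", 1);

        // Membership checks read directly as membership checks.
        assert!(sp_update_data.contains_key("sled-0"));
        assert!(!sp_update_data.contains_key("sled-1"));

        // Set `insert` reports whether the value was newly added, so a
        // separate `get(..).is_some()` check beforehand is redundant.
        let mut seen = BTreeSet::new();
        assert!(seen.insert("fan-speed"));   // first occurrence: inserted
        assert!(!seen.insert("fan-speed"));  // duplicate: already present
    }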