From 5176bb3c0398488d0c9669fa049e85b33fbacd54 Mon Sep 17 00:00:00 2001
From: John Gallagher
Date: Thu, 15 Feb 2024 14:12:04 -0500
Subject: [PATCH 001/157] `IncompleteNetworkInterface`: allow user-specified explicit `slot` (#5065)

This makes several minor changes to plumb slots through:

* `IncompleteNetworkInterface` now stores an optional slot, just like it
  stores an optional IP/MAC address
* In `network_interfaces::InsertQuery`, if the incoming slot is set, we use
  it directly instead of running the `NextItem`-based subquery
* Adds a partial index to ensure uniqueness of a slot within a single
  `parent_id` (I believe this is correct, but would love confirmation from
  someone more familiar!)
* `IncompleteNetworkInterface::new_service()` now takes a _non-optional_ IP,
  MAC address, and slot. This matches how it was called in all non-test code.
* Tweaked the Nexus internal API used for RSS handoff to include the slot in
  the description of NICs.

This is a partial fix for #5056, and should produce correct behavior on new
systems that run through RSS, even without a fix for #5055 (because we bypass
`NextItem` altogether with this change). In particular, I think this should
unblock testing of #5045 on madrid / testbeds. It does not address the
already-recorded-NICs-with-incorrect-slots on systems like dogfood; I'll take
care of that in a subsequent PR.
---
 nexus/db-model/src/network_interface.rs       |  55 +++--
 nexus/db-model/src/schema.rs                  |   2 +-
 nexus/db-queries/src/db/datastore/rack.rs     |  19 +-
 .../src/db/queries/network_interface.rs       | 201 +++++++++++++++---
 nexus/src/app/mod.rs                          |   2 +-
 nexus/test-utils/src/lib.rs                   |   2 +
 nexus/types/src/internal_api/params.rs        |   1 +
 openapi/nexus-internal.json                   |   8 +-
 schema/crdb/34.0.0/up.sql                     |   6 +
 schema/crdb/dbinit.sql                        |  13 +-
 sled-agent/src/params.rs                      |   3 +
 sled-agent/src/sim/server.rs                  |   2 +
 12 files changed, 266 insertions(+), 48 deletions(-)
 create mode 100644 schema/crdb/34.0.0/up.sql

diff --git a/nexus/db-model/src/network_interface.rs b/nexus/db-model/src/network_interface.rs
index 3d3fabbe66..01317fc160 100644
--- a/nexus/db-model/src/network_interface.rs
+++ b/nexus/db-model/src/network_interface.rs
@@ -17,6 +17,13 @@ use nexus_types::identity::Resource;
 use omicron_common::api::external;
 use uuid::Uuid;
 
+/// The max number of interfaces that may be associated with a resource,
+/// e.g., instance or service.
+///
+/// RFD 135 caps instances at 8 interfaces and we use the same limit for
+/// all types of interfaces for simplicity.
+pub const MAX_NICS_PER_INSTANCE: usize = 8;
+
 impl_enum_type!
{ #[derive(SqlType, QueryId, Debug, Clone, Copy)] #[diesel(postgres_type(name = "network_interface_kind", schema = "public"))] @@ -210,9 +217,11 @@ pub struct IncompleteNetworkInterface { pub subnet: VpcSubnet, pub ip: Option, pub mac: Option, + pub slot: Option, } impl IncompleteNetworkInterface { + #[allow(clippy::too_many_arguments)] fn new( interface_id: Uuid, kind: NetworkInterfaceKind, @@ -221,24 +230,36 @@ impl IncompleteNetworkInterface { identity: external::IdentityMetadataCreateParams, ip: Option, mac: Option, + slot: Option, ) -> Result { if let Some(ip) = ip { subnet.check_requestable_addr(ip)?; }; - match (mac, kind) { - (Some(mac), NetworkInterfaceKind::Instance) if !mac.is_guest() => { - return Err(external::Error::invalid_request(&format!( - "invalid MAC address {} for guest NIC", - mac - ))); + if let Some(mac) = mac { + match kind { + NetworkInterfaceKind::Instance => { + if !mac.is_guest() { + return Err(external::Error::invalid_request(format!( + "invalid MAC address {mac} for guest NIC", + ))); + } + } + NetworkInterfaceKind::Service => { + if !mac.is_system() { + return Err(external::Error::invalid_request(format!( + "invalid MAC address {mac} for service NIC", + ))); + } + } } - (Some(mac), NetworkInterfaceKind::Service) if !mac.is_system() => { - return Err(external::Error::invalid_request(&format!( - "invalid MAC address {} for service NIC", - mac + } + if let Some(slot) = slot { + if usize::from(slot) >= MAX_NICS_PER_INSTANCE { + return Err(external::Error::invalid_request(format!( + "invalid slot {slot} for NIC (max slot = {})", + MAX_NICS_PER_INSTANCE - 1, ))); } - _ => {} } let identity = NetworkInterfaceIdentity::new(interface_id, identity); Ok(IncompleteNetworkInterface { @@ -248,6 +269,7 @@ impl IncompleteNetworkInterface { subnet, ip, mac, + slot, }) } @@ -266,6 +288,7 @@ impl IncompleteNetworkInterface { identity, ip, None, + None, ) } @@ -274,8 +297,9 @@ impl IncompleteNetworkInterface { service_id: Uuid, subnet: VpcSubnet, identity: external::IdentityMetadataCreateParams, - ip: Option, - mac: Option, + ip: std::net::IpAddr, + mac: external::MacAddr, + slot: u8, ) -> Result { Self::new( interface_id, @@ -283,8 +307,9 @@ impl IncompleteNetworkInterface { service_id, subnet, identity, - ip, - mac, + Some(ip), + Some(mac), + Some(slot), ) } } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 7fc4f9ae45..9ebf7516cf 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -13,7 +13,7 @@ use omicron_common::api::external::SemverVersion; /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(33, 0, 1); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(34, 0, 0); table! 
{ disk (id) { diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 50bae03c2d..99ed71a073 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -482,8 +482,9 @@ impl DataStore { name: nic.name.clone(), description: format!("{} service vNIC", service.kind), }, - Some(nic.ip), - Some(nic.mac), + nic.ip, + nic.mac, + nic.slot, ) .map_err(|e| RackInitError::AddingNic(e))?; Some((db_ip, db_nic)) @@ -504,8 +505,9 @@ impl DataStore { name: nic.name.clone(), description: format!("{} service vNIC", service.kind), }, - Some(nic.ip), - Some(nic.mac), + nic.ip, + nic.mac, + nic.slot, ) .map_err(|e| RackInitError::AddingNic(e))?; Some((db_ip, db_nic)) @@ -1159,6 +1161,7 @@ mod test { name: "external-dns".parse().unwrap(), ip: external_dns_pip.into(), mac: macs.next().unwrap(), + slot: 0, }, }, }, @@ -1178,6 +1181,7 @@ mod test { name: "ntp1".parse().unwrap(), ip: ntp1_pip.into(), mac: macs.next().unwrap(), + slot: 0, }, }, }, @@ -1193,6 +1197,7 @@ mod test { name: "nexus".parse().unwrap(), ip: nexus_pip.into(), mac: macs.next().unwrap(), + slot: 0, }, }, }, @@ -1212,6 +1217,7 @@ mod test { name: "ntp2".parse().unwrap(), ip: ntp2_pip.into(), mac: macs.next().unwrap(), + slot: 0, }, }, }, @@ -1406,6 +1412,7 @@ mod test { name: "nexus1".parse().unwrap(), ip: nexus_pip1.into(), mac: macs.next().unwrap(), + slot: 0, }, }, }, @@ -1421,6 +1428,7 @@ mod test { name: "nexus2".parse().unwrap(), ip: nexus_pip2.into(), mac: macs.next().unwrap(), + slot: 0, }, }, }, @@ -1603,6 +1611,7 @@ mod test { name: "nexus".parse().unwrap(), ip: nexus_pip.into(), mac: macs.next().unwrap(), + slot: 0, }, }, }]; @@ -1662,6 +1671,7 @@ mod test { name: "external-dns".parse().unwrap(), ip: external_dns_pip.into(), mac: macs.next().unwrap(), + slot: 0, }, }, }, @@ -1677,6 +1687,7 @@ mod test { name: "nexus".parse().unwrap(), ip: nexus_pip.into(), mac: macs.next().unwrap(), + slot: 0, }, }, }, diff --git a/nexus/db-queries/src/db/queries/network_interface.rs b/nexus/db-queries/src/db/queries/network_interface.rs index ae3e2c8ead..0643089316 100644 --- a/nexus/db-queries/src/db/queries/network_interface.rs +++ b/nexus/db-queries/src/db/queries/network_interface.rs @@ -25,8 +25,8 @@ use diesel::QueryResult; use diesel::RunQueryDsl; use ipnetwork::IpNetwork; use ipnetwork::Ipv4Network; -use nexus_db_model::NetworkInterfaceKind; -use nexus_db_model::NetworkInterfaceKindEnum; +use nexus_db_model::{NetworkInterfaceKind, MAX_NICS_PER_INSTANCE}; +use nexus_db_model::{NetworkInterfaceKindEnum, SqlU8}; use omicron_common::api::external; use omicron_common::api::external::MacAddr; use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; @@ -34,13 +34,6 @@ use once_cell::sync::Lazy; use std::net::IpAddr; use uuid::Uuid; -/// The max number of interfaces that may be associated with a resource, -/// e.g., instance or service. -/// -/// RFD 135 caps instances at 8 interfaces and we use the same limit for -/// all types of interfaces for simplicity. 
-pub(crate) const MAX_NICS: usize = 8; - // These are sentinel values and other constants used to verify the state of the // system when operating on network interfaces @@ -111,6 +104,8 @@ pub enum InsertError { IpAddressNotAvailable(std::net::IpAddr), /// An explicity-requested MAC address is already in use MacAddressNotAvailable(MacAddr), + /// An explicity-requested interface slot is already in use + SlotNotAvailable(u8), /// There are no slots available for a new interface NoSlotsAvailable, /// There are no MAC addresses available @@ -176,21 +171,26 @@ impl InsertError { )) } InsertError::IpAddressNotAvailable(ip) => { - external::Error::invalid_request(&format!( + external::Error::invalid_request(format!( "The IP address '{}' is not available", ip )) } InsertError::MacAddressNotAvailable(mac) => { - external::Error::invalid_request(&format!( + external::Error::invalid_request(format!( "The MAC address '{}' is not available", mac )) } + InsertError::SlotNotAvailable(slot) => { + external::Error::invalid_request(format!( + "The interface slot '{slot}' is not available", + )) + } InsertError::NoSlotsAvailable => { - external::Error::invalid_request(&format!( + external::Error::invalid_request(format!( "May not attach more than {} network interfaces", - MAX_NICS + MAX_NICS_PER_INSTANCE )) } InsertError::NoMacAddrressesAvailable => { @@ -260,6 +260,12 @@ fn decode_database_error( const MAC_NOT_AVAILABLE_CONSTRAINT: &str = "network_interface_vpc_id_mac_key"; + // The name of the index whose uniqueness is violated if we try to assign a + // slot to an interface that is already allocated to another interface in + // the same instance or service. + const SLOT_NOT_AVAILABLE_CONSTRAINT: &str = + "network_interface_parent_id_slot_key"; + // The name of the index whose uniqueness is violated if we try to assign a // name to an interface that is already used for another interface on the // same resource. @@ -386,6 +392,12 @@ fn decode_database_error( let mac = interface.mac.unwrap_or_else(|| MacAddr::from_i64(0)); InsertError::MacAddressNotAvailable(mac) } + // Constraint violated if a user-requested slot has + // already been assigned within the same instance or service. 
+ Some(constraint) if constraint == SLOT_NOT_AVAILABLE_CONSTRAINT => { + let slot = interface.slot.unwrap_or(0); + InsertError::SlotNotAvailable(slot) + } // Constraint violated if the user-requested name is already // assigned to an interface on this resource Some(constraint) if constraint == NAME_CONFLICT_CONSTRAINT => { @@ -565,7 +577,7 @@ impl NextNicSlot { pub fn new(parent_id: Uuid) -> Self { let generator = DefaultShiftGenerator { base: 0, - max_shift: i64::try_from(MAX_NICS) + max_shift: i64::try_from(MAX_NICS_PER_INSTANCE) .expect("Too many network interfaces"), min_shift: 0, }; @@ -984,6 +996,7 @@ pub struct InsertQuery { parent_id_str: String, ip_sql: Option, mac_sql: Option, + slot_sql: Option, next_mac_subquery: NextMacAddress, next_ipv4_address_subquery: NextIpv4Address, next_slot_subquery: NextNicSlot, @@ -998,6 +1011,7 @@ impl InsertQuery { let parent_id_str = interface.parent_id.to_string(); let ip_sql = interface.ip.map(|ip| ip.into()); let mac_sql = interface.mac.map(|mac| mac.into()); + let slot_sql = interface.slot.map(|slot| slot.into()); let next_mac_subquery = NextMacAddress::new(interface.subnet.vpc_id, interface.kind); let next_ipv4_address_subquery = NextIpv4Address::new( @@ -1015,6 +1029,7 @@ impl InsertQuery { parent_id_str, ip_sql, mac_sql, + slot_sql, next_mac_subquery, next_ipv4_address_subquery, next_slot_subquery, @@ -1169,7 +1184,11 @@ impl QueryFragment for InsertQuery { out.push_identifier(dsl::ip::NAME)?; out.push_sql(", "); - select_from_cte(out.reborrow(), dsl::slot::NAME)?; + if let Some(slot) = &self.slot_sql { + out.push_bind_param::(slot)?; + } else { + select_from_cte(out.reborrow(), dsl::slot::NAME)?; + } out.push_sql(", "); select_from_cte(out.reborrow(), dsl::is_primary::NAME)?; @@ -1681,7 +1700,7 @@ mod tests { use super::first_available_address; use super::last_address_offset; use super::InsertError; - use super::MAX_NICS; + use super::MAX_NICS_PER_INSTANCE; use super::NUM_INITIAL_RESERVED_IP_ADDRESSES; use crate::authz; use crate::context::OpContext; @@ -1713,6 +1732,7 @@ mod tests { use omicron_common::api::external::MacAddr; use omicron_test_utils::dev; use omicron_test_utils::dev::db::CockroachInstance; + use std::collections::HashSet; use std::convert::TryInto; use std::net::IpAddr; use std::net::Ipv4Addr; @@ -2167,8 +2187,14 @@ mod tests { async fn test_insert_request_mac() { let context = TestContext::new("test_insert_request_mac", 1).await; - // Insert a service NIC with an explicit MAC address + // Ensure service NICs are recorded with the explicit requested MAC + // address let service_id = Uuid::new_v4(); + let ip = context.net1.subnets[0] + .ipv4_block + .iter() + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES) + .unwrap(); let mac = MacAddr::random_system(); let interface = IncompleteNetworkInterface::new_service( Uuid::new_v4(), @@ -2178,8 +2204,9 @@ mod tests { name: "service-nic".parse().unwrap(), description: String::from("service nic"), }, - None, - Some(mac), + ip.into(), + mac, + 0, ) .unwrap(); let inserted_interface = context @@ -2192,13 +2219,60 @@ mod tests { context.success().await; } + #[tokio::test] + async fn test_insert_request_slot() { + let context = TestContext::new("test_insert_request_slot", 1).await; + + // Ensure service NICs are recorded with the explicit requested slot + let mut used_macs = HashSet::new(); + let mut ips = context.net1.subnets[0] + .ipv4_block + .iter() + .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES); + for slot in 0..u8::try_from(MAX_NICS_PER_INSTANCE).unwrap() { + let service_id = Uuid::new_v4(); + 
            let ip = ips.next().expect("exhausted test subnet");
+            let mut mac = MacAddr::random_system();
+            while !used_macs.insert(mac) {
+                mac = MacAddr::random_system();
+            }
+            let interface = IncompleteNetworkInterface::new_service(
+                Uuid::new_v4(),
+                service_id,
+                context.net1.subnets[0].clone(),
+                IdentityMetadataCreateParams {
+                    name: "service-nic".parse().unwrap(),
+                    description: String::from("service nic"),
+                },
+                ip.into(),
+                mac,
+                slot,
+            )
+            .unwrap();
+            let inserted_interface = context
+                .db_datastore
+                .service_create_network_interface_raw(&context.opctx, interface)
+                .await
+                .expect("Failed to insert interface");
+            assert_eq!(inserted_interface.slot, i16::from(slot));
+        }
+
+        context.success().await;
+    }
+
     #[tokio::test]
     async fn test_insert_request_same_mac_fails() {
         let context =
             TestContext::new("test_insert_request_same_mac_fails", 2).await;
 
+        let mut ips = context.net1.subnets[0]
+            .ipv4_block
+            .iter()
+            .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES);
+
         // Insert a service NIC
         let service_id = Uuid::new_v4();
+        let mac = MacAddr::random_system();
         let interface = IncompleteNetworkInterface::new_service(
             Uuid::new_v4(),
             service_id,
@@ -2207,8 +2281,9 @@ mod tests {
                 name: "service-nic".parse().unwrap(),
                 description: String::from("service nic"),
             },
-            None,
-            None,
+            ips.next().expect("exhausted test subnet").into(),
+            mac,
+            0,
         )
         .unwrap();
         let inserted_interface = context
@@ -2216,6 +2291,7 @@ mod tests {
             .service_create_network_interface_raw(&context.opctx, interface)
             .await
             .expect("Failed to insert interface");
+        assert_eq!(inserted_interface.mac.0, mac);
 
         // Inserting an interface with the same MAC should fail, even if all
         // other parameters are valid.
@@ -2228,8 +2304,9 @@ mod tests {
                 name: "new-service-nic".parse().unwrap(),
                 description: String::from("new-service nic"),
             },
-            None,
-            Some(inserted_interface.mac.0),
+            ips.next().expect("exhausted test subnet").into(),
+            mac,
+            0,
         )
         .unwrap();
         let result = context
@@ -2243,6 +2320,80 @@ mod tests {
         context.success().await;
     }
 
+    #[tokio::test]
+    async fn test_insert_request_same_slot_fails() {
+        let context =
+            TestContext::new("test_insert_request_same_slot_fails", 2).await;
+
+        let ip0 = context.net1.subnets[0]
+            .ipv4_block
+            .iter()
+            .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES)
+            .unwrap();
+        let ip1 = context.net1.subnets[1]
+            .ipv4_block
+            .iter()
+            .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES)
+            .unwrap();
+
+        let mut next_mac = {
+            let mut used_macs = HashSet::new();
+            move || {
+                let mut mac = MacAddr::random_system();
+                while !used_macs.insert(mac) {
+                    mac = MacAddr::random_system();
+                }
+                mac
+            }
+        };
+
+        // Insert a service NIC
+        let service_id = Uuid::new_v4();
+        let interface = IncompleteNetworkInterface::new_service(
+            Uuid::new_v4(),
+            service_id,
+            context.net1.subnets[0].clone(),
+            IdentityMetadataCreateParams {
+                name: "service-nic".parse().unwrap(),
+                description: String::from("service nic"),
+            },
+            ip0.into(),
+            next_mac(),
+            0,
+        )
+        .unwrap();
+        let inserted_interface = context
+            .db_datastore
+            .service_create_network_interface_raw(&context.opctx, interface)
+            .await
+            .expect("Failed to insert interface");
+        assert_eq!(inserted_interface.slot, 0);
+
+        // Inserting an interface with the same slot on the same service should
+        // fail.
+        let new_interface = IncompleteNetworkInterface::new_service(
+            Uuid::new_v4(),
+            service_id,
+            context.net1.subnets[1].clone(),
+            IdentityMetadataCreateParams {
+                name: "new-service-nic".parse().unwrap(),
+                description: String::from("new-service nic"),
+            },
+            ip1.into(),
+            next_mac(),
+            0,
+        )
.unwrap(); + let result = context + .db_datastore + .service_create_network_interface_raw(&context.opctx, new_interface) + .await; + assert!( + matches!(result, Err(InsertError::SlotNotAvailable(0))), + "Requesting an interface with an existing slot should fail" + ); + context.success().await; + } + #[tokio::test] async fn test_insert_with_duplicate_name_fails() { let context = @@ -2553,11 +2704,11 @@ mod tests { async fn test_limit_number_of_interfaces_per_instance_query() { let context = TestContext::new( "test_limit_number_of_interfaces_per_instance_query", - MAX_NICS as u8 + 1, + MAX_NICS_PER_INSTANCE as u8 + 1, ) .await; let instance = context.create_stopped_instance().await; - for slot in 0..MAX_NICS { + for slot in 0..MAX_NICS_PER_INSTANCE { let subnet = &context.net1.subnets[slot]; let interface = IncompleteNetworkInterface::new_instance( Uuid::new_v4(), diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 7a9a26b05f..969320617f 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -83,7 +83,7 @@ pub(crate) mod sagas; pub(crate) use nexus_db_queries::db::queries::disk::MAX_DISKS_PER_INSTANCE; -pub(crate) const MAX_NICS_PER_INSTANCE: usize = 8; +pub(crate) use nexus_db_model::MAX_NICS_PER_INSTANCE; // XXX: Might want to recast as max *floating* IPs, we have at most one // ephemeral (so bounded in saga by design). diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 7baacf97ce..2a37721bb0 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -608,6 +608,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .unwrap() .into(), mac, + slot: 0, }, }, internal_dns::ServiceName::Nexus, @@ -831,6 +832,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .unwrap() .into(), mac, + slot: 0, }, }, internal_dns::ServiceName::ExternalDns, diff --git a/nexus/types/src/internal_api/params.rs b/nexus/types/src/internal_api/params.rs index ab15ec26b7..987efbf6b7 100644 --- a/nexus/types/src/internal_api/params.rs +++ b/nexus/types/src/internal_api/params.rs @@ -163,6 +163,7 @@ pub struct ServiceNic { pub name: Name, pub ip: IpAddr, pub mac: MacAddr, + pub slot: u8, } /// Describes the purpose of the service. diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index a1a78deb84..b8b45aa08e 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -6273,13 +6273,19 @@ }, "name": { "$ref": "#/components/schemas/Name" + }, + "slot": { + "type": "integer", + "format": "uint8", + "minimum": 0 } }, "required": [ "id", "ip", "mac", - "name" + "name", + "slot" ] }, "ServicePutRequest": { diff --git a/schema/crdb/34.0.0/up.sql b/schema/crdb/34.0.0/up.sql new file mode 100644 index 0000000000..b8190b2246 --- /dev/null +++ b/schema/crdb/34.0.0/up.sql @@ -0,0 +1,6 @@ +CREATE UNIQUE INDEX IF NOT EXISTS network_interface_parent_id_slot_key ON omicron.public.network_interface ( + parent_id, + slot +) +WHERE + time_deleted IS NULL; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 18b1b82563..254080e92d 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1462,6 +1462,17 @@ STORING (vpc_id, subnet_id, is_primary) WHERE time_deleted IS NULL; +/* + * Index used to verify that all interfaces for a resource (e.g. Instance, + * Service) have unique slots. 
+ */ +CREATE UNIQUE INDEX IF NOT EXISTS network_interface_parent_id_slot_key ON omicron.public.network_interface ( + parent_id, + slot +) +WHERE + time_deleted IS NULL; + CREATE TYPE IF NOT EXISTS omicron.public.vpc_firewall_rule_status AS ENUM ( 'disabled', 'enabled' @@ -3504,7 +3515,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '33.0.1', NULL) + ( TRUE, NOW(), NOW(), '34.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/sled-agent/src/params.rs b/sled-agent/src/params.rs index 7ed1264d9c..fda952ca87 100644 --- a/sled-agent/src/params.rs +++ b/sled-agent/src/params.rs @@ -377,6 +377,7 @@ impl OmicronZoneConfig { name: nic.name.clone(), ip: nic.ip, mac: nic.mac, + slot: nic.slot, }, }, }, @@ -397,6 +398,7 @@ impl OmicronZoneConfig { name: nic.name.clone(), ip: nic.ip, mac: nic.mac, + slot: nic.slot, }, }, }, @@ -440,6 +442,7 @@ impl OmicronZoneConfig { name: nic.name.clone(), ip: nic.ip, mac: nic.mac, + slot: nic.slot, }, }, } diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs index fd5995b8f1..8854aee05c 100644 --- a/sled-agent/src/sim/server.rs +++ b/sled-agent/src/sim/server.rs @@ -363,6 +363,7 @@ pub async fn run_standalone_server( .unwrap() .into(), mac: macs.next().unwrap(), + slot: 0, }, }, service_id: Uuid::new_v4(), @@ -396,6 +397,7 @@ pub async fn run_standalone_server( .unwrap() .into(), mac: macs.next().unwrap(), + slot: 0, }, }, service_id: Uuid::new_v4(), From 4db6c6c4983a68ec8717f4155d30f3489191d453 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Thu, 15 Feb 2024 15:44:35 -0500 Subject: [PATCH 002/157] [omdb] Add `db network list-vnics` and expand output of `db network list-eips` (#5064) This PR has some omdb commands I wanted during dev/debug of #5045. 
It expands the output of `list-eips` to include the parent's ID (particularly
useful when trying to determine the external IP of a specific Nexus instance,
for example):

```
IP             PORTS   KIND      STATE    OWNER_KIND OWNER_ID                             OWNER_DESCRIPTION
10.1.1.3/32    0/65535 floating  Attached instance   4e6fb33a-7ba2-4a5e-abc5-dc9b047c01e0 v6/some-vm2
10.1.1.4/32    0/16383 SNAT      Attached instance   4e6fb33a-7ba2-4a5e-abc5-dc9b047c01e0 v6/some-vm2
10.1.1.5/32    0/65535 ephemeral Attached instance   4e6fb33a-7ba2-4a5e-abc5-dc9b047c01e0 v6/some-vm2
172.20.26.1/32 0/65535 floating  Attached service    edd99650-5df1-4241-815d-253e4ef2399c ExternalDns
172.20.26.2/32 0/65535 floating  Attached service    f500d564-c40a-4eca-ac8a-a26b435f2037 ExternalDns
172.20.26.3/32 0/65535 floating  Attached service    65a11c18-7f59-41ac-b9e7-680627f996e7 Nexus
172.20.26.4/32 0/65535 floating  Attached service    20b100d0-84c3-4119-aa9b-0c632b0b6a3a Nexus
172.20.26.5/32 0/65535 floating  Attached service    2898657e-4141-4c05-851b-147bffc6bbbd Nexus
172.20.26.6/32 0/16383 SNAT      Attached service    c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55 Ntp
```

and adds a `list-vnics` command to show allocated vnics:

```
IP            MAC               SLOT PRIMARY KIND     SUBNET        PARENT_ID                            DESCRIPTION
172.30.0.5/32 A8:40:25:F8:A5:8C 1    true    instance 172.30.0.0/22 2a4afdda-e269-48bc-913f-01ad57c50543 default primary interface for p4
172.30.0.5/32 A8:40:25:F5:AF:F0 1    true    instance 172.30.0.0/22 be705808-d507-4693-9a97-186c92970e7b default primary interface for updateinst
172.30.0.5/32 A8:40:25:F7:3B:00 1    true    instance 172.30.0.0/22 0ab1939f-af6e-4ea2-a155-71f210e937fc a sample nic
172.30.1.5/32 A8:40:25:FF:B0:9C 1    true    service  172.30.1.0/24 edd99650-5df1-4241-815d-253e4ef2399c external_dns service vNIC
172.30.1.6/32 A8:40:25:FF:D0:B4 1    true    service  172.30.1.0/24 f500d564-c40a-4eca-ac8a-a26b435f2037 external_dns service vNIC
172.30.2.5/32 A8:40:25:FF:A6:83 1    true    service  172.30.2.0/24 65a11c18-7f59-41ac-b9e7-680627f996e7 nexus service vNIC
172.30.2.6/32 A8:40:25:FF:B4:C1 1    true    service  172.30.2.0/24 20b100d0-84c3-4119-aa9b-0c632b0b6a3a nexus service vNIC
172.30.2.7/32 A8:40:25:FF:C6:59 0    true    service  172.30.2.0/24 2898657e-4141-4c05-851b-147bffc6bbbd nexus service vNIC
172.30.3.5/32 A8:40:25:FF:B2:52 1    true    service  172.30.3.0/24 c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55 ntp service vNIC
172.30.3.6/32 A8:40:25:FF:A0:F9 1    true    service  172.30.3.0/24 6ea2684c-115e-48a6-8453-ab52d1cecd73 ntp service vNIC
```

(This command immediately revealed issues with the slot number recording on
dogfood, which led to opening #5056.)
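For reference, here is roughly how these commands are invoked (a sketch only:
the `--db-url` value below is a placeholder for wherever CockroachDB is
listening, and in a deployed system omdb can usually discover the database on
its own):

```console
# Placeholder database URL; substitute the real CockroachDB address.
$ omdb db --db-url postgresql://root@[::1]:32221/omicron network list-eips
$ omdb db --db-url postgresql://root@[::1]:32221/omicron network list-vnics
```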
--- dev-tools/omdb/src/bin/omdb/db.rs | 317 ++++++++++++++++++++++---- dev-tools/omdb/tests/usage_errors.out | 5 +- 2 files changed, 270 insertions(+), 52 deletions(-) diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 9c41c25cc0..a51deacaf4 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -32,8 +32,10 @@ use diesel::BoolExpressionMethods; use diesel::ExpressionMethods; use diesel::JoinOnDsl; use diesel::NullableExpressionMethods; +use diesel::OptionalExtension; use diesel::TextExpressionMethods; use gateway_client::types::SpType; +use ipnetwork::IpNetwork; use nexus_db_model::Dataset; use nexus_db_model::Disk; use nexus_db_model::DnsGroup; @@ -45,6 +47,9 @@ use nexus_db_model::HwBaseboardId; use nexus_db_model::Instance; use nexus_db_model::InvCollection; use nexus_db_model::IpAttachState; +use nexus_db_model::IpKind; +use nexus_db_model::NetworkInterface; +use nexus_db_model::NetworkInterfaceKind; use nexus_db_model::Project; use nexus_db_model::Region; use nexus_db_model::RegionSnapshot; @@ -55,6 +60,7 @@ use nexus_db_model::SwCaboose; use nexus_db_model::SwRotPage; use nexus_db_model::Vmm; use nexus_db_model::Volume; +use nexus_db_model::VpcSubnet; use nexus_db_model::Zpool; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; @@ -75,6 +81,7 @@ use nexus_types::inventory::Collection; use nexus_types::inventory::RotPageWhich; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Generation; +use omicron_common::api::external::MacAddr; use omicron_common::postgres_config::PostgresConfigWithUrl; use sled_agent_client::types::VolumeConstructionRequest; use std::borrow::Cow; @@ -331,6 +338,8 @@ struct NetworkArgs { enum NetworkCommands { /// List external IPs ListEips, + /// List virtual network interfaces + ListVnics, } #[derive(Debug, Args)] @@ -488,6 +497,17 @@ impl DbArgs { cmd_db_eips(&opctx, &datastore, &self.fetch_opts, *verbose) .await } + DbCommands::Network(NetworkArgs { + command: NetworkCommands::ListVnics, + verbose, + }) => { + cmd_db_network_list_vnics( + &datastore, + &self.fetch_opts, + *verbose, + ) + .await + } DbCommands::Snapshots(SnapshotArgs { command: SnapshotCommands::Info(uuid), }) => cmd_db_snapshot_info(&opctx, &datastore, uuid).await, @@ -582,6 +602,42 @@ fn first_page<'a, T>(limit: NonZeroU32) -> DataPageParams<'a, T> { } } +/// Helper function to looks up an instance with the given ID. +async fn lookup_instance( + datastore: &DataStore, + instance_id: Uuid, +) -> anyhow::Result> { + use db::schema::instance::dsl; + + let conn = datastore.pool_connection_for_tests().await?; + dsl::instance + .filter(dsl::id.eq(instance_id)) + .limit(1) + .select(Instance::as_select()) + .get_result_async(&*conn) + .await + .optional() + .with_context(|| format!("loading instance {instance_id}")) +} + +/// Helper function to looks up a project with the given ID. +async fn lookup_project( + datastore: &DataStore, + project_id: Uuid, +) -> anyhow::Result> { + use db::schema::project::dsl; + + let conn = datastore.pool_connection_for_tests().await?; + dsl::project + .filter(dsl::id.eq(project_id)) + .limit(1) + .select(Project::as_select()) + .get_result_async(&*conn) + .await + .optional() + .with_context(|| format!("loading project {project_id}")) +} + // Disks /// Run `omdb db disk list`. 
@@ -1743,32 +1799,54 @@ async fn cmd_db_eips( } } - #[derive(Tabled)] enum Owner { - Instance { project: String, name: String }, - Service { kind: String }, + Instance { id: Uuid, project: String, name: String }, + Service { id: Uuid, kind: String }, + Project { id: Uuid, name: String }, None, } - impl Display for Owner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + impl Owner { + fn kind(&self) -> &'static str { + match self { + Owner::Instance { .. } => "instance", + Owner::Service { .. } => "service", + Owner::Project { .. } => "project", + Owner::None => "none", + } + } + + fn id(&self) -> String { + match self { + Owner::Instance { id, .. } + | Owner::Service { id, .. } + | Owner::Project { id, .. } => id.to_string(), + Owner::None => "none".to_string(), + } + } + + fn name(&self) -> String { match self { - Self::Instance { project, name } => { - write!(f, "Instance {project}/{name}") + Self::Instance { project, name, .. } => { + format!("{project}/{name}") } - Self::Service { kind } => write!(f, "Service {kind}"), - Self::None => write!(f, "None"), + Self::Service { kind, .. } => kind.to_string(), + Self::Project { name, .. } => name.to_string(), + Self::None => "none".to_string(), } } } #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] struct IpRow { ip: ipnetwork::IpNetwork, ports: PortRange, - kind: String, + kind: IpKind, state: IpAttachState, - owner: Owner, + owner_kind: &'static str, + owner_id: String, + owner_name: String, } if verbose { @@ -1798,50 +1876,58 @@ async fn cmd_db_eips( continue; } }; - Owner::Service { kind: format!("{:?}", service.1.kind) } + Owner::Service { + id: owner_id, + kind: format!("{:?}", service.1.kind), + } } else { - use db::schema::instance::dsl as instance_dsl; - let instance = match instance_dsl::instance - .filter(instance_dsl::id.eq(owner_id)) - .limit(1) - .select(Instance::as_select()) - .load_async(&*datastore.pool_connection_for_tests().await?) - .await - .context("loading requested instance")? - .pop() - { - Some(instance) => instance, - None => { - eprintln!("instance with id {owner_id} not found"); - continue; - } - }; - - use db::schema::project::dsl as project_dsl; - let project = match project_dsl::project - .filter(project_dsl::id.eq(instance.project_id)) - .limit(1) - .select(Project::as_select()) - .load_async(&*datastore.pool_connection_for_tests().await?) - .await - .context("loading requested project")? - .pop() - { - Some(instance) => instance, - None => { - eprintln!( - "project with id {} not found", - instance.project_id - ); - continue; - } - }; + let instance = + match lookup_instance(datastore, owner_id).await? { + Some(instance) => instance, + None => { + eprintln!("instance with id {owner_id} not found"); + continue; + } + }; + + let project = + match lookup_project(datastore, instance.project_id).await? + { + Some(project) => project, + None => { + eprintln!( + "project with id {} not found", + instance.project_id + ); + continue; + } + }; Owner::Instance { + id: owner_id, project: project.name().to_string(), name: instance.name().to_string(), } } + } else if let Some(project_id) = ip.project_id { + use db::schema::project::dsl as project_dsl; + let project = match project_dsl::project + .filter(project_dsl::id.eq(project_id)) + .limit(1) + .select(Project::as_select()) + .load_async(&*datastore.pool_connection_for_tests().await?) + .await + .context("loading requested project")? 
+ .pop() + { + Some(project) => project, + None => { + eprintln!("project with id {} not found", project_id); + continue; + } + }; + + Owner::Project { id: project_id, name: project.name().to_string() } } else { Owner::None }; @@ -1853,8 +1939,139 @@ async fn cmd_db_eips( last: ip.last_port.into(), }, state: ip.state, - kind: format!("{:?}", ip.kind), - owner, + kind: ip.kind, + owner_kind: owner.kind(), + owner_id: owner.id(), + owner_name: owner.name(), + }; + rows.push(row); + } + + rows.sort_by(|a, b| a.ip.cmp(&b.ip)); + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .to_string(); + + println!("{}", table); + + Ok(()) +} + +async fn cmd_db_network_list_vnics( + datastore: &DataStore, + fetch_opts: &DbFetchOptions, + verbose: bool, +) -> Result<(), anyhow::Error> { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct NicRow { + ip: IpNetwork, + mac: MacAddr, + slot: i16, + primary: bool, + kind: &'static str, + subnet: String, + parent_id: Uuid, + parent_name: String, + } + use db::schema::network_interface::dsl; + let mut query = dsl::network_interface.into_boxed(); + if !fetch_opts.include_deleted { + query = query.filter(dsl::time_deleted.is_null()); + } + + let nics: Vec = query + .select(NetworkInterface::as_select()) + .limit(i64::from(u32::from(fetch_opts.fetch_limit))) + .get_results_async(&*datastore.pool_connection_for_tests().await?) + .await?; + + check_limit(&nics, fetch_opts.fetch_limit, || { + String::from("listing network interfaces") + }); + + if verbose { + for nic in &nics { + if verbose { + println!("{nic:#?}"); + } + } + return Ok(()); + } + + let mut rows = Vec::new(); + + for nic in &nics { + let (kind, parent_name) = match nic.kind { + NetworkInterfaceKind::Instance => { + match lookup_instance(datastore, nic.parent_id).await? { + Some(instance) => { + match lookup_project(datastore, instance.project_id) + .await? + { + Some(project) => ( + "instance", + format!( + "{}/{}", + project.name(), + instance.name() + ), + ), + None => { + eprintln!( + "project with id {} not found", + instance.project_id + ); + continue; + } + } + } + None => { + ("instance?", "parent instance not found".to_string()) + } + } + } + NetworkInterfaceKind::Service => { + // We create service NICs named after the service, so we can use + // the nic name instead of looking up the service. + ("service", nic.name().to_string()) + } + }; + + let subnet = { + use db::schema::vpc_subnet::dsl; + let subnet = match dsl::vpc_subnet + .filter(dsl::id.eq(nic.subnet_id)) + .limit(1) + .select(VpcSubnet::as_select()) + .load_async(&*datastore.pool_connection_for_tests().await?) + .await + .context("loading requested subnet")? 
+ .pop() + { + Some(subnet) => subnet, + None => { + eprintln!("subnet with id {} not found", nic.subnet_id); + continue; + } + }; + + if nic.ip.is_ipv4() { + subnet.ipv4_block.to_string() + } else { + subnet.ipv6_block.to_string() + } + }; + + let row = NicRow { + ip: nic.ip, + mac: *nic.mac, + slot: nic.slot, + primary: nic.primary, + kind, + subnet, + parent_id: nic.parent_id, + parent_name, }; rows.push(row); } diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index bb7da1be57..c10f95e23d 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -254,8 +254,9 @@ Print information about the network Usage: omdb db network [OPTIONS] Commands: - list-eips List external IPs - help Print this message or the help of the given subcommand(s) + list-eips List external IPs + list-vnics List virtual network interfaces + help Print this message or the help of the given subcommand(s) Options: --verbose Print out raw data structures from the data store From e4ba67bfece71a417201d844f89e3ff23d5176c8 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 15 Feb 2024 14:02:32 -0800 Subject: [PATCH 003/157] chore(deps): update rust crate num-integer to 0.1.46 (#5076) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 5 ++--- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f13d1e990..63d72bc117 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4699,11 +4699,10 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] diff --git a/Cargo.toml b/Cargo.toml index 3dae9211e3..e5fa364305 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -264,7 +264,7 @@ nexus-test-interface = { path = "nexus/test-interface" } nexus-test-utils-macros = { path = "nexus/test-utils-macros" } nexus-test-utils = { path = "nexus/test-utils" } nexus-types = { path = "nexus/types" } -num-integer = "0.1.45" +num-integer = "0.1.46" num = { version = "0.4.1", default-features = false, features = [ "libm" ] } omicron-common = { path = "common" } omicron-gateway = { path = "gateway" } diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index a52f63fec1..d72b63356b 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -71,7 +71,7 @@ managed = { version = "0.8.0", default-features = false, features = ["alloc", "m memchr = { version = "2.6.3" } nom = { version = "7.1.3" } num-bigint = { version = "0.4.4", features = ["rand"] } -num-integer = { version = "0.1.45", features = ["i128"] } +num-integer = { version = "0.1.46", features = ["i128"] } num-iter = { version = "0.1.43", default-features = false, features = ["i128"] } num-traits = { version = "0.2.16", features = ["i128", "libm"] } openapiv3 = { version = "2.0.0", default-features = false, features = ["skip_serializing_defaults"] } @@ -178,7 +178,7 @@ managed = { version = "0.8.0", default-features = false, features = ["alloc", "m memchr = { version = "2.6.3" } nom = { version = "7.1.3" } num-bigint = { version = "0.4.4", features = ["rand"] } -num-integer = { version = 
"0.1.45", features = ["i128"] } +num-integer = { version = "0.1.46", features = ["i128"] } num-iter = { version = "0.1.43", default-features = false, features = ["i128"] } num-traits = { version = "0.2.16", features = ["i128", "libm"] } openapiv3 = { version = "2.0.0", default-features = false, features = ["skip_serializing_defaults"] } From eda05500978621d28df6e15cb50549abed9a60ca Mon Sep 17 00:00:00 2001 From: bnaecker Date: Thu, 15 Feb 2024 14:43:47 -0800 Subject: [PATCH 004/157] Update progenitor to bc0bb4b (#5071) --- Cargo.lock | 61 +++++++++++---------------- clients/dns-service-client/src/lib.rs | 3 +- common/src/api/external/error.rs | 3 +- sled-agent/src/instance.rs | 3 +- workspace-hack/Cargo.toml | 6 +-- 5 files changed, 33 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 63d72bc117..e82fd6a60f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -655,7 +655,7 @@ dependencies = [ "omicron-common", "omicron-workspace-hack", "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "regress 0.8.0", + "regress", "reqwest", "schemars", "serde", @@ -1946,7 +1946,7 @@ dependencies = [ "progenitor-client 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", "quote", "rand 0.8.5", - "regress 0.8.0", + "regress", "reqwest", "rustfmt-wrapper", "schemars", @@ -3368,7 +3368,7 @@ dependencies = [ "opte-ioctl", "oxide-vpc", "oxlog", - "regress 0.8.0", + "regress", "schemars", "serde", "serde_json", @@ -3504,7 +3504,7 @@ dependencies = [ "installinator-common", "omicron-workspace-hack", "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "regress 0.8.0", + "regress", "reqwest", "schemars", "serde", @@ -4302,7 +4302,7 @@ dependencies = [ "omicron-passwords", "omicron-workspace-hack", "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "regress 0.8.0", + "regress", "reqwest", "schemars", "serde", @@ -4871,7 +4871,7 @@ dependencies = [ "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", "proptest", "rand 0.8.5", - "regress 0.8.0", + "regress", "reqwest", "schemars", "semver 1.0.21", @@ -5365,7 +5365,6 @@ dependencies = [ "generic-array", "getrandom 0.2.10", "group", - "hashbrown 0.13.2", "hashbrown 0.14.3", "hex", "hmac", @@ -5647,7 +5646,7 @@ dependencies = [ "omicron-workspace-hack", "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", "rand 0.8.5", - "regress 0.8.0", + "regress", "reqwest", "serde", "serde_json", @@ -6500,7 +6499,7 @@ dependencies = [ [[package]] name = "progenitor" version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#86b60220b88a2ca3629fb87acf8f83ff35f63aaa" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" dependencies = [ "progenitor-client 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", "progenitor-impl 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", @@ -6511,7 +6510,7 @@ dependencies = [ [[package]] name = "progenitor" version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor#86b60220b88a2ca3629fb87acf8f83ff35f63aaa" +source = "git+https://github.com/oxidecomputer/progenitor#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" dependencies = [ "progenitor-client 0.5.0 (git+https://github.com/oxidecomputer/progenitor)", "progenitor-impl 0.5.0 (git+https://github.com/oxidecomputer/progenitor)", @@ -6522,7 +6521,7 @@ dependencies = [ [[package]] 
name = "progenitor-client" version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#86b60220b88a2ca3629fb87acf8f83ff35f63aaa" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" dependencies = [ "bytes", "futures-core", @@ -6536,7 +6535,7 @@ dependencies = [ [[package]] name = "progenitor-client" version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor#86b60220b88a2ca3629fb87acf8f83ff35f63aaa" +source = "git+https://github.com/oxidecomputer/progenitor#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" dependencies = [ "bytes", "futures-core", @@ -6550,7 +6549,7 @@ dependencies = [ [[package]] name = "progenitor-impl" version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#86b60220b88a2ca3629fb87acf8f83ff35f63aaa" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" dependencies = [ "getopts", "heck 0.4.1", @@ -6572,7 +6571,7 @@ dependencies = [ [[package]] name = "progenitor-impl" version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor#86b60220b88a2ca3629fb87acf8f83ff35f63aaa" +source = "git+https://github.com/oxidecomputer/progenitor#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" dependencies = [ "getopts", "heck 0.4.1", @@ -6594,7 +6593,7 @@ dependencies = [ [[package]] name = "progenitor-macro" version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#86b60220b88a2ca3629fb87acf8f83ff35f63aaa" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" dependencies = [ "openapiv3", "proc-macro2", @@ -6611,7 +6610,7 @@ dependencies = [ [[package]] name = "progenitor-macro" version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor#86b60220b88a2ca3629fb87acf8f83ff35f63aaa" +source = "git+https://github.com/oxidecomputer/progenitor#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" dependencies = [ "openapiv3", "proc-macro2", @@ -7051,16 +7050,6 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" -[[package]] -name = "regress" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ed9969cad8051328011596bf549629f1b800cf1731e7964b1eef8dfc480d2c2" -dependencies = [ - "hashbrown 0.13.2", - "memchr", -] - [[package]] name = "regress" version = "0.8.0" @@ -8146,7 +8135,7 @@ dependencies = [ "omicron-common", "omicron-workspace-hack", "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "regress 0.8.0", + "regress", "reqwest", "schemars", "serde", @@ -9009,18 +8998,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", @@ -9716,7 +9705,7 @@ checksum = 
"497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "typify" version = "0.0.15" -source = "git+https://github.com/oxidecomputer/typify#1f97f167923f001818d461b1286f8a5242abf8b1" +source = "git+https://github.com/oxidecomputer/typify#ce009d6f83b620cbd0e3acdd9b9ea071018471d8" dependencies = [ "typify-impl", "typify-macro", @@ -9725,13 +9714,13 @@ dependencies = [ [[package]] name = "typify-impl" version = "0.0.15" -source = "git+https://github.com/oxidecomputer/typify#1f97f167923f001818d461b1286f8a5242abf8b1" +source = "git+https://github.com/oxidecomputer/typify#ce009d6f83b620cbd0e3acdd9b9ea071018471d8" dependencies = [ "heck 0.4.1", "log", "proc-macro2", "quote", - "regress 0.7.1", + "regress", "schemars", "serde_json", "syn 2.0.48", @@ -9742,7 +9731,7 @@ dependencies = [ [[package]] name = "typify-macro" version = "0.0.15" -source = "git+https://github.com/oxidecomputer/typify#1f97f167923f001818d461b1286f8a5242abf8b1" +source = "git+https://github.com/oxidecomputer/typify#ce009d6f83b620cbd0e3acdd9b9ea071018471d8" dependencies = [ "proc-macro2", "quote", @@ -10466,7 +10455,7 @@ dependencies = [ "ipnetwork", "omicron-workspace-hack", "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "regress 0.8.0", + "regress", "reqwest", "schemars", "serde", diff --git a/clients/dns-service-client/src/lib.rs b/clients/dns-service-client/src/lib.rs index e437f1a7f6..52c2b8bcd2 100644 --- a/clients/dns-service-client/src/lib.rs +++ b/clients/dns-service-client/src/lib.rs @@ -32,7 +32,8 @@ pub fn is_retryable(error: &DnsConfigError) -> bool { | DnsConfigError::InvalidResponsePayload(_, _) | DnsConfigError::UnexpectedResponse(_) | DnsConfigError::InvalidUpgrade(_) - | DnsConfigError::ResponseBodyError(_) => return false, + | DnsConfigError::ResponseBodyError(_) + | DnsConfigError::PreHookError(_) => return false, DnsConfigError::ErrorResponse(response_value) => response_value, }; diff --git a/common/src/api/external/error.rs b/common/src/api/external/error.rs index d2e062f2e1..f7eb257e8f 100644 --- a/common/src/api/external/error.rs +++ b/common/src/api/external/error.rs @@ -496,7 +496,8 @@ impl From> for Error { ) | progenitor::progenitor_client::Error::UnexpectedResponse(_) | progenitor::progenitor_client::Error::InvalidUpgrade(_) - | progenitor::progenitor_client::Error::ResponseBodyError(_) => { + | progenitor::progenitor_client::Error::ResponseBodyError(_) + | progenitor::progenitor_client::Error::PreHookError(_) => { Error::internal_error(&e.to_string()) } // This error represents an expected error from the remote service. diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs index 5b9bf54dd9..7a6033b4bb 100644 --- a/sled-agent/src/instance.rs +++ b/sled-agent/src/instance.rs @@ -504,7 +504,8 @@ impl InstanceRunner { | nexus_client::Error::InvalidResponsePayload(..) 
| nexus_client::Error::UnexpectedResponse(_) | nexus_client::Error::InvalidUpgrade(_) - | nexus_client::Error::ResponseBodyError(_) => { + | nexus_client::Error::ResponseBodyError(_) + | nexus_client::Error::PreHookError(_) => { BackoffError::permanent(Error::Notification( err, )) diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index d72b63356b..098be26460 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -54,8 +54,7 @@ gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway- generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] } group = { version = "0.13.0", default-features = false, features = ["alloc"] } -hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14.3", features = ["raw"] } -hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13.2" } +hashbrown = { version = "0.14.3", features = ["raw"] } hex = { version = "0.4.3", features = ["serde"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } hyper = { version = "0.14.27", features = ["full"] } @@ -161,8 +160,7 @@ gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway- generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] } group = { version = "0.13.0", default-features = false, features = ["alloc"] } -hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14.3", features = ["raw"] } -hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13.2" } +hashbrown = { version = "0.14.3", features = ["raw"] } hex = { version = "0.4.3", features = ["serde"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } hyper = { version = "0.14.27", features = ["full"] } From fedd8c87ccbf17af6251433a122652cf0962464e Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 15 Feb 2024 23:06:32 +0000 Subject: [PATCH 005/157] chore(deps): update rust crate serde_with to 3.6.1 (#5077) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 9 +++++---- Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e82fd6a60f..c5f0192dbd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7931,9 +7931,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.6.0" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b0ed1662c5a68664f45b76d18deb0e234aff37207086803165c961eb695e981" +checksum = "15d167997bd841ec232f5b2b8e0e26606df2e7caa4c31b95ea9ca52b200bd270" dependencies = [ "base64", "chrono", @@ -7941,6 +7941,7 @@ dependencies = [ "indexmap 1.9.3", "indexmap 2.2.3", "serde", + "serde_derive", "serde_json", "serde_with_macros", "time", @@ -7948,9 +7949,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.6.0" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "568577ff0ef47b879f736cd66740e022f3672788cdf002a05a4e609ea5a6fb15" +checksum = "865f9743393e638991566a8b7a479043c2c8da94a33e0a31f18214c9cae0a64d" dependencies = [ "darling 0.20.3", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 
e5fa364305..bf9ebd9354 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -341,7 +341,7 @@ serde_json = "1.0.113" serde_path_to_error = "0.1.15" serde_tokenstream = "0.2" serde_urlencoded = "0.7.1" -serde_with = "3.6.0" +serde_with = "3.6.1" sha2 = "0.10.8" sha3 = "0.10.8" shell-words = "1.1.0" From 1f48138840644e2573800b8f52dd00af35acc179 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Fri, 16 Feb 2024 01:32:15 +0000 Subject: [PATCH 006/157] chore(deps): update actions/setup-node action to v4.0.2 (#5075) --- .github/workflows/validate-openapi-spec.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/validate-openapi-spec.yml b/.github/workflows/validate-openapi-spec.yml index a76567af2a..50aba2dc53 100644 --- a/.github/workflows/validate-openapi-spec.yml +++ b/.github/workflows/validate-openapi-spec.yml @@ -13,7 +13,7 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - - uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1 + - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: node-version: '18' - name: Install our tools From 235420ebd04831829ff3b2ce89fcd26f38c768f5 Mon Sep 17 00:00:00 2001 From: Rain Date: Thu, 15 Feb 2024 20:58:31 -0800 Subject: [PATCH 007/157] [nexus] basic scaffolding for typed UUIDs + a couple of simple examples (#4934) This PR contains a basic implementation of typed UUIDs using the [newtype-uuid](https://crates.io/crates/newtype-uuid) crate, plus a simple conversion of two UUID types to this model: `LoopbackAddress` and `TufRepo`. I picked these two types based on the fact that there were just a few places that used them. 
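To make the new model concrete, here is a minimal sketch of the intended usage
pattern. `TypedUuid` and the `GenericUuid` conversion trait are real names
from the diff below; `RackKind` is borrowed from the macro test output further
down (the actual registry of kinds lives in the new `omicron-uuid-kinds`
crate), and the `demo` function itself is illustrative, not code from this PR:

```rust
use omicron_uuid_kinds::{GenericUuid, RackKind, TypedUuid};

// Hypothetical caller showing the round trip between typed and untyped IDs.
fn demo(raw: uuid::Uuid) {
    // TypedUuid<RackKind> is a distinct compile-time type from TypedUuid of
    // any other kind, so a rack ID can no longer be passed where some other
    // resource's ID is expected, even though both wrap a plain UUID.
    let rack_id: TypedUuid<RackKind> = TypedUuid::from_untyped_uuid(raw);

    // Boundaries that still speak plain UUIDs (database columns, external
    // APIs) convert back explicitly via the GenericUuid trait.
    let untyped: uuid::Uuid = rack_id.into_untyped_uuid();
    assert_eq!(raw, untyped);
}
```

Making the conversion explicit is the point: mixing up two kinds of IDs now
requires a greppable `GenericUuid` call instead of compiling silently.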
--- .config/hakari.toml | 11 +- Cargo.lock | 36 +++ Cargo.toml | 28 +++ common/Cargo.toml | 1 + common/src/api/external/error.rs | 6 + nexus/authz-macros/Cargo.toml | 1 + nexus/authz-macros/outputs/rack.txt | 75 ++++++ nexus/authz-macros/src/lib.rs | 114 ++++++++- nexus/db-macros/Cargo.toml | 1 + .../outputs/asset_with_uuid_kind.txt | 49 ++++ nexus/db-macros/outputs/project.txt | 6 +- .../outputs/resource_with_uuid_kind.txt | 66 +++++ nexus/db-macros/outputs/silo_user.txt | 6 +- nexus/db-macros/outputs/sled.txt | 182 ++++++++++++++ nexus/db-macros/src/lib.rs | 229 +++++++++++++++--- nexus/db-macros/src/lookup.rs | 111 +++++++-- nexus/db-macros/src/test_helpers.rs | 10 + nexus/db-model/Cargo.toml | 2 + nexus/db-model/src/lib.rs | 2 + nexus/db-model/src/switch_interface.rs | 7 +- nexus/db-model/src/tuf_repo.rs | 11 +- nexus/db-model/src/typed_uuid.rs | 116 +++++++++ nexus/db-queries/Cargo.toml | 3 +- nexus/db-queries/src/authz/api_resources.rs | 4 +- .../src/authz/policy_test/resources.rs | 5 +- .../src/db/datastore/switch_interface.rs | 9 +- nexus/db-queries/src/db/datastore/update.rs | 15 +- nexus/db-queries/src/db/lookup.rs | 6 +- nexus/macros-common/Cargo.toml | 11 + nexus/macros-common/src/lib.rs | 109 +++++++++ nexus/types/Cargo.toml | 1 + nexus/types/src/identity.rs | 13 +- uuid-kinds/Cargo.toml | 20 ++ uuid-kinds/README.adoc | 73 ++++++ uuid-kinds/src/lib.rs | 50 ++++ 35 files changed, 1297 insertions(+), 92 deletions(-) create mode 100644 nexus/authz-macros/outputs/rack.txt create mode 100644 nexus/db-macros/outputs/asset_with_uuid_kind.txt create mode 100644 nexus/db-macros/outputs/resource_with_uuid_kind.txt create mode 100644 nexus/db-macros/outputs/sled.txt create mode 100644 nexus/db-macros/src/test_helpers.rs create mode 100644 nexus/db-model/src/typed_uuid.rs create mode 100644 nexus/macros-common/Cargo.toml create mode 100644 nexus/macros-common/src/lib.rs create mode 100644 uuid-kinds/Cargo.toml create mode 100644 uuid-kinds/README.adoc create mode 100644 uuid-kinds/src/lib.rs diff --git a/.config/hakari.toml b/.config/hakari.toml index 0d883dc6f6..3c08a89e12 100644 --- a/.config/hakari.toml +++ b/.config/hakari.toml @@ -30,4 +30,13 @@ platforms = [ exact-versions = true [traversal-excludes] -workspace-members = ["xtask"] +workspace-members = [ + # Exclude xtask because it needs to be built quickly. + "xtask", + + # Exclude omicron-uuid-kinds because it is a no-std crate. Depending on the + # workspace-hack isn't too problematic because other projects pulling in + # omicron as a git dependency will only see an empty workspace-hack. But + # let's make this explicit. 
+ "omicron-uuid-kinds", +] diff --git a/Cargo.lock b/Cargo.lock index c5f0192dbd..fca461f24b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -354,6 +354,7 @@ version = "0.1.0" dependencies = [ "expectorate", "heck 0.4.1", + "nexus-macros-common", "omicron-workspace-hack", "prettyplease", "proc-macro2", @@ -1548,6 +1549,7 @@ version = "0.1.0" dependencies = [ "expectorate", "heck 0.4.1", + "nexus-macros-common", "omicron-workspace-hack", "prettyplease", "proc-macro2", @@ -4256,6 +4258,17 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "newtype-uuid" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a5ff2b31594942586c1520da8f1e5c705729ec67b3c2ad0fe459f0b576e4d9a" +dependencies = [ + "schemars", + "serde", + "uuid", +] + [[package]] name = "newtype_derive" version = "0.1.6" @@ -4318,6 +4331,7 @@ dependencies = [ "anyhow", "chrono", "db-macros", + "derive-where", "diesel", "expectorate", "hex", @@ -4330,6 +4344,7 @@ dependencies = [ "omicron-common", "omicron-passwords", "omicron-rpaths", + "omicron-uuid-kinds", "omicron-workspace-hack", "parse-display", "pq-sys", @@ -4388,6 +4403,7 @@ dependencies = [ "omicron-rpaths", "omicron-sled-agent", "omicron-test-utils", + "omicron-uuid-kinds", "omicron-workspace-hack", "once_cell", "openapiv3", @@ -4482,6 +4498,16 @@ dependencies = [ "uuid", ] +[[package]] +name = "nexus-macros-common" +version = "0.1.0" +dependencies = [ + "omicron-workspace-hack", + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "nexus-test-interface" version = "0.1.0" @@ -4557,6 +4583,7 @@ dependencies = [ "gateway-client", "omicron-common", "omicron-passwords", + "omicron-uuid-kinds", "omicron-workspace-hack", "openssl", "parse-display", @@ -4865,6 +4892,7 @@ dependencies = [ "ipnetwork", "libc", "macaddr", + "omicron-uuid-kinds", "omicron-workspace-hack", "once_cell", "parse-display", @@ -5319,6 +5347,14 @@ dependencies = [ "walkdir", ] +[[package]] +name = "omicron-uuid-kinds" +version = "0.1.0" +dependencies = [ + "newtype-uuid", + "schemars", +] + [[package]] name = "omicron-workspace-hack" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index bf9ebd9354..f71bd77730 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,6 +44,7 @@ members = [ "nexus/defaults", "nexus/deployment", "nexus/inventory", + "nexus/macros-common", "nexus/test-interface", "nexus/test-utils-macros", "nexus/test-utils", @@ -66,6 +67,7 @@ members = [ "tufaceous", "update-common", "update-engine", + "uuid-kinds", "wicket-common", "wicket-dbg", "wicket", @@ -113,6 +115,7 @@ default-members = [ "nexus", "nexus/authz-macros", "nexus/blueprint-execution", + "nexus/macros-common", "nexus/db-macros", "nexus/db-model", "nexus/db-queries", @@ -138,6 +141,7 @@ default-members = [ "tufaceous", "update-common", "update-engine", + "uuid-kinds", "wicket-common", "wicket-dbg", "wicket", @@ -256,6 +260,7 @@ nexus-db-queries = { path = "nexus/db-queries" } nexus-defaults = { path = "nexus/defaults" } nexus-deployment = { path = "nexus/deployment" } nexus-inventory = { path = "nexus/inventory" } +nexus-macros-common = { path = "nexus/macros-common" } omicron-certificates = { path = "certificates" } omicron-passwords = { path = "passwords" } omicron-workspace-hack = "0.1.0" @@ -415,6 +420,14 @@ zeroize = { version = "1.7.0", features = ["zeroize_derive", "std"] } zip = { version = "0.6.6", default-features = false, features = ["deflate","bzip2"] } zone = { version = "0.3", default-features = false, features = ["async", "sync"] } +# 
newtype-uuid is set to default-features = false because we don't want to
+# depend on std in omicron-uuid-kinds (in case a no-std library wants to access
+# the kinds). However, uses of omicron-uuid-kinds _within omicron_ will have
+# std and the other features enabled because they'll refer to it via
+# omicron-uuid-kinds.workspace = true.
+newtype-uuid = { version = "1.0.1", default-features = false }
+omicron-uuid-kinds = { path = "uuid-kinds", features = ["serde", "schemars08", "uuid-v4"] }
+
 # NOTE: The test profile inherits from the dev profile, so settings under
 # profile.dev get inherited. AVOID setting anything under profile.test: that
 # will cause dev and test builds to diverge, which will cause more Cargo build
@@ -570,6 +583,7 @@ opt-level = 3
 [profile.dev.package.keccak]
 opt-level = 3
+
 #
 # It's common during development to use a local copy of various complex
 # dependencies. If you want to use those, uncomment one of these blocks.
@@ -617,3 +631,17 @@ path = "workspace-hack"
 [patch.crates-io.samael]
 git = "https://github.com/oxidecomputer/samael"
 branch = "oxide/omicron"
+
+# Several crates such as crucible and propolis have a Git dependency on
+# this repo. Omicron itself depends on these crates, which can lead to two
+# copies of these crates in the dependency graph. (As a Git dependency, and as
+# a path dependency.) The goal of omicron-uuid-kinds is to provide a unified
+# registry of UUID kinds. Two copies of the same kinds floating around is
+# unnecessary and painful.
+#
+# This directive ensures that whenever we see omicron-uuid-kinds as a Git
+# dependency, we'll use the path dependency version of the crate instead.
+#
+# See also: uuid-kinds/README.adoc.
+[patch."https://github.com/oxidecomputer/omicron"]
+omicron-uuid-kinds = { path = "uuid-kinds" }
diff --git a/common/Cargo.toml b/common/Cargo.toml
index ebb8c8c9b4..5628a93397 100644
--- a/common/Cargo.toml
+++ b/common/Cargo.toml
@@ -17,6 +17,7 @@ hex.workspace = true
 http.workspace = true
 ipnetwork.workspace = true
 macaddr.workspace = true
+omicron-uuid-kinds.workspace = true
 proptest = { workspace = true, optional = true }
 rand.workspace = true
 reqwest = { workspace = true, features = ["rustls-tls", "stream"] }
diff --git a/common/src/api/external/error.rs b/common/src/api/external/error.rs
index f7eb257e8f..2cb3dc0d6e 100644
--- a/common/src/api/external/error.rs
+++ b/common/src/api/external/error.rs
@@ -9,6 +9,7 @@ use crate::api::external::Name;
 use crate::api::external::ResourceType;
 use dropshot::HttpError;
+use omicron_uuid_kinds::GenericUuid;
 use serde::Deserialize;
 use serde::Serialize;
 use std::fmt::Display;
@@ -152,6 +153,11 @@ pub enum LookupType {
 }
 
 impl LookupType {
+    /// Constructs a `ById` lookup type from a typed or untyped UUID.
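+    ///
+    /// Usage sketch (identifiers hypothetical; per the doc above, both typed
+    /// and untyped UUIDs work, via `GenericUuid`):
+    ///
+    /// ```ignore
+    /// let lt = LookupType::by_id(tuf_repo_id); // TypedUuid<TufRepoKind>
+    /// let lt = LookupType::by_id(raw_id);      // plain uuid::Uuid
+    /// ```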
+    pub fn by_id<T: GenericUuid>(id: T) -> Self {
+        LookupType::ById(id.into_untyped_uuid())
+    }
+
     /// Returns an ObjectNotFound error appropriate for the case where this
     /// lookup failed
     pub fn into_not_found(self, type_name: ResourceType) -> Error {
diff --git a/nexus/authz-macros/Cargo.toml b/nexus/authz-macros/Cargo.toml
index e9bdaf4708..4d2640abee 100644
--- a/nexus/authz-macros/Cargo.toml
+++ b/nexus/authz-macros/Cargo.toml
@@ -9,6 +9,7 @@ proc-macro = true
 
 [dependencies]
 heck.workspace = true
+nexus-macros-common.workspace = true
 proc-macro2.workspace = true
 quote.workspace = true
 serde.workspace = true
diff --git a/nexus/authz-macros/outputs/rack.txt b/nexus/authz-macros/outputs/rack.txt
new file mode 100644
index 0000000000..40826951ee
--- /dev/null
+++ b/nexus/authz-macros/outputs/rack.txt
@@ -0,0 +1,75 @@
+///`authz` type for a resource of type RackUsed to uniquely identify a resource of type Rack across renames, moves, etc., and to do authorization checks (see [`crate::context::OpContext::authorize()`]). See [`crate::authz`] module-level documentation for more information.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct Rack {
+    parent: Fleet,
+    key: ::omicron_uuid_kinds::TypedUuid<::omicron_uuid_kinds::RackKind>,
+    lookup_type: LookupType,
+}
+impl Rack {
+    /// Makes a new `authz` struct for this resource with the given
+    /// `parent`, unique key `key`, looked up as described by
+    /// `lookup_type`
+    pub fn new(
+        parent: Fleet,
+        key: ::omicron_uuid_kinds::TypedUuid<::omicron_uuid_kinds::RackKind>,
+        lookup_type: LookupType,
+    ) -> Rack {
+        Rack {
+            parent,
+            key: key.into(),
+            lookup_type,
+        }
+    }
+    /// A version of `new` that takes the primary key type directly.
+    /// This is only different from [`Self::new`] if this resource
+    /// uses a different input key type.
+ pub fn with_primary_key( + parent: Fleet, + key: ::omicron_uuid_kinds::TypedUuid<::omicron_uuid_kinds::RackKind>, + lookup_type: LookupType, + ) -> Rack { + Rack { parent, key, lookup_type } + } + pub fn id(&self) -> ::omicron_uuid_kinds::TypedUuid<::omicron_uuid_kinds::RackKind> { + self.key.clone().into() + } + /// Describes how to register this type with Oso + pub(super) fn init() -> Init { + use oso::PolarClass; + Init { + polar_snippet: "\n resource Rack {\n permissions = [\n \"list_children\",\n \"modify\",\n \"read\",\n \"create_child\",\n ];\n \n relations = { parent_fleet: Fleet };\n \"list_children\" if \"viewer\" on \"parent_fleet\";\n \"read\" if \"viewer\" on \"parent_fleet\";\n \"modify\" if \"admin\" on \"parent_fleet\";\n \"create_child\" if \"admin\" on \"parent_fleet\";\n }\n has_relation(fleet: Fleet, \"parent_fleet\", child: Rack)\n if child.fleet = fleet;\n ", + polar_class: Rack::get_polar_class(), + } + } +} +impl Eq for Rack {} +impl PartialEq for Rack { + fn eq(&self, other: &Self) -> bool { + self.key == other.key + } +} +impl oso::PolarClass for Rack { + fn get_polar_class_builder() -> oso::ClassBuilder { + oso::Class::builder() + .with_equality_check() + .add_method( + "has_role", + |r: &Rack, actor: AuthenticatedActor, role: String| { false }, + ) + .add_attribute_getter("fleet", |r: &Rack| r.parent.clone()) + } +} +impl ApiResource for Rack { + fn parent(&self) -> Option<&dyn AuthorizedResource> { + Some(&self.parent) + } + fn resource_type(&self) -> ResourceType { + ResourceType::Rack + } + fn lookup_type(&self) -> &LookupType { + &self.lookup_type + } + fn as_resource_with_roles(&self) -> Option<&dyn ApiResourceWithRoles> { + None + } +} diff --git a/nexus/authz-macros/src/lib.rs b/nexus/authz-macros/src/lib.rs index 648ae6d952..0548113339 100644 --- a/nexus/authz-macros/src/lib.rs +++ b/nexus/authz-macros/src/lib.rs @@ -6,9 +6,11 @@ extern crate proc_macro; +use nexus_macros_common::PrimaryKeyType; use proc_macro2::TokenStream; use quote::{format_ident, quote}; use serde_tokenstream::ParseWrapper; +use syn::parse_quote; /// Defines a structure and helpers for describing an API resource for authz /// @@ -81,6 +83,24 @@ use serde_tokenstream::ParseWrapper; /// } /// ``` /// +/// ## Resources with typed UUID primary keys +/// +/// Some resources use a [newtype_uuid](https://crates.io/crates/newtype_uuid) +/// `TypedUuid` as their primary key (and resources should generally move over +/// to that model). +/// +/// This can be specified with `primary_key = { uuid_kind = MyKind }`: +/// +/// ```ignore +/// authz_resource! { +/// name = "LoopbackAddress", +/// parent = "Fleet", +/// primary_key = { uuid_kind = LoopbackAddressKind }, +/// roles_allowed = false, +/// polar_snippet = FleetChild, +/// } +/// ``` +/// /// ## Resources with non-id primary keys /// /// Most API resources use "id" (a Uuid) as an immutable, unique identifier. @@ -137,7 +157,7 @@ pub fn authz_resource( } /// Arguments for [`authz_resource!`] -#[derive(serde::Deserialize)] +#[derive(serde::Deserialize, Debug)] struct Input { /// Name of the resource /// @@ -146,8 +166,10 @@ struct Input { name: String, /// Name of the parent `authz` resource parent: String, - /// Rust type for the primary key for this resource - primary_key: ParseWrapper, + /// Rust type for the primary key for this resource. + primary_key: InputPrimaryKeyType, + /// The `TypedUuidKind` for this resource. 
Must be exclusive
+    /// with specifying `primary_key` as a plain Rust type.
+    ///
     /// Rust type for the input key for this resource (the key users specify
     /// for this resource, convertible to `primary_key`).
     ///
@@ -160,8 +182,77 @@ struct Input {
     polar_snippet: PolarSnippet,
 }
 
+#[derive(Debug)]
+struct InputPrimaryKeyType(PrimaryKeyType);
+
+impl<'de> serde::Deserialize<'de> for InputPrimaryKeyType {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        // Attempt to parse as either a string or a map.
+        struct PrimaryKeyVisitor;
+
+        impl<'de2> serde::de::Visitor<'de2> for PrimaryKeyVisitor {
+            type Value = PrimaryKeyType;
+
+            fn expecting(
+                &self,
+                formatter: &mut std::fmt::Formatter<'_>,
+            ) -> std::fmt::Result {
+                formatter.write_str(
+                    "a Rust type, or a map with a single key `uuid_kind`",
+                )
+            }
+
+            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
+            where
+                E: serde::de::Error,
+            {
+                syn::parse_str(value)
+                    .map(PrimaryKeyType::Standard)
+                    .map_err(|e| E::custom(e.to_string()))
+            }
+
+            // seq represents a tuple type
+            fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+            where
+                A: serde::de::SeqAccess<'de2>,
+            {
+                let mut elements = vec![];
+                while let Some(element) =
+                    seq.next_element::<ParseWrapper<syn::Type>>()?
+                {
+                    elements.push(element.into_inner());
+                }
+                Ok(PrimaryKeyType::Standard(parse_quote!((#(#elements,)*))))
+            }
+
+            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
+            where
+                A: serde::de::MapAccess<'de2>,
+            {
+                let key: String = map.next_key()?.ok_or_else(|| {
+                    serde::de::Error::custom("expected a single key")
+                })?;
+                if key == "uuid_kind" {
+                    // uuid kinds must be plain identifiers
+                    let value: ParseWrapper<syn::Ident> = map.next_value()?;
+                    Ok(PrimaryKeyType::new_typed_uuid(&value))
+                } else {
+                    Err(serde::de::Error::custom(
+                        "expected a single key `uuid_kind`",
+                    ))
+                }
+            }
+        }
+
+        deserializer.deserialize_any(PrimaryKeyVisitor).map(InputPrimaryKeyType)
+    }
+}
+
 /// How to generate the Polar snippet for this resource
-#[derive(serde::Deserialize)]
+#[derive(serde::Deserialize, Debug)]
 enum PolarSnippet {
     /// Don't generate it at all -- it's generated elsewhere
     Custom,
@@ -185,9 +276,8 @@ fn do_authz_resource(
     let resource_name = format_ident!("{}", input.name);
     let parent_resource_name = format_ident!("{}", input.parent);
     let parent_as_snake = heck::AsSnakeCase(&input.parent).to_string();
-    let primary_key_type = &*input.primary_key;
-    let input_key_type =
-        &**input.input_key.as_ref().unwrap_or(&input.primary_key);
+    let primary_key_type = input.primary_key.0.external();
+    let input_key_type = input.input_key.as_deref().unwrap_or(primary_key_type);
 
     let (has_role_body, as_roles_body, api_resource_roles_trait) =
         if input.roles_allowed {
@@ -493,6 +583,16 @@ mod tests {
         })
         .unwrap();
         assert_contents("outputs/instance.txt", &pretty_format(output));
+
+        let output = do_authz_resource(quote!
{ + name = "Rack", + parent = "Fleet", + primary_key = { uuid_kind = RackKind }, + roles_allowed = false, + polar_snippet = FleetChild, + }) + .unwrap(); + assert_contents("outputs/rack.txt", &pretty_format(output)); } fn pretty_format(input: TokenStream) -> String { diff --git a/nexus/db-macros/Cargo.toml b/nexus/db-macros/Cargo.toml index 46e5d9a5d6..8032ba814d 100644 --- a/nexus/db-macros/Cargo.toml +++ b/nexus/db-macros/Cargo.toml @@ -10,6 +10,7 @@ proc-macro = true [dependencies] heck.workspace = true +nexus-macros-common.workspace = true proc-macro2.workspace = true quote.workspace = true serde.workspace = true diff --git a/nexus/db-macros/outputs/asset_with_uuid_kind.txt b/nexus/db-macros/outputs/asset_with_uuid_kind.txt new file mode 100644 index 0000000000..a4a756248d --- /dev/null +++ b/nexus/db-macros/outputs/asset_with_uuid_kind.txt @@ -0,0 +1,49 @@ +///Auto-generated identity for [`AssetWithUuidKind`] from deriving [`macro@Asset`]. +#[derive( + Clone, + Debug, + PartialEq, + Selectable, + Queryable, + Insertable, + serde::Serialize, + serde::Deserialize +)] +#[diesel(table_name = my_target)] +pub struct AssetWithUuidKindIdentity { + pub id: crate::typed_uuid::DbTypedUuid<::omicron_uuid_kinds::CustomKind>, + pub time_created: ::chrono::DateTime<::chrono::Utc>, + pub time_modified: ::chrono::DateTime<::chrono::Utc>, +} +impl AssetWithUuidKindIdentity { + pub fn new( + id: ::omicron_uuid_kinds::TypedUuid<::omicron_uuid_kinds::CustomKind>, + ) -> Self { + let now = ::chrono::Utc::now(); + Self { + id: crate::to_db_typed_uuid(id), + time_created: now, + time_modified: now, + } + } +} +trait __AssetWithUuidKindIdentityMarker {} +impl __AssetWithUuidKindIdentityMarker for AssetWithUuidKindIdentity {} +const _: () = { + fn assert_identity() {} + fn assert_all() { + assert_identity::(); + } +}; +impl ::nexus_types::identity::Asset for AssetWithUuidKind { + type IdType = ::omicron_uuid_kinds::TypedUuid<::omicron_uuid_kinds::CustomKind>; + fn id(&self) -> ::omicron_uuid_kinds::TypedUuid<::omicron_uuid_kinds::CustomKind> { + ::omicron_uuid_kinds::TypedUuid::from(self.identity.id) + } + fn time_created(&self) -> ::chrono::DateTime<::chrono::Utc> { + self.identity.time_created + } + fn time_modified(&self) -> ::chrono::DateTime<::chrono::Utc> { + self.identity.time_modified + } +} diff --git a/nexus/db-macros/outputs/project.txt b/nexus/db-macros/outputs/project.txt index 9f4b5cfaa2..333cdb7acf 100644 --- a/nexus/db-macros/outputs/project.txt +++ b/nexus/db-macros/outputs/project.txt @@ -373,7 +373,9 @@ impl<'a> Project<'a> { e, ErrorHandler::NotFoundByLookup( ResourceType::Project, - LookupType::ById(v0.clone()), + LookupType::ById( + ::omicron_uuid_kinds::GenericUuid::into_untyped_uuid(*v0), + ), ), ) })?; @@ -386,7 +388,7 @@ impl<'a> Project<'a> { let authz_project = Self::make_authz( &authz_silo, &db_row, - LookupType::ById(v0.clone()), + LookupType::ById(::omicron_uuid_kinds::GenericUuid::into_untyped_uuid(*v0)), ); Ok((authz_silo, authz_project, db_row)) } diff --git a/nexus/db-macros/outputs/resource_with_uuid_kind.txt b/nexus/db-macros/outputs/resource_with_uuid_kind.txt new file mode 100644 index 0000000000..8d1628e7c9 --- /dev/null +++ b/nexus/db-macros/outputs/resource_with_uuid_kind.txt @@ -0,0 +1,66 @@ +///Auto-generated identity for [`ResourceWithUuidKind`] from deriving [`macro@Resource`]. 
+#[derive( + Clone, + Debug, + PartialEq, + Eq, + Selectable, + Queryable, + Insertable, + serde::Serialize, + serde::Deserialize +)] +#[diesel(table_name = my_target)] +pub struct ResourceWithUuidKindIdentity { + pub id: crate::typed_uuid::DbTypedUuid<::omicron_uuid_kinds::CustomKind>, + pub name: crate::db::model::Name, + pub description: ::std::string::String, + pub time_created: ::chrono::DateTime<::chrono::Utc>, + pub time_modified: ::chrono::DateTime<::chrono::Utc>, + pub time_deleted: ::std::option::Option>, +} +impl ResourceWithUuidKindIdentity { + pub fn new( + id: ::omicron_uuid_kinds::TypedUuid<::omicron_uuid_kinds::CustomKind>, + params: ::omicron_common::api::external::IdentityMetadataCreateParams, + ) -> Self { + let now = ::chrono::Utc::now(); + Self { + id: crate::to_db_typed_uuid(id), + name: params.name.into(), + description: params.description, + time_created: now, + time_modified: now, + time_deleted: None, + } + } +} +trait __ResourceWithUuidKindIdentityMarker {} +impl __ResourceWithUuidKindIdentityMarker for ResourceWithUuidKindIdentity {} +const _: () = { + fn assert_identity() {} + fn assert_all() { + assert_identity::(); + } +}; +impl ::nexus_types::identity::Resource for ResourceWithUuidKind { + type IdType = ::omicron_uuid_kinds::TypedUuid<::omicron_uuid_kinds::CustomKind>; + fn id(&self) -> ::omicron_uuid_kinds::TypedUuid<::omicron_uuid_kinds::CustomKind> { + ::omicron_uuid_kinds::TypedUuid::from(self.identity.id) + } + fn name(&self) -> &::omicron_common::api::external::Name { + &self.identity.name.0 + } + fn description(&self) -> &str { + &self.identity.description + } + fn time_created(&self) -> ::chrono::DateTime<::chrono::Utc> { + self.identity.time_created + } + fn time_modified(&self) -> ::chrono::DateTime<::chrono::Utc> { + self.identity.time_modified + } + fn time_deleted(&self) -> ::std::option::Option<::chrono::DateTime<::chrono::Utc>> { + self.identity.time_deleted + } +} diff --git a/nexus/db-macros/outputs/silo_user.txt b/nexus/db-macros/outputs/silo_user.txt index 2c9568ff54..1f42db54e8 100644 --- a/nexus/db-macros/outputs/silo_user.txt +++ b/nexus/db-macros/outputs/silo_user.txt @@ -169,14 +169,16 @@ impl<'a> SiloUser<'a> { e, ErrorHandler::NotFoundByLookup( ResourceType::SiloUser, - LookupType::ById(v0.clone()), + LookupType::ById( + ::omicron_uuid_kinds::GenericUuid::into_untyped_uuid(*v0), + ), ), ) })?; let authz_silo_user = Self::make_authz( &&authz::FLEET, &db_row, - LookupType::ById(v0.clone()), + LookupType::ById(::omicron_uuid_kinds::GenericUuid::into_untyped_uuid(*v0)), ); Ok((authz_silo_user, db_row)) } diff --git a/nexus/db-macros/outputs/sled.txt b/nexus/db-macros/outputs/sled.txt new file mode 100644 index 0000000000..30654b0e90 --- /dev/null +++ b/nexus/db-macros/outputs/sled.txt @@ -0,0 +1,182 @@ +///Selects a resource of type Sled (or any of its children, using the functions on this struct) for lookup or fetch +pub enum Sled<'a> { + /// An error occurred while selecting the resource + /// + /// This error will be returned by any lookup/fetch attempts. + Error(Root<'a>, Error), + /// We're looking for a resource with the given primary key + /// + /// This has no parent container -- a by-id lookup is always global + PrimaryKey( + Root<'a>, + ::omicron_uuid_kinds::TypedUuid<::omicron_uuid_kinds::SledKind>, + ), +} +impl<'a> Sled<'a> { + /// Fetch the record corresponding to the selected resource + /// + /// This is equivalent to `fetch_for(authz::Action::Read)`. 
+ pub async fn fetch(&self) -> LookupResult<(authz::Sled, nexus_db_model::Sled)> { + self.fetch_for(authz::Action::Read).await + } + /// Turn the Result of [`fetch`] into a Result, E>. + pub async fn optional_fetch( + &self, + ) -> LookupResult> { + self.optional_fetch_for(authz::Action::Read).await + } + /// Fetch the record corresponding to the selected resource and + /// check whether the caller is allowed to do the specified `action` + /// + /// The return value is a tuple that also includes the `authz` + /// objects for all resources along the path to this one (i.e., all + /// parent resources) and the authz object for this resource itself. + /// These objects are useful for identifying those resources by + /// id, for doing other authz checks, or for looking up related + /// objects. + pub async fn fetch_for( + &self, + action: authz::Action, + ) -> LookupResult<(authz::Sled, nexus_db_model::Sled)> { + let lookup = self.lookup_root(); + let opctx = &lookup.opctx; + let datastore = &lookup.datastore; + match &self { + Sled::Error(_, error) => Err(error.clone()), + Sled::PrimaryKey(_, v0) => { + Self::fetch_by_id_for(opctx, datastore, v0, action).await + } + } + } + /// Turn the Result of [`fetch_for`] into a Result, E>. + pub async fn optional_fetch_for( + &self, + action: authz::Action, + ) -> LookupResult> { + let result = self.fetch_for(action).await; + match result { + Err(Error::ObjectNotFound { type_name: _, lookup_type: _ }) => Ok(None), + _ => Ok(Some(result?)), + } + } + /// Fetch an `authz` object for the selected resource and check + /// whether the caller is allowed to do the specified `action` + /// + /// The return value is a tuple that also includes the `authz` + /// objects for all resources along the path to this one (i.e., all + /// parent resources) and the authz object for this resource itself. + /// These objects are useful for identifying those resources by + /// id, for doing other authz checks, or for looking up related + /// objects. + pub async fn lookup_for( + &self, + action: authz::Action, + ) -> LookupResult<(authz::Sled,)> { + let lookup = self.lookup_root(); + let opctx = &lookup.opctx; + let (authz_sled,) = self.lookup().await?; + opctx.authorize(action, &authz_sled).await?; + Ok((authz_sled,)) + } + /// Turn the Result of [`lookup_for`] into a Result, E>. + pub async fn optional_lookup_for( + &self, + action: authz::Action, + ) -> LookupResult> { + let result = self.lookup_for(action).await; + match result { + Err(Error::ObjectNotFound { type_name: _, lookup_type: _ }) => Ok(None), + _ => Ok(Some(result?)), + } + } + /// Fetch the "authz" objects for the selected resource and all its + /// parents + /// + /// This function does not check whether the caller has permission + /// to read this information. That's why it's not `pub`. Outside + /// this module, you want `lookup_for(authz::Action)`. 
+ async fn lookup(&self) -> LookupResult<(authz::Sled,)> { + let lookup = self.lookup_root(); + let opctx = &lookup.opctx; + let datastore = &lookup.datastore; + match &self { + Sled::Error(_, error) => Err(error.clone()), + Sled::PrimaryKey(_, v0) => { + let (authz_sled, _) = Self::lookup_by_id_no_authz(opctx, datastore, v0) + .await?; + Ok((authz_sled,)) + } + } + } + /// Build the `authz` object for this resource + fn make_authz( + authz_parent: &authz::Fleet, + db_row: &nexus_db_model::Sled, + lookup_type: LookupType, + ) -> authz::Sled { + authz::Sled::with_primary_key(authz_parent.clone(), db_row.id(), lookup_type) + } + /// Getting the [`LookupPath`] for this lookup + /// + /// This is used when we actually query the database. At that + /// point, we need the `OpContext` and `DataStore` that are being + /// used for this lookup. + fn lookup_root(&self) -> &LookupPath<'a> { + match &self { + Sled::Error(root, ..) => root.lookup_root(), + Sled::PrimaryKey(root, ..) => root.lookup_root(), + } + } + /// Fetch the database row for a resource by doing a lookup by id + /// + /// This function checks whether the caller has permissions to read + /// the requested data. However, it's not intended to be used + /// outside this module. See `fetch_for(authz::Action)`. + async fn fetch_by_id_for( + opctx: &OpContext, + datastore: &DataStore, + v0: &::omicron_uuid_kinds::TypedUuid<::omicron_uuid_kinds::SledKind>, + action: authz::Action, + ) -> LookupResult<(authz::Sled, nexus_db_model::Sled)> { + let (authz_sled, db_row) = Self::lookup_by_id_no_authz(opctx, datastore, v0) + .await?; + opctx.authorize(action, &authz_sled).await?; + Ok((authz_sled, db_row)) + } + /// Lowest-level function for looking up a resource in the database + /// by id + /// + /// This function does not check whether the caller has permission + /// to read this information. That's why it's not `pub`. Outside + /// this module, you want `fetch()` or `lookup_for(authz::Action)`. + async fn lookup_by_id_no_authz( + opctx: &OpContext, + datastore: &DataStore, + v0: &::omicron_uuid_kinds::TypedUuid<::omicron_uuid_kinds::SledKind>, + ) -> LookupResult<(authz::Sled, nexus_db_model::Sled)> { + use db::schema::sled::dsl; + let db_row = dsl::sled + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(::nexus_db_model::to_db_typed_uuid(v0.clone()))) + .select(nexus_db_model::Sled::as_select()) + .get_result_async(&*datastore.pool_connection_authorized(opctx).await?) 
+ .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::Sled, + LookupType::ById( + ::omicron_uuid_kinds::GenericUuid::into_untyped_uuid(*v0), + ), + ), + ) + })?; + let authz_sled = Self::make_authz( + &&authz::FLEET, + &db_row, + LookupType::ById(::omicron_uuid_kinds::GenericUuid::into_untyped_uuid(*v0)), + ); + Ok((authz_sled, db_row)) + } +} diff --git a/nexus/db-macros/src/lib.rs b/nexus/db-macros/src/lib.rs index fd15b59128..fd9aae4b0a 100644 --- a/nexus/db-macros/src/lib.rs +++ b/nexus/db-macros/src/lib.rs @@ -13,13 +13,17 @@ extern crate proc_macro; +use nexus_macros_common::PrimaryKeyType; use proc_macro2::TokenStream; use quote::{format_ident, quote}; +use serde_tokenstream::ParseWrapper; use syn::spanned::Spanned; -use syn::{Data, DataStruct, DeriveInput, Error, Fields, Ident}; +use syn::{parse_quote, Data, DataStruct, DeriveInput, Error, Fields, Ident}; mod lookup; mod subquery; +#[cfg(test)] +mod test_helpers; /// Defines a structure and helper functions for looking up resources /// @@ -65,8 +69,24 @@ mod subquery; /// } /// ``` /// -/// These define `Project<'a>` and `Instance<'a>`. For more on these structs -/// and how they're used, see nexus/src/db/lookup.rs. +/// These define `Project<'a>` and `Instance<'a>`. +/// +/// It is also possible to use a `TypedUuid` as a key column, by specifying +/// `uuid_kind` rather than `rust_type`. For example: +/// +/// ```ignore +/// lookup_resource! { +/// name = "Sled", +/// ancestors = [ "Organization", "Project" ], +/// children = [], +/// lookup_by_name = true, +/// soft_deletes = true, +/// primary_key_columns = [ { column_name = "id", uuid_kind = SledType } ] +/// } +/// ``` +/// +/// For more on these structs and how they're used, see +/// nexus/db-queries/src/lookup.rs. // Allow private intra-doc links. This is useful because the `Input` struct // cannot be exported (since we're a proc macro crate, and we can't expose // a struct), but its documentation is very useful. @@ -81,11 +101,14 @@ pub fn lookup_resource( } } -/// Looks for a Meta-style attribute with a particular identifier. +/// Looks for a Diesel Meta-style attribute with a particular identifier. /// /// As an example, for an attribute like `#[diesel(foo = bar)]`, we can find this /// attribute by calling `get_nv_attr(&item.attrs, "foo")`. -fn get_nv_attr(attrs: &[syn::Attribute], name: &str) -> Option { +fn get_diesel_nv_attr( + attrs: &[syn::Attribute], + name: &str, +) -> Option { attrs .iter() .filter(|attr| attr.path().is_ident("diesel")) @@ -141,11 +164,21 @@ pub fn subquery_target( } // Describes which derive macro is being used; allows sharing common code. +#[derive(Clone, Copy, Debug)] enum IdentityVariant { Asset, Resource, } +impl IdentityVariant { + fn attr_name(&self) -> &'static str { + match self { + IdentityVariant::Asset => "asset", + IdentityVariant::Resource => "resource", + } + } +} + /// Implements the "Resource" trait, and generates a bespoke Identity struct. /// /// Many tables within our database make use of common fields, @@ -160,7 +193,7 @@ enum IdentityVariant { /// Although these fields can be refactored into a common structure (to be used /// within the context of Diesel) they must be uniquely identified for a single /// table. 
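+///
+/// A usage sketch (mirroring the snapshot tests later in this file;
+/// `CustomKind` stands in for a registered UUID kind):
+///
+/// ```ignore
+/// #[derive(Resource)]
+/// #[diesel(table_name = my_target)]
+/// #[resource(uuid_kind = CustomKind)]
+/// struct MyTarget {
+///     identity: MyTargetIdentity,
+/// }
+/// ```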
-#[proc_macro_derive(Resource)] +#[proc_macro_derive(Resource, attributes(resource))] pub fn resource_target( input: proc_macro::TokenStream, ) -> proc_macro::TokenStream { @@ -175,7 +208,7 @@ pub fn resource_target( /// - ID /// - Time Created /// - Time Modified -#[proc_macro_derive(Asset)] +#[proc_macro_derive(Asset, attributes(asset))] pub fn asset_target(input: proc_macro::TokenStream) -> proc_macro::TokenStream { derive_impl(input.into(), IdentityVariant::Asset) .unwrap_or_else(|e| e.to_compile_error()) @@ -208,18 +241,22 @@ fn derive_impl( let name = &item.ident; // Ensure that the "table_name" attribute exists, and get it. - let table_nv = get_nv_attr(&item.attrs, "table_name").ok_or_else(|| { - Error::new( - item.span(), - format!( - "Resource needs 'table_name' attribute.\n\ + let table_nv = + get_diesel_nv_attr(&item.attrs, "table_name").ok_or_else(|| { + Error::new( + item.span(), + format!( + "Resource needs 'table_name' attribute.\n\ Try adding #[diesel(table_name = your_table_name)] to {}.", - name - ), - ) - })?; + name + ), + ) + })?; let table_name = table_nv.value; + let input = MacroAttributes::parse_from_attrs(&item.attrs, flavor)?; + let uuid_ty = input.uuid_ty(); + // Ensure that a field named "identity" exists within this struct. if let Data::Struct(ref data) = item.data { // We extract type of "identity" and enforce it is the expected type @@ -237,28 +274,64 @@ fn derive_impl( ) })?; - return Ok(build(name, &table_name, &field.ty, flavor)); + return Ok(build(name, &table_name, &field.ty, &uuid_ty, flavor)); } Err(Error::new(item.span(), "Resource can only be derived for structs")) } +/// Attributes specific to the `Resource` and `Asset` derive macros. +#[derive(serde::Deserialize)] +#[serde(deny_unknown_fields)] +struct MacroAttributes { + /// The `TypedUuid` type parameter. + #[serde(default)] + uuid_kind: Option>, +} + +impl MacroAttributes { + fn parse_from_attrs( + attrs: &[syn::Attribute], + flavor: IdentityVariant, + ) -> syn::Result { + let inner_attrs = attrs + .iter() + .filter_map(|attr| { + attr.path().is_ident(flavor.attr_name()).then(|| { + let meta_list = attr.meta.require_list()?; + Ok::<_, syn::Error>(&meta_list.tokens) + }) + }) + .collect::, _>>()?; + let tokens = quote! { #(#inner_attrs,)* }; + serde_tokenstream::from_tokenstream(&tokens) + } + + fn uuid_ty(&self) -> PrimaryKeyType { + self.uuid_kind.as_ref().map_or_else( + || PrimaryKeyType::Standard(parse_quote!(::uuid::Uuid)), + |v| PrimaryKeyType::new_typed_uuid(v), + ) + } +} + // Emits generated structures, depending on the requested flavor of identity. 
fn build( struct_name: &Ident, table_name: &syn::Path, observed_identity_ty: &syn::Type, + uuid_ty: &PrimaryKeyType, flavor: IdentityVariant, ) -> TokenStream { let (identity_struct, resource_impl) = { match flavor { IdentityVariant::Resource => ( - build_resource_identity(struct_name, table_name), - build_resource_impl(struct_name, observed_identity_ty), + build_resource_identity(struct_name, table_name, uuid_ty), + build_resource_impl(struct_name, observed_identity_ty, uuid_ty), ), IdentityVariant::Asset => ( - build_asset_identity(struct_name, table_name), - build_asset_impl(struct_name, observed_identity_ty), + build_asset_identity(struct_name, table_name, uuid_ty), + build_asset_impl(struct_name, observed_identity_ty, uuid_ty), ), } }; @@ -272,18 +345,25 @@ fn build( fn build_resource_identity( struct_name: &Ident, table_name: &syn::Path, + uuid_ty: &PrimaryKeyType, ) -> TokenStream { let identity_doc = format!( "Auto-generated identity for [`{}`] from deriving [`macro@Resource`].", struct_name, ); let identity_name = format_ident!("{}Identity", struct_name); + + let external_uuid_ty = uuid_ty.external(); + let db_uuid_ty = uuid_ty.db(); + let convert_external_to_db = + uuid_ty.external_to_db_nexus_db_model(quote! { id }); + quote! { #[doc = #identity_doc] #[derive(Clone, Debug, PartialEq, Eq, Selectable, Queryable, Insertable, serde::Serialize, serde::Deserialize)] #[diesel(table_name = #table_name) ] pub struct #identity_name { - pub id: ::uuid::Uuid, + pub id: #db_uuid_ty, pub name: crate::db::model::Name, pub description: ::std::string::String, pub time_created: ::chrono::DateTime<::chrono::Utc>, @@ -293,12 +373,12 @@ fn build_resource_identity( impl #identity_name { pub fn new( - id: ::uuid::Uuid, + id: #external_uuid_ty, params: ::omicron_common::api::external::IdentityMetadataCreateParams ) -> Self { let now = ::chrono::Utc::now(); Self { - id, + id: #convert_external_to_db, name: params.name.into(), description: params.description, time_created: now, @@ -314,29 +394,36 @@ fn build_resource_identity( fn build_asset_identity( struct_name: &Ident, table_name: &syn::Path, + uuid_ty: &PrimaryKeyType, ) -> TokenStream { let identity_doc = format!( "Auto-generated identity for [`{}`] from deriving [`macro@Asset`].", struct_name, ); let identity_name = format_ident!("{}Identity", struct_name); + + let external_uuid_ty = uuid_ty.external(); + let db_uuid_ty = uuid_ty.db(); + let convert_external_to_db = + uuid_ty.external_to_db_nexus_db_model(quote! { id }); + quote! { #[doc = #identity_doc] #[derive(Clone, Debug, PartialEq, Selectable, Queryable, Insertable, serde::Serialize, serde::Deserialize)] #[diesel(table_name = #table_name) ] pub struct #identity_name { - pub id: ::uuid::Uuid, + pub id: #db_uuid_ty, pub time_created: ::chrono::DateTime<::chrono::Utc>, pub time_modified: ::chrono::DateTime<::chrono::Utc>, } impl #identity_name { pub fn new( - id: ::uuid::Uuid, + id: #external_uuid_ty, ) -> Self { let now = ::chrono::Utc::now(); Self { - id, + id: #convert_external_to_db, time_created: now, time_modified: now, } @@ -349,9 +436,15 @@ fn build_asset_identity( fn build_resource_impl( struct_name: &Ident, observed_identity_type: &syn::Type, + uuid_ty: &PrimaryKeyType, ) -> TokenStream { let identity_trait = format_ident!("__{}IdentityMarker", struct_name); let identity_name = format_ident!("{}Identity", struct_name); + + let external_uuid_ty = uuid_ty.external(); + let convert_db_to_external = + uuid_ty.db_to_external(quote! { self.identity.id }); + quote! 
{ // Verify that the field named "identity" is actually the generated // type within the struct deriving Resource. @@ -365,8 +458,10 @@ fn build_resource_impl( }; impl ::nexus_types::identity::Resource for #struct_name { - fn id(&self) -> ::uuid::Uuid { - self.identity.id + type IdType = #external_uuid_ty; + + fn id(&self) -> #external_uuid_ty { + #convert_db_to_external } fn name(&self) -> &::omicron_common::api::external::Name { @@ -396,9 +491,15 @@ fn build_resource_impl( fn build_asset_impl( struct_name: &Ident, observed_identity_type: &syn::Type, + uuid_ty: &PrimaryKeyType, ) -> TokenStream { let identity_trait = format_ident!("__{}IdentityMarker", struct_name); let identity_name = format_ident!("{}Identity", struct_name); + + let external_uuid_ty = uuid_ty.external(); + let convert_db_to_external = + uuid_ty.db_to_external(quote! { self.identity.id }); + quote! { // Verify that the field named "identity" is actually the generated // type within the struct deriving Asset. @@ -412,8 +513,10 @@ fn build_asset_impl( }; impl ::nexus_types::identity::Asset for #struct_name { - fn id(&self) -> ::uuid::Uuid { - self.identity.id + type IdType = #external_uuid_ty; + + fn id(&self) -> #external_uuid_ty { + #convert_db_to_external } fn time_created(&self) -> ::chrono::DateTime<::chrono::Utc> { @@ -431,6 +534,10 @@ fn build_asset_impl( mod tests { use super::*; + use crate::test_helpers::pretty_format; + + use expectorate::assert_contents; + #[test] fn test_derive_metadata_identity_fails_without_table_name() { let out = derive_impl( @@ -532,4 +639,64 @@ mod tests { ); assert!(out.is_ok()); } + + #[test] + fn test_derive_with_unknown_field() { + let out = derive_impl( + quote! { + #[derive(Resource)] + #[diesel(table_name = my_target)] + #[resource(foo = bar)] + struct MyTarget { + identity: MyTargetIdentity, + name: String, + is_cool: bool, + } + }, + IdentityVariant::Resource, + ); + let error = out.expect_err("input has unknown parameter for resource"); + assert!(error.to_string().contains("unknown field `foo`")); + } + + #[test] + fn test_derive_snapshots() { + let out = derive_impl( + quote! { + #[derive(Asset)] + #[diesel(table_name = my_target)] + #[asset(uuid_kind = CustomKind)] + struct AssetWithUuidKind { + identity: AssetWithUuidKindIdentity, + name: String, + is_cool: bool, + } + }, + IdentityVariant::Asset, + ) + .unwrap(); + assert_contents( + "outputs/asset_with_uuid_kind.txt", + &pretty_format(out), + ); + + let out = derive_impl( + quote! { + #[derive(Resource)] + #[diesel(table_name = my_target)] + #[resource(uuid_kind = CustomKind)] + struct ResourceWithUuidKind { + identity: ResourceWithUuidKindIdentity, + name: String, + is_cool: bool, + } + }, + IdentityVariant::Resource, + ) + .unwrap(); + assert_contents( + "outputs/resource_with_uuid_kind.txt", + &pretty_format(out), + ); + } } diff --git a/nexus/db-macros/src/lookup.rs b/nexus/db-macros/src/lookup.rs index c04c373ccb..3d3e93a863 100644 --- a/nexus/db-macros/src/lookup.rs +++ b/nexus/db-macros/src/lookup.rs @@ -6,10 +6,11 @@ //! //! See nexus/src/db/lookup.rs. 
+use nexus_macros_common::PrimaryKeyType; use proc_macro2::TokenStream; use quote::{format_ident, quote}; use serde_tokenstream::ParseWrapper; -use std::ops::Deref; +use syn::spanned::Spanned; // // INPUT (arguments to the macro) @@ -39,7 +40,7 @@ pub struct Input { /// whether lookup by name is supported (usually within the parent collection) lookup_by_name: bool, /// Description of the primary key columns - primary_key_columns: Vec, + primary_key_columns: Vec, /// This resources supports soft-deletes soft_deletes: bool, /// This resource appears under the `Silo` hierarchy, but nevertheless @@ -52,10 +53,47 @@ pub struct Input { visible_outside_silo: bool, } -#[derive(serde::Deserialize)] struct PrimaryKeyColumn { column_name: String, - rust_type: ParseWrapper, + ty: PrimaryKeyType, +} + +#[derive(serde::Deserialize)] +struct InputPrimaryKeyColumn { + column_name: String, + // Exactly one of rust_type and uuid_kind must be specified. + #[serde(default)] + rust_type: Option>, + #[serde(default)] + uuid_kind: Option>, +} + +impl InputPrimaryKeyColumn { + fn validate(self) -> syn::Result { + let ty = match (self.rust_type, self.uuid_kind) { + (Some(rust_type), Some(_)) => { + return Err(syn::Error::new( + rust_type.span(), + "only one of rust_type and uuid_kind may be specified", + )); + } + (Some(rust_type), None) => { + PrimaryKeyType::Standard(rust_type.into_inner()) + } + (None, Some(uuid_kind)) => { + PrimaryKeyType::new_typed_uuid(&uuid_kind) + } + (None, None) => { + return Err(syn::Error::new( + proc_macro2::Span::call_site(), + "for primary_key_columns, \ + one of rust_type and uuid_kind must be specified", + )); + } + }; + + Ok(PrimaryKeyColumn { column_name: self.column_name, ty }) + } } // @@ -106,7 +144,7 @@ pub struct Config { } impl Config { - fn for_input(input: Input) -> Config { + fn for_input(input: Input) -> syn::Result { let resource = Resource::for_name(&input.name); let mut path_types: Vec<_> = @@ -127,7 +165,13 @@ impl Config { let silo_restricted = !input.visible_outside_silo && input.ancestors.iter().any(|s| s == "Silo"); - Config { + let primary_key_columns: Vec<_> = input + .primary_key_columns + .into_iter() + .map(|c| c.validate()) + .collect::>()?; + + Ok(Config { resource, silo_restricted, path_types, @@ -135,9 +179,9 @@ impl Config { parent, child_resources, lookup_by_name: input.lookup_by_name, - primary_key_columns: input.primary_key_columns, + primary_key_columns, soft_deletes: input.soft_deletes, - } + }) } } @@ -172,7 +216,7 @@ pub fn lookup_resource( raw_input: TokenStream, ) -> Result { let input = serde_tokenstream::from_tokenstream::(&raw_input)?; - let config = Config::for_input(input); + let config = Config::for_input(input)?; let resource_name = &config.resource.name; let the_basics = generate_struct(&config); @@ -204,8 +248,7 @@ fn generate_struct(config: &Config) -> TokenStream { functions on this struct) for lookup or fetch", resource_name ); - let pkey_types = - config.primary_key_columns.iter().map(|c| c.rust_type.deref()); + let pkey_types = config.primary_key_columns.iter().map(|c| c.ty.external()); /* configure the lookup enum */ let name_variant = if config.lookup_by_name { @@ -706,11 +749,8 @@ fn generate_database_functions(config: &Config) -> TokenStream { let resource_as_snake = format_ident!("{}", &config.resource.name_as_snake); let path_types = &config.path_types; let path_authz_names = &config.path_authz_names; - let pkey_types: Vec<_> = config - .primary_key_columns - .iter() - .map(|c| c.rust_type.deref()) - .collect(); + let 
pkey_types: Vec<_> = + config.primary_key_columns.iter().map(|c| c.ty.external()).collect(); let pkey_column_names = config .primary_key_columns .iter() @@ -721,6 +761,18 @@ fn generate_database_functions(config: &Config) -> TokenStream { .enumerate() .map(|(i, _)| format_ident!("v{}", i)) .collect(); + + // Generate tokens that also perform conversion from external to db types, + // if necessary. + let pkey_names_convert: Vec<_> = config + .primary_key_columns + .iter() + .zip(pkey_names.iter()) + .map(|(col, name)| { + col.ty.external_to_db_other(quote! { #name.clone() }) + }) + .collect(); + let ( parent_lookup_arg_formal, parent_lookup_arg_actual, @@ -837,7 +889,11 @@ fn generate_database_functions(config: &Config) -> TokenStream { let lookup_type = if config.primary_key_columns.len() == 1 && config.primary_key_columns[0].column_name == "id" { - quote! { LookupType::ById(#(#pkey_names.clone())*) } + let pkey_name = &pkey_names[0]; + let by_id = quote! { + ::omicron_uuid_kinds::GenericUuid::into_untyped_uuid(*#pkey_name) + }; + quote! { LookupType::ById(#by_id) } } else { let fmtstr = config .primary_key_columns @@ -889,7 +945,7 @@ fn generate_database_functions(config: &Config) -> TokenStream { let db_row = dsl::#resource_as_snake #soft_delete_filter - #(.filter(dsl::#pkey_column_names.eq(#pkey_names.clone())))* + #(.filter(dsl::#pkey_column_names.eq(#pkey_names_convert)))* .select(nexus_db_model::#resource_name::as_select()) .get_result_async(&*datastore.pool_connection_authorized(opctx).await?) .await @@ -915,9 +971,10 @@ fn generate_database_functions(config: &Config) -> TokenStream { #[cfg(test)] mod test { + use crate::test_helpers::pretty_format; + use super::lookup_resource; use expectorate::assert_contents; - use proc_macro2::TokenStream; use quote::quote; /// Ensure that generated code is as expected. @@ -953,6 +1010,17 @@ mod test { .unwrap(); assert_contents("outputs/silo_user.txt", &pretty_format(output)); + let output = lookup_resource(quote! { + name = "Sled", + ancestors = [], + children = [], + lookup_by_name = false, + soft_deletes = true, + primary_key_columns = [ { column_name = "id", uuid_kind = SledKind } ] + }) + .unwrap(); + assert_contents("outputs/sled.txt", &pretty_format(output)); + let output = lookup_resource(quote! { name = "UpdateArtifact", ancestors = [], @@ -968,9 +1036,4 @@ mod test { .unwrap(); assert_contents("outputs/update_artifact.txt", &pretty_format(output)); } - - fn pretty_format(input: TokenStream) -> String { - let parsed = syn::parse2(input).unwrap(); - prettyplease::unparse(&parsed) - } } diff --git a/nexus/db-macros/src/test_helpers.rs b/nexus/db-macros/src/test_helpers.rs new file mode 100644 index 0000000000..c069cb5c02 --- /dev/null +++ b/nexus/db-macros/src/test_helpers.rs @@ -0,0 +1,10 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +use proc_macro2::TokenStream; + +pub(crate) fn pretty_format(input: TokenStream) -> String { + let parsed = syn::parse2(input).unwrap(); + prettyplease::unparse(&parsed) +} diff --git a/nexus/db-model/Cargo.toml b/nexus/db-model/Cargo.toml index 477ce7d11f..43b6f6a4b0 100644 --- a/nexus/db-model/Cargo.toml +++ b/nexus/db-model/Cargo.toml @@ -10,11 +10,13 @@ omicron-rpaths.workspace = true [dependencies] anyhow.workspace = true chrono.workspace = true +derive-where.workspace = true diesel = { workspace = true, features = ["postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] } hex.workspace = true ipnetwork.workspace = true macaddr.workspace = true newtype_derive.workspace = true +omicron-uuid-kinds.workspace = true parse-display.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. pq-sys = "*" diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index b77d56059e..ecbb8365fe 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -81,6 +81,7 @@ mod snapshot; mod ssh_key; mod switch; mod tuf_repo; +mod typed_uuid; mod unsigned; mod user_builtin; mod utilization; @@ -170,6 +171,7 @@ pub use switch::*; pub use switch_interface::*; pub use switch_port::*; pub use tuf_repo::*; +pub use typed_uuid::to_db_typed_uuid; pub use user_builtin::*; pub use utilization::*; pub use virtual_provisioning_collection::*; diff --git a/nexus/db-model/src/switch_interface.rs b/nexus/db-model/src/switch_interface.rs index 71673354ea..78f502ed4c 100644 --- a/nexus/db-model/src/switch_interface.rs +++ b/nexus/db-model/src/switch_interface.rs @@ -9,6 +9,8 @@ use ipnetwork::IpNetwork; use nexus_types::external_api::params; use nexus_types::identity::Asset; use omicron_common::api::external; +use omicron_uuid_kinds::LoopbackAddressKind; +use omicron_uuid_kinds::TypedUuid; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -105,6 +107,7 @@ impl Into for SwitchVlanInterfaceConfig { Deserialize, )] #[diesel(table_name = loopback_address)] +#[asset(uuid_kind = LoopbackAddressKind)] pub struct LoopbackAddress { #[diesel(embed)] pub identity: LoopbackAddressIdentity, @@ -118,7 +121,7 @@ pub struct LoopbackAddress { impl LoopbackAddress { pub fn new( - id: Option, + id: Option>, address_lot_block_id: Uuid, rsvd_address_lot_block_id: Uuid, rack_id: Uuid, @@ -128,7 +131,7 @@ impl LoopbackAddress { ) -> Self { Self { identity: LoopbackAddressIdentity::new( - id.unwrap_or(Uuid::new_v4()), + id.unwrap_or(TypedUuid::new_v4()), ), address_lot_block_id, rsvd_address_lot_block_id, diff --git a/nexus/db-model/src/tuf_repo.rs b/nexus/db-model/src/tuf_repo.rs index 5fa2a0aac7..4a64566a62 100644 --- a/nexus/db-model/src/tuf_repo.rs +++ b/nexus/db-model/src/tuf_repo.rs @@ -6,6 +6,7 @@ use std::str::FromStr; use crate::{ schema::{tuf_artifact, tuf_repo, tuf_repo_artifact}, + typed_uuid::DbTypedUuid, SemverVersion, }; use chrono::{DateTime, Utc}; @@ -17,6 +18,8 @@ use omicron_common::{ ArtifactKind, }, }; +use omicron_uuid_kinds::TufRepoKind; +use omicron_uuid_kinds::TypedUuid; use serde::{Deserialize, Serialize}; use std::fmt; use uuid::Uuid; @@ -73,7 +76,7 @@ impl TufRepoDescription { )] #[diesel(table_name = tuf_repo)] pub struct TufRepo { - pub id: Uuid, + pub id: DbTypedUuid, pub time_created: DateTime, // XXX: We're overloading ArtifactHash here to also mean the hash of the // repository zip itself. 
@@ -94,7 +97,7 @@ impl TufRepo { file_name: String, ) -> Self { Self { - id: Uuid::new_v4(), + id: TypedUuid::new_v4().into(), time_created: Utc::now(), sha256, targets_role_version: targets_role_version as i64, @@ -132,8 +135,8 @@ impl TufRepo { } /// Returns the repository's ID. - pub fn id(&self) -> Uuid { - self.id + pub fn id(&self) -> TypedUuid { + self.id.into() } /// Returns the targets role version. diff --git a/nexus/db-model/src/typed_uuid.rs b/nexus/db-model/src/typed_uuid.rs new file mode 100644 index 0000000000..7785b8c7dc --- /dev/null +++ b/nexus/db-model/src/typed_uuid.rs @@ -0,0 +1,116 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Typed UUID instances. + +use derive_where::derive_where; +use diesel::backend::Backend; +use diesel::deserialize::{self, FromSql}; +use diesel::serialize::{self, ToSql}; +use diesel::sql_types; +use omicron_uuid_kinds::{GenericUuid, TypedUuid, TypedUuidKind}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::str::FromStr; +use uuid::Uuid; + +/// Returns the corresponding `DbTypedUuid` for this `TypedUuid`. +/// +/// Code external to the `db-model` crate sometimes needs a way to convert a +/// `TypedUuid` to a `DbTypedUuid`. We don't want `DbTypedUuid` to be used +/// anywhere, so we don't make it public. Instead, we expose this function. +#[inline] +pub fn to_db_typed_uuid(id: TypedUuid) -> DbTypedUuid { + DbTypedUuid(id) +} + +/// A UUID with information about the kind of type it is. +/// +/// Despite the fact that this is marked `pub`, this is *private* to the +/// `db-model` crate (this type is not exported at the top level). External +/// users must use omicron-common's `TypedUuid`. 
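+///
+/// Conversion sketch (`K` stands for any UUID kind; this mirrors how the
+/// datastore changes in this PR convert at the database boundary):
+///
+/// ```ignore
+/// let db_id = to_db_typed_uuid(id);    // TypedUuid<K> -> DbTypedUuid<K>
+/// let id: TypedUuid<K> = db_id.into(); // ...and back out
+/// ```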
+#[derive_where(Clone, Copy, Eq, Ord, PartialEq, PartialOrd, Hash)]
+#[derive(AsExpression, FromSqlRow, Serialize, Deserialize)]
+#[diesel(sql_type = sql_types::Uuid)]
+#[serde(transparent, bound = "")]
+pub struct DbTypedUuid<T: TypedUuidKind>(pub(crate) TypedUuid<T>);
+
+impl<T: TypedUuidKind, DB> ToSql<sql_types::Uuid, DB> for DbTypedUuid<T>
+where
+    DB: Backend,
+    Uuid: ToSql<sql_types::Uuid, DB>,
+{
+    fn to_sql<'a>(
+        &'a self,
+        out: &mut serialize::Output<'a, '_, DB>,
+    ) -> serialize::Result {
+        self.0.as_untyped_uuid().to_sql(out)
+    }
+}
+
+impl<T: TypedUuidKind, DB> FromSql<sql_types::Uuid, DB> for DbTypedUuid<T>
+where
+    DB: Backend,
+    Uuid: FromSql<sql_types::Uuid, DB>,
+{
+    #[inline]
+    fn from_sql(bytes: DB::RawValue<'_>) -> deserialize::Result<Self> {
+        let id = Uuid::from_sql(bytes)?;
+        Ok(TypedUuid::from_untyped_uuid(id).into())
+    }
+}
+
+impl<T: TypedUuidKind> fmt::Debug for DbTypedUuid<T> {
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+impl<T: TypedUuidKind> fmt::Display for DbTypedUuid<T> {
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+impl<T: TypedUuidKind> FromStr for DbTypedUuid<T> {
+    type Err = omicron_uuid_kinds::ParseError;
+
+    #[inline]
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(TypedUuid::from_str(s)?.into())
+    }
+}
+
+impl<T: TypedUuidKind> From<TypedUuid<T>> for DbTypedUuid<T> {
+    #[inline]
+    fn from(id: TypedUuid<T>) -> Self {
+        Self(id)
+    }
+}
+
+impl<T: TypedUuidKind> From<DbTypedUuid<T>> for TypedUuid<T> {
+    #[inline]
+    fn from(id: DbTypedUuid<T>) -> Self {
+        id.0
+    }
+}
+
+impl<T: TypedUuidKind> GenericUuid for DbTypedUuid<T> {
+    #[inline]
+    fn from_untyped_uuid(uuid: Uuid) -> Self {
+        TypedUuid::from_untyped_uuid(uuid).into()
+    }
+
+    #[inline]
+    fn into_untyped_uuid(self) -> Uuid {
+        self.0.into_untyped_uuid()
+    }
+
+    #[inline]
+    fn as_untyped_uuid(&self) -> &Uuid {
+        self.0.as_untyped_uuid()
+    }
+}
diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml
index 9cdcc88e6a..9c9a30799e 100644
--- a/nexus/db-queries/Cargo.toml
+++ b/nexus/db-queries/Cargo.toml
@@ -45,7 +45,7 @@ static_assertions.workspace = true
 steno.workspace = true
 swrite.workspace = true
 thiserror.workspace = true
-tokio = { workspace = true, features = [ "full" ] }
+tokio = { workspace = true, features = ["full"] }
 uuid.workspace = true
 usdt.workspace = true
 
@@ -55,6 +55,7 @@ nexus-db-model.workspace = true
 nexus-types.workspace = true
 omicron-common.workspace = true
 omicron-passwords.workspace = true
+omicron-uuid-kinds.workspace = true
 oximeter.workspace = true
 omicron-workspace-hack.workspace = true
 
diff --git a/nexus/db-queries/src/authz/api_resources.rs b/nexus/db-queries/src/authz/api_resources.rs
index b4fd4e1890..70bc9ab2eb 100644
--- a/nexus/db-queries/src/authz/api_resources.rs
+++ b/nexus/db-queries/src/authz/api_resources.rs
@@ -869,7 +869,7 @@ authz_resource! {
 authz_resource! {
     name = "LoopbackAddress",
     parent = "Fleet",
-    primary_key = Uuid,
+    primary_key = { uuid_kind = LoopbackAddressKind },
     roles_allowed = false,
     polar_snippet = FleetChild,
 }
@@ -1068,7 +1068,7 @@ authz_resource!
{ name = "TufRepo", parent = "Fleet", - primary_key = Uuid, + primary_key = { uuid_kind = TufRepoKind }, roles_allowed = false, polar_snippet = FleetChild, } diff --git a/nexus/db-queries/src/authz/policy_test/resources.rs b/nexus/db-queries/src/authz/policy_test/resources.rs index 3e87f6db51..96cefb3db4 100644 --- a/nexus/db-queries/src/authz/policy_test/resources.rs +++ b/nexus/db-queries/src/authz/policy_test/resources.rs @@ -10,6 +10,7 @@ use crate::authz; use crate::db::model::ArtifactId; use nexus_db_model::SemverVersion; use omicron_common::api::external::LookupType; +use omicron_uuid_kinds::GenericUuid; use oso::PolarClass; use std::collections::BTreeSet; use uuid::Uuid; @@ -132,7 +133,7 @@ pub async fn make_resources( builder.new_resource(authz::TufRepo::new( authz::FLEET, tuf_repo_id, - LookupType::ById(tuf_repo_id), + LookupType::ById(tuf_repo_id.into_untyped_uuid()), )); let artifact_id = ArtifactId { @@ -160,7 +161,7 @@ pub async fn make_resources( builder.new_resource(authz::LoopbackAddress::new( authz::FLEET, loopback_address_id, - LookupType::ById(loopback_address_id), + LookupType::ById(loopback_address_id.into_untyped_uuid()), )); builder.build() diff --git a/nexus/db-queries/src/db/datastore/switch_interface.rs b/nexus/db-queries/src/db/datastore/switch_interface.rs index 67f16fa08f..aa4f6747fd 100644 --- a/nexus/db-queries/src/db/datastore/switch_interface.rs +++ b/nexus/db-queries/src/db/datastore/switch_interface.rs @@ -17,11 +17,14 @@ use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use ipnetwork::IpNetwork; +use nexus_db_model::to_db_typed_uuid; use nexus_types::external_api::params::LoopbackAddressCreate; use omicron_common::api::external::{ CreateResult, DataPageParams, DeleteResult, Error, ListResultVec, LookupResult, ResourceType, }; +use omicron_uuid_kinds::LoopbackAddressKind; +use omicron_uuid_kinds::TypedUuid; use uuid::Uuid; impl DataStore { @@ -29,7 +32,7 @@ impl DataStore { &self, opctx: &OpContext, params: &LoopbackAddressCreate, - id: Option, + id: Option>, authz_address_lot: &authz::AddressLot, ) -> CreateResult { use db::schema::loopback_address::dsl; @@ -130,7 +133,7 @@ impl DataStore { self.transaction_retry_wrapper("loopback_address_delete") .transaction(&conn, |conn| async move { let la = diesel::delete(dsl::loopback_address) - .filter(dsl::id.eq(id)) + .filter(dsl::id.eq(to_db_typed_uuid(id))) .returning(LoopbackAddress::as_returning()) .get_result_async(&conn) .await?; @@ -159,7 +162,7 @@ impl DataStore { let conn = self.pool_connection_authorized(opctx).await?; loopback_dsl::loopback_address - .filter(loopback_address::id.eq(id)) + .filter(loopback_address::id.eq(to_db_typed_uuid(id))) .select(LoopbackAddress::as_select()) .limit(1) .first_async::(&*conn) diff --git a/nexus/db-queries/src/db/datastore/update.rs b/nexus/db-queries/src/db/datastore/update.rs index 3725797f83..d73a3d903f 100644 --- a/nexus/db-queries/src/db/datastore/update.rs +++ b/nexus/db-queries/src/db/datastore/update.rs @@ -21,8 +21,9 @@ use omicron_common::api::external::{ self, CreateResult, LookupResult, LookupType, ResourceType, TufRepoInsertStatus, }; +use omicron_uuid_kinds::TufRepoKind; +use omicron_uuid_kinds::TypedUuid; use swrite::{swrite, SWrite}; -use uuid::Uuid; /// The return value of [`DataStore::update_tuf_repo_description_insert`]. 
/// @@ -43,7 +44,7 @@ impl TufRepoInsertResponse { } async fn artifacts_for_repo( - repo_id: Uuid, + repo_id: TypedUuid, conn: &async_bb8_diesel::Connection, ) -> Result, DieselError> { use db::schema::tuf_artifact::dsl as tuf_artifact_dsl; @@ -61,7 +62,10 @@ async fn artifacts_for_repo( // Don't bother paginating because each repo should only have a few (under // 20) artifacts. tuf_repo_artifact_dsl::tuf_repo_artifact - .filter(tuf_repo_artifact_dsl::tuf_repo_id.eq(repo_id)) + .filter( + tuf_repo_artifact_dsl::tuf_repo_id + .eq(nexus_db_model::to_db_typed_uuid(repo_id)), + ) .inner_join(tuf_artifact_dsl::tuf_artifact.on(join_on_dsl)) .select(TufArtifact::as_select()) .load_async(conn) @@ -138,7 +142,7 @@ impl DataStore { ) })?; - let artifacts = artifacts_for_repo(repo.id, &conn) + let artifacts = artifacts_for_repo(repo.id.into(), &conn) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; Ok(TufRepoDescription { repo, artifacts }) @@ -177,7 +181,8 @@ async fn insert_impl( } // Just return the existing repo along with all of its artifacts. - let artifacts = artifacts_for_repo(existing_repo.id, &conn).await?; + let artifacts = + artifacts_for_repo(existing_repo.id.into(), &conn).await?; let recorded = TufRepoDescription { repo: existing_repo, artifacts }; diff --git a/nexus/db-queries/src/db/lookup.rs b/nexus/db-queries/src/db/lookup.rs index 1cf14c5a8f..18ea369685 100644 --- a/nexus/db-queries/src/db/lookup.rs +++ b/nexus/db-queries/src/db/lookup.rs @@ -21,6 +21,8 @@ use nexus_db_model::Name; use omicron_common::api::external::Error; use omicron_common::api::external::InternalContext; use omicron_common::api::external::{LookupResult, LookupType, ResourceType}; +use omicron_uuid_kinds::TufRepoKind; +use omicron_uuid_kinds::TypedUuid; use uuid::Uuid; /// Look up an API resource in the database @@ -431,7 +433,7 @@ impl<'a> LookupPath<'a> { } /// Select a resource of type TufRepo, identified by its UUID. - pub fn tuf_repo_id(self, id: Uuid) -> TufRepo<'a> { + pub fn tuf_repo_id(self, id: TypedUuid) -> TufRepo<'a> { TufRepo::PrimaryKey(Root { lookup_root: self }, id) } @@ -863,7 +865,7 @@ lookup_resource! { children = [], lookup_by_name = false, soft_deletes = false, - primary_key_columns = [ { column_name = "id", rust_type = Uuid } ] + primary_key_columns = [ { column_name = "id", uuid_kind = TufRepoKind } ] } lookup_resource! { diff --git a/nexus/macros-common/Cargo.toml b/nexus/macros-common/Cargo.toml new file mode 100644 index 0000000000..9d4390a6d2 --- /dev/null +++ b/nexus/macros-common/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "nexus-macros-common" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[dependencies] +proc-macro2.workspace = true +syn = { workspace = true, features = ["extra-traits"] } +quote.workspace = true +omicron-workspace-hack.workspace = true diff --git a/nexus/macros-common/src/lib.rs b/nexus/macros-common/src/lib.rs new file mode 100644 index 0000000000..a2f51e1b5c --- /dev/null +++ b/nexus/macros-common/src/lib.rs @@ -0,0 +1,109 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Code shared by Nexus macros. + +use proc_macro2::TokenStream; +use quote::quote; +use syn::parse_quote; + +/// The type of a primary key in a database. +#[derive(Debug)] +pub enum PrimaryKeyType { + /// A regular type. 
+ Standard(syn::Type), + + /// A typed UUID, which requires special handling. + TypedUuid { + /// The external type. This is used almost everywhere. + external: syn::Type, + + /// The internal type used in nexus-db-model. + db: syn::Type, + }, +} + +impl PrimaryKeyType { + /// Constructs a new `TypedUuid` variant. + pub fn new_typed_uuid(kind: &syn::Ident) -> Self { + let external = parse_quote!(::omicron_uuid_kinds::TypedUuid<::omicron_uuid_kinds::#kind>); + let db = parse_quote!(crate::typed_uuid::DbTypedUuid<::omicron_uuid_kinds::#kind>); + PrimaryKeyType::TypedUuid { external, db } + } + + /// Returns the external type for this primary key. + pub fn external(&self) -> &syn::Type { + match self { + PrimaryKeyType::Standard(path) => path, + PrimaryKeyType::TypedUuid { external, .. } => external, + } + } + + /// Converts self into the external type. + pub fn into_external(self) -> syn::Type { + match self { + PrimaryKeyType::Standard(path) => path, + PrimaryKeyType::TypedUuid { external, .. } => external, + } + } + + /// Returns the database type for this primary key. + /// + /// For the `TypedUuid` variant, the db type is only accessible within the + /// `nexus-db-model` crate. + pub fn db(&self) -> &syn::Type { + match self { + PrimaryKeyType::Standard(path) => path, + PrimaryKeyType::TypedUuid { db, .. } => db, + } + } + + /// Converts self into the database type. + pub fn into_db(self) -> syn::Type { + match self { + PrimaryKeyType::Standard(path) => path, + PrimaryKeyType::TypedUuid { db, .. } => db, + } + } + + /// Returns tokens for a conversion from external to db types, given an + /// expression as input. + /// + /// This is specialized for the nexus-db-model crate. + pub fn external_to_db_nexus_db_model( + &self, + tokens: TokenStream, + ) -> TokenStream { + match self { + PrimaryKeyType::Standard(_) => tokens, + PrimaryKeyType::TypedUuid { .. } => { + quote! { crate::to_db_typed_uuid(#tokens) } + } + } + } + + /// Returns tokens for a conversion from external to db types, given an + /// expression as input. + /// + /// This is used for all crates *except* nexus-db-model. + pub fn external_to_db_other(&self, tokens: TokenStream) -> TokenStream { + match self { + PrimaryKeyType::Standard(_) => tokens, + PrimaryKeyType::TypedUuid { .. } => { + quote! { ::nexus_db_model::to_db_typed_uuid(#tokens) } + } + } + } + + /// Returns tokens for a conversion from db to external types, given an + /// expression as input. + pub fn db_to_external(&self, tokens: TokenStream) -> TokenStream { + match self { + PrimaryKeyType::Standard(_) => tokens, + PrimaryKeyType::TypedUuid { .. } => { + quote! 
{ ::omicron_uuid_kinds::TypedUuid::from(#tokens) } + } + } + } +} diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index dff0f73be7..93716d4804 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -9,6 +9,7 @@ anyhow.workspace = true chrono.workspace = true base64.workspace = true futures.workspace = true +omicron-uuid-kinds.workspace = true openssl.workspace = true parse-display.workspace = true schemars = { workspace = true, features = ["chrono", "uuid1"] } diff --git a/nexus/types/src/identity.rs b/nexus/types/src/identity.rs index ededb926df..6f214d6f9a 100644 --- a/nexus/types/src/identity.rs +++ b/nexus/types/src/identity.rs @@ -9,6 +9,7 @@ use chrono::{DateTime, Utc}; use omicron_common::api::external::IdentityMetadata; use omicron_common::api::external::Name; +use omicron_uuid_kinds::GenericUuid; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -23,7 +24,9 @@ use uuid::Uuid; /// /// May be derived from [`macro@db-macros::Resource`]. pub trait Resource { - fn id(&self) -> Uuid; + type IdType: GenericUuid; + + fn id(&self) -> Self::IdType; fn name(&self) -> &Name; fn description(&self) -> &str; fn time_created(&self) -> DateTime; @@ -32,7 +35,7 @@ pub trait Resource { fn identity(&self) -> IdentityMetadata { IdentityMetadata { - id: self.id(), + id: self.id().into_untyped_uuid(), name: self.name().clone(), description: self.description().to_string(), time_created: self.time_created(), @@ -60,13 +63,15 @@ pub struct AssetIdentityMetadata { /// /// May be derived from [`macro@db-macros::Asset`]. pub trait Asset { - fn id(&self) -> Uuid; + type IdType: GenericUuid; + + fn id(&self) -> Self::IdType; fn time_created(&self) -> DateTime; fn time_modified(&self) -> DateTime; fn identity(&self) -> AssetIdentityMetadata { AssetIdentityMetadata { - id: self.id(), + id: self.id().into_untyped_uuid(), time_created: self.time_created(), time_modified: self.time_modified(), } diff --git a/uuid-kinds/Cargo.toml b/uuid-kinds/Cargo.toml new file mode 100644 index 0000000000..126ba8bcf8 --- /dev/null +++ b/uuid-kinds/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "omicron-uuid-kinds" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +# The dependencies and features are written so as to make it easy for no-std +# code to import omicron-uuid-kinds. All the features below are turned on +# within omicron. + +[dependencies] +newtype-uuid.workspace = true +schemars = { workspace = true, optional = true } + +[features] +default = ["std"] +serde = ["newtype-uuid/serde"] +schemars08 = ["newtype-uuid/schemars08", "schemars"] +std = ["newtype-uuid/std"] +uuid-v4 = ["newtype-uuid/v4"] diff --git a/uuid-kinds/README.adoc b/uuid-kinds/README.adoc new file mode 100644 index 0000000000..1a22477aad --- /dev/null +++ b/uuid-kinds/README.adoc @@ -0,0 +1,73 @@ +# omicron-uuid-kinds + +In omicron and other Oxide projects, we use UUIDs to identify many different +kinds of entities. Using a single `Uuid` type for all of them risks mixing up +all the different kinds of IDs. + +To address that, we're actively moving towards typed UUIDs with the +https://github.com/oxidecomputer/newtype-uuid[newtype-uuid] crate. The goal is +that each kind of entity will have a marker type representing a _UUID kind_ +associated with it. Then, the type system will make it significantly harder to +mix different kinds of entities. + +*`omicron-uuid-kinds` is a centralized registry for UUID kinds*, one that can +be shared across Oxide repos. 
`omicron-uuid-kinds` supports no-std so the kinds +can be shared with embedded code as well. + +## Determinations + +As part of this effort, we've made several decisions that could have gone a +different way. This section documents those choices. + +### One big registry or several smaller ones? + +Rather than having a single registry, an option is to have several smaller ones +(perhaps one to two in each repo). The best answer to that is currently +unclear, but putting them in the same crate for now has several advantages: + +* The macros updated in this PR know where uuid kinds will be found, so they +let you specify just a bare name as the kind. + +* It's simpler. + +The disadvantage of this is that any change to this registry will cause all of +omicron to be rebuilt. Hopefully, once most UUIDs are converted over, this +crate won't be touched too much. + +This decision involves some level of commitment, because splitting up a +registry will probably require several days of work. For example, macros may +need to be updated to handle multiple sources, import paths would have to be +changed across several repos, and so on. + +Crates which solve more abstract problems and are completely independent of +omicron, like https://github.com/oxidecomputer/dropshot/[Dropshot] and +https://github.com/oxidecomputer/steno[Steno], will have their own registries +(probably as a module within the crates themselves). + +### Where should the registry live? + +Once we've decided on a single registry, the next question is where this +registry should be stored. There was some debate in `#oxide-control-plane`, and +two options stood out: + +1. In omicron itself. + + * The upside of this is that it is easy to add new UUID kinds within + omicron, especially as we transition over existing untyped UUIDs. + + * The downside is that we also want to use these kinds in crucible and + propolis -- and pulling in `omicron-uuid-kinds` as a Git dependency would + typically cause two copies of `omicron-uuid-kinds` to be floating around. + That can result in a lot of pain, including confusing errors. + +2. In a separate repo. While it eliminates the issue with duplicated +dependencies, it does add a fair bit more friction. That's because users +updating omicron would now have to make two separate, coordinated PRs. + +We've chosen 1 for now, because agility within omicron outweighs other +concerns. *The downside is mitigated* by using a `[patch]` directive; see the +workspace root's `Cargo.toml` for more about this. + +This is straightforward to change in the future. If we find that 1 is too much +of a bother, we'll easily be able to switch to 2 while keeping the name of the +crate the same (minimizing churn). diff --git a/uuid-kinds/src/lib.rs b/uuid-kinds/src/lib.rs new file mode 100644 index 0000000000..12bc756d68 --- /dev/null +++ b/uuid-kinds/src/lib.rs @@ -0,0 +1,50 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#![cfg_attr(not(feature = "std"), no_std)] + +//! A registry for UUID kinds used in Omicron and related projects. +//! +//! See this crate's `README.md` for more information. + +// Export these types so that other users don't have to pull in newtype-uuid. 
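+//
+// As a hypothetical illustration (not part of the original file), the kinds
+// defined below make IDs of different kinds into distinct types, so one kind
+// of ID cannot be passed where another is expected:
+//
+//     fn tuf_repo_by_id(id: TypedUuid<TufRepoKind>) { /* ... */ }
+//
+//     fn example(loopback_id: TypedUuid<LoopbackAddressKind>) {
+//         tuf_repo_by_id(loopback_id); // compile error: mismatched UUID kind
+//     }
+//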
+#[doc(no_inline)] +pub use newtype_uuid::{ + GenericUuid, ParseError, TagError, TypedUuid, TypedUuidKind, TypedUuidTag, +}; + +#[cfg(feature = "schemars08")] +use schemars::JsonSchema; + +macro_rules! impl_typed_uuid_kind { + ($($kind:ident => $tag:literal),* $(,)?) => { + $( + #[cfg_attr(feature = "schemars08", derive(JsonSchema))] + pub enum $kind {} + + impl TypedUuidKind for $kind { + #[inline] + fn tag() -> TypedUuidTag { + // `const` ensures that tags are validated at compile-time. + const TAG: TypedUuidTag = TypedUuidTag::new($tag); + TAG + } + } + )* + }; +} + +// NOTE: +// +// This should generally be an append-only list. Removing items from this list +// will not break things for now (because newtype-uuid does not currently alter +// any serialization formats), but it may involve some degree of churn across +// repos. +// +// Please keep this list in alphabetical order. + +impl_typed_uuid_kind! { + LoopbackAddressKind => "loopback_address", + TufRepoKind => "tuf_repo", +} From 475fb621d43b47ab3f4e9bd69fb21a695d147d73 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Fri, 16 Feb 2024 09:06:59 -0500 Subject: [PATCH 008/157] Fix bad service NIC slots (#5080) This is the second half of the fix for #5056. #5065 (already merged) fixed _how_ we were getting service NICs with nonzero slot values, and this PR adds a schema migration to apply a one-time fix to any existing service NICs with nonzero slot values. This matters to the Reconfigurator, because currently the NICs sled-agent thinks it has don't match the NICs recorded in CRDB (differing only by slot number). Closes #5056. --- nexus/db-model/src/schema.rs | 2 +- nexus/db-queries/src/db/mod.rs | 5 +++++ nexus/db-queries/src/db/pool_connection.rs | 2 +- nexus/tests/integration_tests/schema.rs | 6 ++++++ schema/crdb/35.0.0/up.sql | 6 ++++++ schema/crdb/dbinit.sql | 2 +- 6 files changed, 20 insertions(+), 3 deletions(-) create mode 100644 schema/crdb/35.0.0/up.sql diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 9ebf7516cf..3d9050b1af 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -13,7 +13,7 @@ use omicron_common::api::external::SemverVersion; /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(34, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(35, 0, 0); table! { disk (id) { diff --git a/nexus/db-queries/src/db/mod.rs b/nexus/db-queries/src/db/mod.rs index 184ebe5f8a..d5262166ee 100644 --- a/nexus/db-queries/src/db/mod.rs +++ b/nexus/db-queries/src/db/mod.rs @@ -35,6 +35,11 @@ pub mod subquery; pub(crate) mod true_or_cast_error; mod update_and_check; +/// Batch statement to disable full table scans. +// This is `pub` so tests that don't go through our connection pool can disable +// full table scans the same way pooled connections do. 
+pub use pool_connection::DISALLOW_FULL_TABLE_SCAN_SQL;
+
 #[cfg(test)]
 mod test_utils;
 
diff --git a/nexus/db-queries/src/db/pool_connection.rs b/nexus/db-queries/src/db/pool_connection.rs
index d9c50ff26c..66fb125a7c 100644
--- a/nexus/db-queries/src/db/pool_connection.rs
+++ b/nexus/db-queries/src/db/pool_connection.rs
@@ -78,7 +78,7 @@ static CUSTOM_TYPE_KEYS: &'static [&'static str] = &[
 ];
 const CUSTOM_TYPE_SCHEMA: &'static str = "public";
 
-const DISALLOW_FULL_TABLE_SCAN_SQL: &str =
+pub const DISALLOW_FULL_TABLE_SCAN_SQL: &str =
     "set disallow_full_table_scans = on; set large_full_scan_rows = 0;";
 
 #[derive(Debug)]
diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs
index 2d496fcd8e..1c23f1b842 100644
--- a/nexus/tests/integration_tests/schema.rs
+++ b/nexus/tests/integration_tests/schema.rs
@@ -10,6 +10,7 @@ use nexus_db_model::schema::SCHEMA_VERSION as LATEST_SCHEMA_VERSION;
 use nexus_db_queries::db::datastore::{
     all_sql_for_version_migration, EARLIEST_SUPPORTED_VERSION,
 };
+use nexus_db_queries::db::DISALLOW_FULL_TABLE_SCAN_SQL;
 use nexus_test_utils::{db, load_test_config, ControlPlaneTestContextBuilder};
 use omicron_common::api::external::SemverVersion;
 use omicron_common::api::internal::shared::SwitchLocation;
@@ -118,6 +119,11 @@ async fn apply_update(
 
     let client = crdb.connect().await.expect("failed to connect");
 
+    client
+        .batch_execute(DISALLOW_FULL_TABLE_SCAN_SQL)
+        .await
+        .expect("failed to disallow full table scans");
+
     // We skip this for the earliest supported version because these tables
     // might not exist yet.
     if version != EARLIEST_SUPPORTED_VERSION {
diff --git a/schema/crdb/35.0.0/up.sql b/schema/crdb/35.0.0/up.sql
new file mode 100644
index 0000000000..f50e8dc881
--- /dev/null
+++ b/schema/crdb/35.0.0/up.sql
@@ -0,0 +1,6 @@
+-- This isn't really a schema migration, but is instead a one-off fix for
+-- incorrect data (https://github.com/oxidecomputer/omicron/issues/5056) after
+-- the root cause for the incorrect data has been addressed.
+UPDATE omicron.public.network_interface
+    SET slot = 0
+    WHERE kind = 'service' AND time_deleted IS NULL;
diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql
index 254080e92d..6c82b63e6e 100644
--- a/schema/crdb/dbinit.sql
+++ b/schema/crdb/dbinit.sql
@@ -3515,7 +3515,7 @@ INSERT INTO omicron.public.db_metadata (
     version,
     target_version
 ) VALUES
-    ( TRUE, NOW(), NOW(), '34.0.0', NULL)
+    ( TRUE, NOW(), NOW(), '35.0.0', NULL)
 ON CONFLICT DO NOTHING;
 
 COMMIT;

From e2f5eea78d0c3b59a117a3bc78bb4635ce740d94 Mon Sep 17 00:00:00 2001
From: John Gallagher
Date: Fri, 16 Feb 2024 12:50:04 -0500
Subject: [PATCH 009/157] Fix typo in printing blueprint diff (#5087)

This seems weird!
:) ``` zone faa4010d-9979-47d3-b104-7c269def8cbb type crucible underlay IP fd00:1122:3344:101::c (unchanged) zone fb81a9cc-267e-48d8-bdb3-32dc6225a04f type crucible underlay IP fd00:1122:3344:101::8 (unchanged) + zone d86f17d2-e1a5-4744-85c7-8bb3d989ac76 type fd00:1122:3344:101::21 underlay IP nexus (added) ``` --- nexus/types/src/deployment.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 06427507d5..490097d46d 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -615,8 +615,8 @@ impl<'a> std::fmt::Display for OmicronZonesDiff<'a> { f, "+ zone {} type {} underlay IP {} (added)", zone.id, - zone.underlay_address, zone.zone_type.label(), + zone.underlay_address, )?; } } From cd901535e9a1075bdcf1f99b257cce2923ce0fb2 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Fri, 16 Feb 2024 10:06:52 -0800 Subject: [PATCH 010/157] chore(deps): update rust crate clap to 4.5 (#5083) --- Cargo.lock | 144 ++++++++++++++++---------------------- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 14 ++-- 3 files changed, 64 insertions(+), 96 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fca461f24b..b339054e14 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.5.0" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c" +checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" dependencies = [ "anstyle", "anstyle-parse", @@ -150,12 +150,12 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "2.1.0" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" dependencies = [ "anstyle", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -332,7 +332,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc7b2dbe9169059af0f821e811180fddc971fc210c776c133c7819ccd6e478db" dependencies = [ - "rustix 0.38.31", + "rustix", "tempfile", "windows-sys 0.52.0", ] @@ -988,9 +988,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.3" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84ed82781cea27b43c9b106a979fe450a13a31aab0500595fb3fc06616de08e6" +checksum = "80c21025abd42669a92efc996ef13cfb2c5c627858421ea58d5c3b331a6c134f" dependencies = [ "clap_builder", "clap_derive", @@ -998,22 +998,22 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.2" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bb9faaa7c2ef94b2743a21f5a29e6f0010dff4caa69ac8e9d6cf8b6fa74da08" +checksum = "458bf1f341769dfcf849846f65dffdf9146daa56bcd2a47cb4e1de9915567c99" dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.10.0", + "strsim 0.11.0", "terminal_size", ] [[package]] name = "clap_derive" -version = "4.4.2" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" +checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" dependencies = [ "heck 0.4.1", 
"proc-macro2", @@ -1023,9 +1023,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.5.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "clipboard-win" @@ -1201,7 +1201,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.4.3", + "clap 4.5.0", "criterion-plot", "futures", "is-terminal", @@ -1852,7 +1852,7 @@ dependencies = [ "anyhow", "camino", "chrono", - "clap 4.4.3", + "clap 4.5.0", "dns-service-client", "dropshot", "expectorate", @@ -2268,7 +2268,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5" dependencies = [ "cfg-if", - "rustix 0.38.31", + "rustix", "windows-sys 0.48.0", ] @@ -2279,7 +2279,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e5768da2206272c81ef0b5e951a41862938a6070da63bcea197899942d3b947" dependencies = [ "cfg-if", - "rustix 0.38.31", + "rustix", "windows-sys 0.52.0", ] @@ -2557,7 +2557,7 @@ name = "gateway-cli" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.4.3", + "clap 4.5.0", "futures", "gateway-client", "gateway-messages", @@ -3462,7 +3462,7 @@ dependencies = [ "bytes", "camino", "cancel-safe-futures", - "clap 4.4.3", + "clap 4.5.0", "ddm-admin-client", "display-error-chain", "futures", @@ -3522,7 +3522,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "clap 4.4.3", + "clap 4.5.0", "dropshot", "expectorate", "hyper 0.14.27", @@ -3603,7 +3603,7 @@ name = "internal-dns-cli" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.4.3", + "clap 4.5.0", "dropshot", "internal-dns", "omicron-common", @@ -3613,17 +3613,6 @@ dependencies = [ "trust-dns-resolver", ] -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi 0.3.2", - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "ipcc" version = "0.1.0" @@ -3675,7 +3664,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", - "rustix 0.38.31", + "rustix", "windows-sys 0.48.0", ] @@ -3883,7 +3872,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d8de370f98a6cb8a4606618e53e802f93b094ddec0f96988eaec2c27e6e9ce7" dependencies = [ - "clap 4.4.3", + "clap 4.5.0", "termcolor", "threadpool", ] @@ -3911,12 +3900,6 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" -[[package]] -name = "linux-raw-sys" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - [[package]] name = "linux-raw-sys" version = "0.4.13" @@ -3945,7 +3928,7 @@ version = "0.2.4" source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" dependencies = [ "bitfield", - "clap 4.4.3", + "clap 4.5.0", "packed_struct", "serde", ] @@ -4925,7 +4908,7 @@ dependencies = [ "anyhow", "camino", "camino-tempfile", - "clap 4.4.3", + "clap 4.5.0", "dropshot", "expectorate", 
"futures", @@ -4958,7 +4941,7 @@ dependencies = [ "anyhow", "base64", "camino", - "clap 4.4.3", + "clap 4.5.0", "dropshot", "expectorate", "futures", @@ -5009,7 +4992,7 @@ dependencies = [ "camino-tempfile", "cancel-safe-futures", "chrono", - "clap 4.4.3", + "clap 4.5.0", "criterion", "crucible-agent-client", "crucible-pantry-client", @@ -5119,7 +5102,7 @@ dependencies = [ "anyhow", "async-bb8-diesel", "chrono", - "clap 4.4.3", + "clap 4.5.0", "crossterm", "crucible-agent-client", "csv", @@ -5170,7 +5153,7 @@ version = "0.1.0" dependencies = [ "anyhow", "camino", - "clap 4.4.3", + "clap 4.5.0", "expectorate", "futures", "hex", @@ -5237,7 +5220,7 @@ dependencies = [ "cancel-safe-futures", "cfg-if", "chrono", - "clap 4.4.3", + "clap 4.5.0", "crucible-agent-client", "ddm-admin-client", "derive_more", @@ -5373,7 +5356,7 @@ dependencies = [ "bytes", "chrono", "cipher", - "clap 4.4.3", + "clap 4.5.0", "clap_builder", "console", "const-oid", @@ -5387,7 +5370,6 @@ dependencies = [ "dof 0.3.0", "either", "elliptic-curve", - "errno", "ff", "flate2", "futures", @@ -5436,7 +5418,7 @@ dependencies = [ "regex-syntax 0.8.2", "reqwest", "ring 0.17.7", - "rustix 0.38.31", + "rustix", "schemars", "semver 1.0.21", "serde", @@ -5750,7 +5732,7 @@ dependencies = [ "anyhow", "camino", "chrono", - "clap 4.4.3", + "clap 4.5.0", "dropshot", "expectorate", "futures", @@ -5793,7 +5775,7 @@ dependencies = [ "bytes", "camino", "chrono", - "clap 4.4.3", + "clap 4.5.0", "dropshot", "expectorate", "futures", @@ -5863,7 +5845,7 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", - "clap 4.4.3", + "clap 4.5.0", "dropshot", "nexus-client", "omicron-common", @@ -5885,7 +5867,7 @@ dependencies = [ "anyhow", "camino", "chrono", - "clap 4.4.3", + "clap 4.5.0", "omicron-workspace-hack", "uuid", ] @@ -6689,7 +6671,7 @@ dependencies = [ "anyhow", "atty", "base64", - "clap 4.4.3", + "clap 4.5.0", "dropshot", "futures", "hyper 0.14.27", @@ -7429,20 +7411,6 @@ dependencies = [ "toolchain_find", ] -[[package]] -name = "rustix" -version = "0.37.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - [[package]] name = "rustix" version = "0.38.31" @@ -7452,7 +7420,7 @@ dependencies = [ "bitflags 2.4.0", "errno", "libc", - "linux-raw-sys 0.4.13", + "linux-raw-sys", "windows-sys 0.52.0", ] @@ -8465,7 +8433,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "clap 4.4.3", + "clap 4.5.0", "dropshot", "futures", "gateway-messages", @@ -8655,6 +8623,12 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "strsim" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" + [[package]] name = "structmeta" version = "0.2.0" @@ -8952,7 +8926,7 @@ checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" dependencies = [ "cfg-if", "fastrand", - "rustix 0.38.31", + "rustix", "windows-sys 0.52.0", ] @@ -8978,11 +8952,11 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.2.6" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237" 
+checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.37.23", + "rustix", "windows-sys 0.48.0", ] @@ -9643,7 +9617,7 @@ dependencies = [ "assert_cmd", "camino", "chrono", - "clap 4.4.3", + "clap 4.5.0", "console", "datatest-stable", "fs-err", @@ -9889,7 +9863,7 @@ dependencies = [ "camino", "camino-tempfile", "chrono", - "clap 4.4.3", + "clap 4.5.0", "debug-ignore", "display-error-chain", "dropshot", @@ -9920,7 +9894,7 @@ dependencies = [ "camino", "camino-tempfile", "cancel-safe-futures", - "clap 4.4.3", + "clap 4.5.0", "debug-ignore", "derive-where", "either", @@ -10334,7 +10308,7 @@ dependencies = [ "buf-list", "camino", "ciborium", - "clap 4.4.3", + "clap 4.5.0", "crossterm", "futures", "humantime", @@ -10395,7 +10369,7 @@ dependencies = [ "bytes", "camino", "ciborium", - "clap 4.4.3", + "clap 4.5.0", "crossterm", "omicron-workspace-hack", "reedline", @@ -10420,7 +10394,7 @@ dependencies = [ "bytes", "camino", "camino-tempfile", - "clap 4.4.3", + "clap 4.5.0", "ddm-admin-client", "debug-ignore", "display-error-chain", @@ -10737,7 +10711,7 @@ dependencies = [ "camino", "cargo_metadata", "cargo_toml", - "clap 4.4.3", + "clap 4.5.0", ] [[package]] @@ -10870,7 +10844,7 @@ name = "zone-network-setup" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.4.3", + "clap 4.5.0", "dropshot", "illumos-utils", "omicron-common", diff --git a/Cargo.toml b/Cargo.toml index f71bd77730..b140b3946a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -177,7 +177,7 @@ chacha20poly1305 = "0.10.1" ciborium = "0.2.2" cfg-if = "1.0" chrono = { version = "0.4", features = [ "serde" ] } -clap = { version = "4.4", features = ["cargo", "derive", "env", "wrap_help"] } +clap = { version = "4.5", features = ["cargo", "derive", "env", "wrap_help"] } cookie = "0.18" criterion = { version = "0.5.1", features = [ "async_tokio" ] } crossbeam = "0.8" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 098be26460..28338ffce8 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -28,8 +28,8 @@ byteorder = { version = "1.5.0" } bytes = { version = "1.5.0", features = ["serde"] } chrono = { version = "0.4.31", features = ["alloc", "serde"] } cipher = { version = "0.4.4", default-features = false, features = ["block-padding", "zeroize"] } -clap = { version = "4.4.3", features = ["cargo", "derive", "env", "wrap_help"] } -clap_builder = { version = "4.4.2", default-features = false, features = ["cargo", "color", "env", "std", "suggestions", "usage", "wrap_help"] } +clap = { version = "4.5.0", features = ["cargo", "derive", "env", "wrap_help"] } +clap_builder = { version = "4.5.0", default-features = false, features = ["cargo", "color", "env", "std", "suggestions", "usage", "wrap_help"] } console = { version = "0.15.8" } const-oid = { version = "0.9.5", default-features = false, features = ["db", "std"] } crossbeam-epoch = { version = "0.9.15" } @@ -134,8 +134,8 @@ byteorder = { version = "1.5.0" } bytes = { version = "1.5.0", features = ["serde"] } chrono = { version = "0.4.31", features = ["alloc", "serde"] } cipher = { version = "0.4.4", default-features = false, features = ["block-padding", "zeroize"] } -clap = { version = "4.4.3", features = ["cargo", "derive", "env", "wrap_help"] } -clap_builder = { version = "4.4.2", default-features = false, features = ["cargo", "color", "env", "std", "suggestions", "usage", "wrap_help"] } +clap = { version = "4.5.0", features = ["cargo", "derive", "env", "wrap_help"] } +clap_builder = { version = 
"4.5.0", default-features = false, features = ["cargo", "color", "env", "std", "suggestions", "usage", "wrap_help"] } console = { version = "0.15.8" } const-oid = { version = "0.9.5", default-features = false, features = ["db", "std"] } crossbeam-epoch = { version = "0.9.15" } @@ -242,28 +242,24 @@ rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.x86_64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -errno = { version = "0.3.8", default-features = false, features = ["std"] } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.x86_64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -errno = { version = "0.3.8", default-features = false, features = ["std"] } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.aarch64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -errno = { version = "0.3.8", default-features = false, features = ["std"] } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.aarch64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -errno = { version = "0.3.8", default-features = false, features = ["std"] } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } @@ -271,7 +267,6 @@ rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.x86_64-unknown-illumos.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } -errno = { version = "0.3.8", default-features = false, features = ["std"] } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } @@ -281,7 +276,6 @@ toml_edit = { version = "0.19.15", features = ["serde"] } [target.x86_64-unknown-illumos.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } -errno = { version = "0.3.8", default-features = false, features = ["std"] } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } From 25eb244f6a05691e456838e2da2fb985c72a5a52 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Fri, 16 Feb 2024 19:09:23 +0000 Subject: [PATCH 011/157] chore(deps): update rust crate either to 1.10.0 (#5084) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b339054e14..eeca299eff 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -2094,9 +2094,9 @@ dependencies = [ [[package]] name = "either" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" [[package]] name = "elliptic-curve" diff --git a/Cargo.toml b/Cargo.toml index b140b3946a..abcd0db4f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -201,7 +201,7 @@ dns-service-client = { path = "clients/dns-service-client" } dpd-client = { path = "clients/dpd-client" } dropshot = { git = "https://github.com/oxidecomputer/dropshot", branch = "main", features = [ "usdt-probes" ] } dyn-clone = "1.0.16" -either = "1.9.0" +either = "1.10.0" expectorate = "1.1.0" fatfs = "0.3.6" filetime = "0.2.23" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 28338ffce8..6b7096b8fe 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -39,7 +39,7 @@ crypto-common = { version = "0.1.6", default-features = false, features = ["getr der = { version = "0.7.8", default-features = false, features = ["derive", "flagset", "oid", "pem", "std"] } diesel = { version = "2.1.4", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } digest = { version = "0.10.7", features = ["mac", "oid", "std"] } -either = { version = "1.9.0" } +either = { version = "1.10.0" } elliptic-curve = { version = "0.13.8", features = ["ecdh", "hazmat", "pem", "std"] } ff = { version = "0.13.0", default-features = false, features = ["alloc"] } flate2 = { version = "1.0.28" } @@ -145,7 +145,7 @@ crypto-common = { version = "0.1.6", default-features = false, features = ["getr der = { version = "0.7.8", default-features = false, features = ["derive", "flagset", "oid", "pem", "std"] } diesel = { version = "2.1.4", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } digest = { version = "0.10.7", features = ["mac", "oid", "std"] } -either = { version = "1.9.0" } +either = { version = "1.10.0" } elliptic-curve = { version = "0.13.8", features = ["ecdh", "hazmat", "pem", "std"] } ff = { version = "0.13.0", default-features = false, features = ["alloc"] } flate2 = { version = "1.0.28" } From 9e9e60c7f6456c721348e9edb1b432d310e4b7aa Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Fri, 16 Feb 2024 12:43:26 -0800 Subject: [PATCH 012/157] chore(deps): update rust crate toml_edit to 0.22.6 (#4996) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 33 ++++++++++++++++----------------- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 6 ++++-- 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eeca299eff..ecdd8c235e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5442,6 +5442,7 @@ dependencies = [ "toml 0.7.8", "toml_datetime", "toml_edit 0.19.15", + "toml_edit 0.22.6", "tracing", "trust-dns-proto", "unicode-bidi", @@ -9338,7 +9339,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.4", + "toml_edit 0.22.6", ] [[package]] @@ -9360,31 +9361,20 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.5.15", ] [[package]] name = "toml_edit" -version = "0.21.1" +version = 
"0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" -dependencies = [ - "indexmap 2.2.3", - "toml_datetime", - "winnow", -] - -[[package]] -name = "toml_edit" -version = "0.22.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9ffdf896f8daaabf9b66ba8e77ea1ed5ed0f72821b398aba62352e95062951" +checksum = "2c1b5fd4128cc8d3e0cb74d4ed9a9cc7c7284becd4df68f5f940e1ad123606f6" dependencies = [ "indexmap 2.2.3", "serde", "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.6.1", ] [[package]] @@ -10337,7 +10327,7 @@ dependencies = [ "tokio", "tokio-util", "toml 0.8.10", - "toml_edit 0.21.1", + "toml_edit 0.22.6", "tui-tree-widget", "unicode-width", "update-engine", @@ -10664,6 +10654,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "winnow" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d90f4e0f530c4c69f62b80d839e9ef3855edc9cba471a160c4d692deed62b401" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" diff --git a/Cargo.toml b/Cargo.toml index abcd0db4f1..f15170e2e1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -397,7 +397,7 @@ tokio-stream = "0.1.14" tokio-tungstenite = "0.20" tokio-util = { version = "0.7.10", features = ["io", "io-util"] } toml = "0.8.10" -toml_edit = "0.21.1" +toml_edit = "0.22.6" tough = { version = "0.16.0", features = [ "http" ] } trust-dns-client = "0.22" trust-dns-proto = "0.22" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 6b7096b8fe..c204b56b4d 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -107,6 +107,7 @@ tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serd tokio-stream = { version = "0.1.14", features = ["net"] } tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } toml = { version = "0.7.8" } +toml_edit-3c51e837cfc5589a = { package = "toml_edit", version = "0.22.6", features = ["serde"] } tracing = { version = "0.1.37", features = ["log"] } trust-dns-proto = { version = "0.22.0" } unicode-bidi = { version = "0.3.13" } @@ -214,6 +215,7 @@ tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serd tokio-stream = { version = "0.1.14", features = ["net"] } tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } toml = { version = "0.7.8" } +toml_edit-3c51e837cfc5589a = { package = "toml_edit", version = "0.22.6", features = ["serde"] } tracing = { version = "0.1.37", features = ["log"] } trust-dns-proto = { version = "0.22.0" } unicode-bidi = { version = "0.3.13" } @@ -271,7 +273,7 @@ mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } -toml_edit = { version = "0.19.15", features = ["serde"] } +toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } [target.x86_64-unknown-illumos.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } @@ -280,6 +282,6 @@ mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } -toml_edit = { 
version = "0.19.15", features = ["serde"] } +toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } ### END HAKARI SECTION From 47a416fe1ee5fcf9ade8788fabf4195369d1b9b3 Mon Sep 17 00:00:00 2001 From: bnaecker Date: Fri, 16 Feb 2024 13:31:23 -0800 Subject: [PATCH 013/157] Order `NextItem` subquery results predictably (#5067) - Fixes #5055 - Add a new column, `index` to the `NextItem` subquery, which indexes the shifts from 0..n. - Add an `ORDER BY index` clause to guarantee order. - Add test --- nexus/db-queries/src/db/queries/disk.rs | 11 +- .../src/db/queries/network_interface.rs | 104 +++++-- nexus/db-queries/src/db/queries/next_item.rs | 262 ++++++++++++++++-- nexus/db-queries/src/db/queries/vpc.rs | 8 +- 4 files changed, 331 insertions(+), 54 deletions(-) diff --git a/nexus/db-queries/src/db/queries/disk.rs b/nexus/db-queries/src/db/queries/disk.rs index 9fd56c3ce8..dc1a31dd01 100644 --- a/nexus/db-queries/src/db/queries/disk.rs +++ b/nexus/db-queries/src/db/queries/disk.rs @@ -46,11 +46,12 @@ struct NextDiskSlot { impl NextDiskSlot { fn new(instance_id: Uuid) -> Self { - let generator = DefaultShiftGenerator { - base: 0, - max_shift: i64::try_from(MAX_DISKS_PER_INSTANCE).unwrap(), - min_shift: 0, - }; + let generator = DefaultShiftGenerator::new( + 0, + i64::try_from(MAX_DISKS_PER_INSTANCE).unwrap(), + 0, + ) + .expect("invalid min/max shift"); Self { inner: NextItem::new_scoped(generator, instance_id) } } } diff --git a/nexus/db-queries/src/db/queries/network_interface.rs b/nexus/db-queries/src/db/queries/network_interface.rs index 0643089316..a22c80b232 100644 --- a/nexus/db-queries/src/db/queries/network_interface.rs +++ b/nexus/db-queries/src/db/queries/network_interface.rs @@ -516,8 +516,8 @@ impl NextIpv4Address { let subnet = IpNetwork::from(subnet); let net = IpNetwork::from(first_available_address(&subnet)); let max_shift = i64::from(last_address_offset(&subnet)); - let generator = - DefaultShiftGenerator { base: net, max_shift, min_shift: 0 }; + let generator = DefaultShiftGenerator::new(net, max_shift, 0) + .expect("invalid min/max shift"); Self { inner: NextItem::new_scoped(generator, subnet_id) } } } @@ -575,12 +575,13 @@ pub struct NextNicSlot { impl NextNicSlot { pub fn new(parent_id: Uuid) -> Self { - let generator = DefaultShiftGenerator { - base: 0, - max_shift: i64::try_from(MAX_NICS_PER_INSTANCE) + let generator = DefaultShiftGenerator::new( + 0, + i64::try_from(MAX_NICS_PER_INSTANCE) .expect("Too many network interfaces"), - min_shift: 0, - }; + 0, + ) + .expect("invalid min/max shift"); Self { inner: NextItem::new_scoped(generator, parent_id) } } } @@ -607,25 +608,62 @@ pub struct NextMacAddress { >, } +// Helper to ensure we correctly compute the min/max shifts for a next MAC +// query. +#[derive(Copy, Clone, Debug)] +struct NextMacShifts { + base: MacAddr, + min_shift: i64, + max_shift: i64, +} + +impl NextMacShifts { + fn for_guest() -> Self { + let base = MacAddr::random_guest(); + Self::shifts_for(base, MacAddr::MIN_GUEST_ADDR, MacAddr::MAX_GUEST_ADDR) + } + + fn for_system() -> NextMacShifts { + let base = MacAddr::random_system(); + Self::shifts_for( + base, + MacAddr::MIN_SYSTEM_ADDR, + MacAddr::MAX_SYSTEM_ADDR, + ) + } + + fn shifts_for(base: MacAddr, min: i64, max: i64) -> NextMacShifts { + let x = base.to_i64(); + + // The max shift is the distance to the last value. This min shift is + // always expressed as a negative number, giving the largest leftward + // shift, i.e., the distance to the first value. 
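+        //
+        // (Illustrative numbers, not from the original change: with x = 3,
+        // min = 0, and max = 10, we get max_shift = 7 and min_shift = -3,
+        // so the scan covers 3..=10 and then wraps around to 0..=2.)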
+        let max_shift = max - x;
+        let min_shift = min - x;
+        Self { base, min_shift, max_shift }
+    }
+}
+
 impl NextMacAddress {
     pub fn new(vpc_id: Uuid, kind: NetworkInterfaceKind) -> Self {
         let (base, max_shift, min_shift) = match kind {
             NetworkInterfaceKind::Instance => {
-                let base = MacAddr::random_guest();
-                let x = base.to_i64();
-                let max_shift = MacAddr::MAX_GUEST_ADDR - x;
-                let min_shift = x - MacAddr::MIN_GUEST_ADDR;
+                let NextMacShifts { base, min_shift, max_shift } =
+                    NextMacShifts::for_guest();
                 (base.into(), max_shift, min_shift)
             }
             NetworkInterfaceKind::Service => {
-                let base = MacAddr::random_system();
-                let x = base.to_i64();
-                let max_shift = MacAddr::MAX_SYSTEM_ADDR - x;
-                let min_shift = x - MacAddr::MAX_SYSTEM_ADDR;
+                let NextMacShifts { base, min_shift, max_shift } =
+                    NextMacShifts::for_system();
                 (base.into(), max_shift, min_shift)
             }
         };
-        let generator = DefaultShiftGenerator { base, max_shift, min_shift };
+        let generator = DefaultShiftGenerator::new(base, max_shift, min_shift)
+            .unwrap_or_else(|| {
+                panic!(
+                    "invalid min shift ({min_shift}) or max_shift ({max_shift})"
+                )
+            });
         Self { inner: NextItem::new_scoped(generator, vpc_id) }
     }
 }
@@ -1713,6 +1751,7 @@ mod tests {
     use crate::db::model::NetworkInterface;
     use crate::db::model::Project;
     use crate::db::model::VpcSubnet;
+    use crate::db::queries::network_interface::NextMacShifts;
     use async_bb8_diesel::AsyncRunQueryDsl;
     use dropshot::test_util::LogContext;
     use ipnetwork::Ipv4Network;
@@ -2801,4 +2840,37 @@ mod tests {
             "fd00::5".parse::().unwrap(),
         );
     }
+
+    #[test]
+    fn test_next_mac_shifts_for_system() {
+        let NextMacShifts { base, min_shift, max_shift } =
+            NextMacShifts::for_system();
+        assert!(base.is_system());
+        assert!(
+            min_shift <= 0,
+            "expected min shift to be negative, found {min_shift}"
+        );
+        assert!(max_shift >= 0, "found {max_shift}");
+        let x = base.to_i64();
+        assert_eq!(x + min_shift, MacAddr::MIN_SYSTEM_ADDR);
+        assert_eq!(x + max_shift, MacAddr::MAX_SYSTEM_ADDR);
+    }
+
+    #[test]
+    fn test_next_mac_shifts_for_guest() {
+        let NextMacShifts { base, min_shift, max_shift } =
+            NextMacShifts::for_guest();
+        assert!(base.is_guest());
+        assert!(
+            min_shift <= 0,
+            "expected min shift to be negative, found {min_shift}"
+        );
+        assert!(
+            max_shift >= 0,
+            "expected max shift to be positive, found {max_shift}"
+        );
+        let x = base.to_i64();
+        assert_eq!(x + min_shift, MacAddr::MIN_GUEST_ADDR);
+        assert_eq!(x + max_shift, MacAddr::MAX_GUEST_ADDR);
+    }
 }
diff --git a/nexus/db-queries/src/db/queries/next_item.rs b/nexus/db-queries/src/db/queries/next_item.rs
index 007aec943d..769c891349 100644
--- a/nexus/db-queries/src/db/queries/next_item.rs
+++ b/nexus/db-queries/src/db/queries/next_item.rs
@@ -35,7 +35,7 @@ use uuid::Uuid;
 /// SELECT
 ///     <base> + shift AS ip
 /// FROM
-///     generate_series(0, <max_shift>) AS shift
+///     generate_series(0, <max_shift>) AS shift,
 /// LEFT OUTER JOIN
 ///     network_interface
 /// ON
 ///     (<subnet_id>, <base> + shift, TRUE)
 /// WHERE
 ///     ip IS NULL
+/// ORDER BY ip
 /// LIMIT 1
 /// ```
 ///
-/// This query selects the lowest address in the IP subnet that's not already
-/// allocated to a guest interface. Note that the query is linear in the number
-/// of _allocated_ guest addresses. <base> and <max_shift> are chosen
-/// based on the subnet and its size, and take into account reserved IP
-/// addresses (such as the broadcast address).
+/// This query selects the next address after <base> in the IP subnet
+/// that's not already allocated to a guest interface. 
Note that the query is +/// linear in the number of _allocated_ guest addresses. and +/// are chosen based on the subnet and its size, and take into +/// account reserved IP addresses (such as the broadcast address). /// /// General query structure /// ----------------------- /// /// Much of the value of this type comes from the ability to specify the /// starting point for a scan for the next item. In the case above, of an IP -/// address for a guest NIC, we always try to allocate the lowest available +/// address for a guest NIC, we always try to allocate the next available /// address. This implies the search is linear in the number of allocated IP /// addresses, but that runtime cost is acceptable for a few reasons. First, the /// predictability of the addresses is nice. Second, the subnets can generally @@ -81,16 +82,16 @@ use uuid::Uuid; /// FROM /// ( /// SELECT -/// shift +/// "index", shift /// FROM -/// generate_series(0, ) -/// AS shift +/// generate_series(0, ) AS "index" +/// generate_series(0, ) AS shift, /// UNION ALL /// SELECT /// shift /// FROM -/// generate_series(, -1) -/// AS shift +/// generate_series(, ) AS "index" +/// generate_series(, -1) AS shift, /// LEFT OUTER JOIN /// /// ON @@ -98,6 +99,7 @@ use uuid::Uuid; /// (, + shift, TRUE) /// WHERE /// IS NULL +/// ORDER BY "index" /// LIMIT 1 /// ``` /// @@ -120,6 +122,18 @@ use uuid::Uuid; /// +------------------------------------------------+ /// ``` /// +/// Ordering +/// -------- +/// +/// The subquery is designed to select the next address _after_ the provided +/// base. To preserve this behavior even in situations where we wrap around the +/// end of the range, we include the `index` column when generating the shifts, +/// and order the result by that column. +/// +/// The CockroachDB docs on [ordering] specify that queries without an explicit +/// `ORDER BY` clause are returned "as the coordinating nodes receives them." +/// Without this clause, the order is non-deterministic. +/// /// Shift generators /// ---------------- /// @@ -146,6 +160,8 @@ use uuid::Uuid; /// there is no scope, which means the items must be globally unique in the /// entire table (among non-deleted items). The query is structured slightly /// differently in these two cases. +/// +/// [ordering]: https://www.cockroachlabs.com/docs/stable/select-clause#sorting-and-limiting-query-results #[derive(Debug, Clone, Copy)] pub(super) struct NextItem< Table, @@ -220,6 +236,9 @@ where } } +const SHIFT_COLUMN_IDENT: &str = "shift"; +const INDEX_COLUMN_IDENT: &str = "index"; + impl QueryFragment for NextItem @@ -262,7 +281,7 @@ where self.shift_generator.base(), )?; out.push_sql(" + "); - out.push_identifier("shift")?; + out.push_identifier(SHIFT_COLUMN_IDENT)?; out.push_sql(", TRUE) "); push_next_item_where_clause::(out.reborrow()) @@ -303,7 +322,7 @@ where self.shift_generator.base(), )?; out.push_sql(" + "); - out.push_identifier("shift")?; + out.push_identifier(SHIFT_COLUMN_IDENT)?; out.push_sql(", TRUE) "); push_next_item_where_clause::(out.reborrow()) @@ -338,7 +357,7 @@ where shift_generator.base(), )?; out.push_sql(" + "); - out.push_identifier("shift")?; + out.push_identifier(SHIFT_COLUMN_IDENT)?; out.push_sql(" AS "); out.push_identifier(ItemColumn::NAME)?; out.push_sql(" FROM ("); @@ -350,7 +369,7 @@ where // Push the final where clause shared by scoped and unscoped next item queries. 
// // ```sql -// WHERE IS NULL LIMIT 1 +// WHERE IS NULL ORDER BY "index" LIMIT 1 // ``` fn push_next_item_where_clause( mut out: AstPass, @@ -363,7 +382,9 @@ where { out.push_sql(" WHERE "); out.push_identifier(ItemColumn::NAME)?; - out.push_sql(" IS NULL LIMIT 1"); + out.push_sql(" IS NULL ORDER BY "); + out.push_identifier(INDEX_COLUMN_IDENT)?; + out.push_sql(" LIMIT 1"); Ok(()) } @@ -414,17 +435,24 @@ pub trait ShiftGenerator { /// Return the minimum shift from the base item for the scan. fn min_shift(&self) -> &i64; + /// Return the indices of the generated shifts for the scan. + fn shift_indices(&self) -> &ShiftIndices; + /// Insert the part of the query represented by the shift of items into the /// provided AstPass. /// /// The default implementation pushes: /// /// ```sql - /// SELECT generate_series(0, ) AS shift + /// SELECT + /// generate_series(0, ) as "index" + /// generate_series(0, ) AS shift, /// UNION ALL - /// SELECT generate_series(, -1) AS shift + /// SELECT + /// generate_series(, ) as "index" + /// generate_series(, -1) AS shift, /// ``` - fn walk_ast<'a, Table, ItemColumn>( + fn walk_ast<'a, 'b, Table, ItemColumn>( &'a self, mut out: AstPass<'_, 'a, Pg>, ) -> diesel::QueryResult<()> @@ -434,15 +462,83 @@ pub trait ShiftGenerator { Item: ToSql<::SqlType, Pg> + Copy, ItemColumn: Column
+ Copy, Pg: HasSqlType<::SqlType>, + 'a: 'b, { out.push_sql("SELECT generate_series(0, "); + out.push_bind_param::( + self.shift_indices().first_end(), + )?; + out.push_sql(") AS "); + out.push_identifier(INDEX_COLUMN_IDENT)?; + out.push_sql(", generate_series(0, "); out.push_bind_param::(self.max_shift())?; out.push_sql(") AS "); - out.push_identifier("shift")?; + out.push_identifier(SHIFT_COLUMN_IDENT)?; out.push_sql(" UNION ALL SELECT generate_series("); + out.push_bind_param::( + self.shift_indices().second_start(), + )?; + out.push_sql(", "); + out.push_bind_param::( + self.shift_indices().second_end(), + )?; + out.push_sql(") AS "); + out.push_identifier(INDEX_COLUMN_IDENT)?; + out.push_sql(", generate_series("); out.push_bind_param::(self.min_shift())?; out.push_sql(", -1) AS "); - out.push_identifier("shift") + out.push_identifier(SHIFT_COLUMN_IDENT) + } +} + +/// Helper to compute the range of the _index_ column, used to predictably sort +/// the generated items. +/// +/// This type cannot be created directly, it's generated internally by creating +/// a `DefaultShiftGenerator`. +// NOTE: This type mostly exists to satisfy annoying lifetime constraints +// imposed by Diesel's `AstPass`. One can only push bind parameters that outlive +// the AST pass itself, so you cannot push owned values or even references to +// values generated anywhere within the `walk_ast()` implementation; +#[derive(Copy, Clone, Debug)] +pub struct ShiftIndices { + // The end of the first range. + first_end: i64, + // The start of the second range. + second_start: i64, + // The end of the second range. + // + // This is equal to the number of items generated for the range. + second_end: i64, +} + +impl ShiftIndices { + fn new(max_shift: i64, min_shift: i64) -> Self { + assert!(max_shift >= 0); + assert!(min_shift <= 0); + + // We're just generating the list of indices (0, n_items), but we need + // to split it in the middle. Specifically, we'll split it at + // `max_shift`, and then generate the remainder. + let first_end = max_shift; + let second_start = first_end + 1; + let second_end = max_shift - min_shift; + Self { first_end, second_start, second_end } + } + + /// Return the end of the first set of indices. + pub fn first_end(&self) -> &i64 { + &self.first_end + } + + /// Return the start of the second set of indices. + pub fn second_start(&self) -> &i64 { + &self.second_start + } + + /// Return the end of the second set of indices. + pub fn second_end(&self) -> &i64 { + &self.second_end } } @@ -450,9 +546,24 @@ pub trait ShiftGenerator { /// implementation. #[derive(Debug, Clone, Copy)] pub struct DefaultShiftGenerator { - pub base: Item, - pub max_shift: i64, - pub min_shift: i64, + base: Item, + max_shift: i64, + min_shift: i64, + shift_indices: ShiftIndices, +} + +impl DefaultShiftGenerator { + /// Create a default generator, checking the provided ranges. + /// + /// Returns `None` if either the max_shift is less than 0, or the min_shift + /// is greater than 0. 
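+    ///
+    /// As an illustrative example (not from the original change):
+    /// `DefaultShiftGenerator::new(5, 5, -5)` yields a generator that scans
+    /// 5..=10 and then wraps around to 0..=4, while
+    /// `DefaultShiftGenerator::new(5, -1, 0)` returns `None` because the max
+    /// shift is negative.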
+ pub fn new(base: Item, max_shift: i64, min_shift: i64) -> Option { + if max_shift < 0 || min_shift > 0 { + return None; + } + let shift_indices = ShiftIndices::new(max_shift, min_shift); + Some(Self { base, max_shift, min_shift, shift_indices }) + } } impl ShiftGenerator for DefaultShiftGenerator { @@ -467,6 +578,10 @@ impl ShiftGenerator for DefaultShiftGenerator { fn min_shift(&self) -> &i64 { &self.min_shift } + + fn shift_indices(&self) -> &ShiftIndices { + &self.shift_indices + } } #[cfg(test)] @@ -474,6 +589,7 @@ mod tests { use super::DefaultShiftGenerator; use super::NextItem; + use super::ShiftIndices; use crate::db; use async_bb8_diesel::AsyncRunQueryDsl; use async_bb8_diesel::AsyncSimpleConnection; @@ -602,8 +718,7 @@ mod tests { // // This generator should start at 0, and then select over the range [0, // 10], wrapping back to 0. - let generator = - DefaultShiftGenerator { base: 0, max_shift: 10, min_shift: 0 }; + let generator = DefaultShiftGenerator::new(0, 10, 0).unwrap(); let query = NextItemQuery::new(generator); let it = diesel::insert_into(item::dsl::item) .values(query) @@ -623,8 +738,7 @@ mod tests { assert_eq!(it.value, 1); // Insert 10, and guarantee that we get it back. - let generator = - DefaultShiftGenerator { base: 10, max_shift: 0, min_shift: -10 }; + let generator = DefaultShiftGenerator::new(10, 0, -10).unwrap(); let query = NextItemQuery::new(generator); let it = diesel::insert_into(item::dsl::item) .values(query) @@ -647,4 +761,94 @@ mod tests { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + #[tokio::test] + async fn test_next_item_query_is_ordered_by_indices() { + // Setup the test database + let logctx = + dev::test_setup_log("test_next_item_query_is_ordered_by_indices"); + let log = logctx.log.new(o!()); + let mut db = test_setup_database(&log).await; + let cfg = crate::db::Config { url: db.pg_config().clone() }; + let pool = Arc::new(crate::db::Pool::new(&logctx.log, &cfg)); + let conn = pool.pool().get().await.unwrap(); + + // We're going to operate on a separate table, for simplicity. + setup_test_schema(&pool).await; + + // To test ordering behavior, we'll generate a range where the natural + // order of the _items_ differs from their indices. I.e., we have some + // non-zero base. We'll make sure we order everything by those indices, + // not the items themselves. + const MIN_SHIFT: i64 = -5; + const MAX_SHIFT: i64 = 5; + const BASE: i32 = 5; + let generator = + DefaultShiftGenerator::new(BASE, MAX_SHIFT, MIN_SHIFT).unwrap(); + let query = NextItemQuery::new(generator); + + // Insert all items until there are none left. 
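+        // (Illustrative walk-through, not from the original change: with
+        // BASE = 5, MAX_SHIFT = 5, and MIN_SHIFT = -5, the expected
+        // allocation order is 5, 6, ..., 10, then 0, 1, ..., 4.)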
+        let first_range = i64::from(BASE)..=i64::from(BASE) + MAX_SHIFT;
+        let second_range = i64::from(BASE) + MIN_SHIFT..i64::from(BASE);
+        let mut expected = first_range.chain(second_range);
+        while let Some(expected_value) = expected.next() {
+            let it = diesel::insert_into(item::dsl::item)
+                .values(query)
+                .returning(Item::as_returning())
+                .get_result_async(&*conn)
+                .await
+                .unwrap();
+            assert_eq!(i64::from(it.value), expected_value);
+        }
+        assert!(
+            expected.next().is_none(),
+            "Should have exhausted the expected values"
+        );
+        diesel::insert_into(item::dsl::item)
+            .values(query)
+            .returning(Item::as_returning())
+            .get_result_async(&*conn)
+            .await
+            .expect_err(
+                "The next item query should not have further items to generate",
+            );
+
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
+    }
+
+    #[test]
+    fn test_shift_indices() {
+        // In this case, we're generating a list of 11 items, all sequential. So
+        // we want the first set of arguments to `generate_series()` to be 0,
+        // 10, and the second set 11, 10. That means the first indices will be
+        // 0..=10 and the second 11..=10, i.e., empty.
+        let min_shift = 0;
+        let max_shift = 10;
+        let indices = ShiftIndices::new(max_shift, min_shift);
+        assert_eq!(indices.first_end, 10);
+        assert_eq!(indices.second_start, 11);
+        assert_eq!(indices.second_end, 10);
+
+        // Here, the list is split in half. We still want to end up with the
+        // sequence 0..=10, split at 5. So the arguments to generate_series()
+        // should be (0, 5) and (6, 10).
+        let min_shift = -5;
+        let max_shift = 5;
+        let indices = ShiftIndices::new(max_shift, min_shift);
+        assert_eq!(indices.first_end, 5);
+        assert_eq!(indices.second_start, 6);
+        assert_eq!(indices.second_end, 10);
+
+        // This case tests where most of the range is _before_ the base, i.e.,
+        // the max shift is zero. Note that this technically still means we
+        // have one item in the list of available items, which is the base
+        // itself (at a shift
+        // of 0).
+ let min_shift = -10; + let max_shift = 0; + let indices = ShiftIndices::new(max_shift, min_shift); + assert_eq!(indices.first_end, 0); + assert_eq!(indices.second_start, 1); + assert_eq!(indices.second_end, 10); + } } diff --git a/nexus/db-queries/src/db/queries/vpc.rs b/nexus/db-queries/src/db/queries/vpc.rs index c29a51adb0..2875ae6a05 100644 --- a/nexus/db-queries/src/db/queries/vpc.rs +++ b/nexus/db-queries/src/db/queries/vpc.rs @@ -246,8 +246,8 @@ struct NextVni { impl NextVni { fn new(vni: Vni) -> Self { let VniShifts { min_shift, max_shift } = VniShifts::new(vni); - let generator = - DefaultShiftGenerator { base: vni, max_shift, min_shift }; + let generator = DefaultShiftGenerator::new(vni, max_shift, min_shift) + .expect("invalid min/max shift"); let inner = NextItem::new_unscoped(generator); Self { inner } } @@ -262,8 +262,8 @@ impl NextVni { -i32::try_from(base_u32) .expect("Expected a valid VNI at this point"), ); - let generator = - DefaultShiftGenerator { base: vni, max_shift, min_shift }; + let generator = DefaultShiftGenerator::new(vni, max_shift, min_shift) + .expect("invalid min/max shift"); let inner = NextItem::new_unscoped(generator); Self { inner } } From 4e6af7a5b67bb10327189c137a2e9e8584e18f1b Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sat, 17 Feb 2024 04:24:42 +0000 Subject: [PATCH 014/157] chore(deps): update rust crate supports-color to v3 (#4984) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 9 ++++----- Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ecdd8c235e..38a193bedb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3670,9 +3670,9 @@ dependencies = [ [[package]] name = "is_ci" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "616cde7c720bb2bb5824a224687d8f77bfd38922027f01d825cd7453be5099fb" +checksum = "7655c9839580ee829dfacba1d1278c2b7883e50a277ff7541299489d6bdfdc45" [[package]] name = "itertools" @@ -8775,11 +8775,10 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "supports-color" -version = "2.1.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6398cde53adc3c4557306a96ce67b302968513830a77a95b2b17305d9719a89" +checksum = "9829b314621dfc575df4e409e79f9d6a66a3bd707ab73f23cb4aa3a854ac854f" dependencies = [ - "is-terminal", "is_ci", ] diff --git a/Cargo.toml b/Cargo.toml index f15170e2e1..70d8e3ea48 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -377,7 +377,7 @@ static_assertions = "1.1.0" steno = "0.4.0" strum = { version = "0.26", features = [ "derive" ] } subprocess = "0.2.9" -supports-color = "2.1.0" +supports-color = "3.0.0" swrite = "0.1.0" libsw = { version = "3.3.1", features = ["tokio"] } syn = { version = "2.0" } From aa61741059df67a3d6bf9bc0472536191420c210 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sat, 17 Feb 2024 05:15:52 +0000 Subject: [PATCH 015/157] chore(deps): update taiki-e/install-action digest to bd71f12 (#5082) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index c263eecca3..5927cb43e6 100644 --- 
a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@14422f84f07a2d547893af56258f5e59333262df # v2 + uses: taiki-e/install-action@6943331e01261cdff7420bbc2508cb463574e404 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From 754dd48946ad3bd85f11497d83a35ae5c2ee1bd5 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sat, 17 Feb 2024 05:16:51 +0000 Subject: [PATCH 016/157] chore(deps): update rust crate multimap to 0.10.0 (#5093) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 38a193bedb..3b19e5cd95 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4186,9 +4186,9 @@ dependencies = [ [[package]] name = "multimap" -version = "0.8.3" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" dependencies = [ "serde", ] diff --git a/Cargo.toml b/Cargo.toml index 70d8e3ea48..2f39e41e64 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -252,7 +252,7 @@ mime_guess = "2.0.4" mockall = "0.12" newtype_derive = "0.1.6" mg-admin-client = { path = "clients/mg-admin-client" } -multimap = "0.8.1" +multimap = "0.10.0" nexus-blueprint-execution = { path = "nexus/blueprint-execution" } nexus-client = { path = "clients/nexus-client" } nexus-db-model = { path = "nexus/db-model" } From 41e1c79a773834d86c5d95192deafb984e4fadfc Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sat, 17 Feb 2024 05:56:29 +0000 Subject: [PATCH 017/157] chore(deps): update rust crate reedline to 0.29.0 (#5094) --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3b19e5cd95..6760688424 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4637,9 +4637,9 @@ checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" [[package]] name = "nu-ansi-term" -version = "0.49.0" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c073d3c1930d0751774acf49e66653acecb416c3a54c6ec095a9b11caddb5a68" +checksum = "dd2800e1520bdc966782168a627aa5d1ad92e33b984bf7c7615d31280c83ff14" dependencies = [ "windows-sys 0.48.0", ] @@ -6984,9 +6984,9 @@ dependencies = [ [[package]] name = "reedline" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f4e89a0f80909b3ca4bca9759ed37e4bfddb6f5d2ffb1b4ceb2b1638a3e1eb" +checksum = "9e01ebfbdb1a88963121d3c928c97be7f10fec7795bec8b918c8cda1db7c29e6" dependencies = [ "chrono", "crossterm", diff --git a/Cargo.toml b/Cargo.toml index 2f39e41e64..b0d82f8fd6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -323,7 +323,7 @@ rand = "0.8.5" ratatui = "0.26.1" rayon = "1.8" rcgen = "0.12.1" -reedline = "0.28.0" +reedline = "0.29.0" ref-cast = "1.0" regex = "1.10.3" regress = "0.8.0" From d177ccb6e8ab571f45de87635a5699d78d2c6b04 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sun, 18 Feb 2024 01:12:59 -0800 Subject: [PATCH 018/157] chore(deps): update rust crate ring to 0.17.8 (#5101) Co-authored-by: oxide-renovate[bot] 
<146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 29 +++++++++++++++-------------- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6760688424..8ed14304d0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5061,7 +5061,7 @@ dependencies = [ "ref-cast", "regex", "reqwest", - "ring 0.17.7", + "ring 0.17.8", "rustls 0.22.2", "rustls-pemfile 2.0.0", "samael", @@ -5164,7 +5164,7 @@ dependencies = [ "petgraph", "rayon", "reqwest", - "ring 0.17.7", + "ring 0.17.8", "semver 1.0.21", "serde", "sled-hardware", @@ -5317,7 +5317,7 @@ dependencies = [ "rcgen", "regex", "reqwest", - "ring 0.17.7", + "ring 0.17.8", "rustls 0.22.2", "slog", "subprocess", @@ -5417,7 +5417,7 @@ dependencies = [ "regex-automata 0.4.4", "regex-syntax 0.8.2", "reqwest", - "ring 0.17.7", + "ring 0.17.8", "rustix", "schemars", "semver 1.0.21", @@ -6930,7 +6930,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48406db8ac1f3cbc7dcdb56ec355343817958a356ff430259bb07baf7607e1e1" dependencies = [ "pem", - "ring 0.17.7", + "ring 0.17.8", "time", "yasna", ] @@ -7176,16 +7176,17 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if", "getrandom 0.2.10", "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -7432,7 +7433,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" dependencies = [ "log", - "ring 0.17.7", + "ring 0.17.8", "rustls-webpki 0.101.7", "sct", ] @@ -7444,7 +7445,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" dependencies = [ "log", - "ring 0.17.7", + "ring 0.17.8", "rustls-pki-types", "rustls-webpki 0.102.1", "subtle", @@ -7495,7 +7496,7 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "untrusted 0.9.0", ] @@ -7505,7 +7506,7 @@ version = "0.102.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef4ca26037c909dedb327b48c3327d0ba91d3dd3c4e05dad328f210ffb68e95b" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "rustls-pki-types", "untrusted 0.9.0", ] @@ -9415,7 +9416,7 @@ dependencies = [ "pem", "percent-encoding", "reqwest", - "ring 0.17.7", + "ring 0.17.8", "serde", "serde_json", "serde_plain", @@ -9647,7 +9648,7 @@ dependencies = [ "omicron-workspace-hack", "parse-size", "rand 0.8.5", - "ring 0.17.7", + "ring 0.17.8", "serde", "serde_json", "serde_path_to_error", diff --git a/Cargo.toml b/Cargo.toml index b0d82f8fd6..2ba68461d2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -328,7 +328,7 @@ ref-cast = "1.0" regex = "1.10.3" regress = "0.8.0" reqwest = { version = "0.11", default-features = false } -ring = "0.17.7" +ring = "0.17.8" rpassword = "7.3.1" rstest = "0.18.2" rustfmt-wrapper = "0.2" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index c204b56b4d..bbb5191b4c 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ 
-86,7 +86,7 @@ regex = { version = "1.10.3" } regex-automata = { version = "0.4.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } regex-syntax = { version = "0.8.2" } reqwest = { version = "0.11.22", features = ["blocking", "json", "rustls-tls", "stream"] } -ring = { version = "0.17.7", features = ["std"] } +ring = { version = "0.17.8", features = ["std"] } schemars = { version = "0.8.16", features = ["bytes", "chrono", "uuid1"] } semver = { version = "1.0.21", features = ["serde"] } serde = { version = "1.0.196", features = ["alloc", "derive", "rc"] } @@ -193,7 +193,7 @@ regex = { version = "1.10.3" } regex-automata = { version = "0.4.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } regex-syntax = { version = "0.8.2" } reqwest = { version = "0.11.22", features = ["blocking", "json", "rustls-tls", "stream"] } -ring = { version = "0.17.7", features = ["std"] } +ring = { version = "0.17.8", features = ["std"] } schemars = { version = "0.8.16", features = ["bytes", "chrono", "uuid1"] } semver = { version = "1.0.21", features = ["serde"] } serde = { version = "1.0.196", features = ["alloc", "derive", "rc"] } From 15e10b251ffdba73a69f972a4b278cbaa461b61d Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sun, 18 Feb 2024 01:13:10 -0800 Subject: [PATCH 019/157] chore(deps): update rust crate rustls-pemfile to 2.1.0 (#5095) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 14 +++++++------- Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8ed14304d0..2c6328dc9c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1984,7 +1984,7 @@ dependencies = [ "percent-encoding", "proc-macro2", "rustls 0.22.2", - "rustls-pemfile 2.0.0", + "rustls-pemfile 2.1.0", "schemars", "serde", "serde_json", @@ -5063,7 +5063,7 @@ dependencies = [ "reqwest", "ring 0.17.8", "rustls 0.22.2", - "rustls-pemfile 2.0.0", + "rustls-pemfile 2.1.0", "samael", "schemars", "semver 1.0.21", @@ -7459,7 +7459,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", - "rustls-pemfile 2.0.0", + "rustls-pemfile 2.1.0", "rustls-pki-types", "schannel", "security-framework", @@ -7476,9 +7476,9 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" +checksum = "3c333bb734fcdedcea57de1602543590f545f127dc8b533324318fd492c5c70b" dependencies = [ "base64", "rustls-pki-types", @@ -7486,9 +7486,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e9d979b3ce68192e42760c7810125eb6cf2ea10efae545a156063e61f314e2a" +checksum = "048a63e5b3ac996d78d402940b5fa47973d2d080c6c6fffa1d0f19c4445310b7" [[package]] name = "rustls-webpki" diff --git a/Cargo.toml b/Cargo.toml index 2ba68461d2..61b9bb67fd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -333,7 +333,7 @@ rpassword = "7.3.1" rstest = "0.18.2" rustfmt-wrapper = "0.2" rustls = "0.22.2" -rustls-pemfile = "2.0.0" +rustls-pemfile = "2.1.0" 
rustyline = "13.0.0" samael = { version = "0.0.14", features = ["xmlsec"] } schemars = "0.8.16" From 9dd2325cde45e70084ce6b10a17394ed628bce8a Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sun, 18 Feb 2024 10:11:45 +0000 Subject: [PATCH 020/157] fix(deps): update russh monorepo to 0.42.0 (#5098) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 23 +++++++++++++++++++---- end-to-end-tests/Cargo.toml | 4 ++-- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2c6328dc9c..884f398a73 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5885,6 +5885,20 @@ dependencies = [ "sha2", ] +[[package]] +name = "p521" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc9e2161f1f215afdfce23677034ae137bbd45016a880c2eb3ba8eb95f085b2" +dependencies = [ + "base16ct", + "ecdsa", + "elliptic-curve", + "primeorder", + "rand_core 0.6.4", + "sha2", +] + [[package]] name = "packed_struct" version = "0.10.1" @@ -7277,9 +7291,9 @@ dependencies = [ [[package]] name = "russh" -version = "0.40.2" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93dab9e1c313d0d04a42e39c0995943fc38c037e2e3fa9c33685777a1aecdfb2" +checksum = "394cc2733c5b5ca9f342d9532b78599849633ccabdbf40f1af094cacf4d86b62" dependencies = [ "aes", "aes-gcm", @@ -7322,9 +7336,9 @@ dependencies = [ [[package]] name = "russh-keys" -version = "0.40.1" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0de3cb3cbfa773b7f170b6830565fac207a0d630cc666a29f80097cc374dd8" +checksum = "3e98aa03d476f8d2bf6e4525291c1eb8e22f4ae9653d7a5458fd53cb0191c741" dependencies = [ "aes", "async-trait", @@ -7345,6 +7359,7 @@ dependencies = [ "num-bigint", "num-integer", "p256", + "p521", "pbkdf2 0.11.0", "rand 0.7.3", "rand_core 0.6.4", diff --git a/end-to-end-tests/Cargo.toml b/end-to-end-tests/Cargo.toml index 5ff958dc9c..fee32a344e 100644 --- a/end-to-end-tests/Cargo.toml +++ b/end-to-end-tests/Cargo.toml @@ -16,8 +16,8 @@ omicron-test-utils.workspace = true oxide-client.workspace = true rand.workspace = true reqwest.workspace = true -russh = "0.40.2" -russh-keys = "0.40.1" +russh = "0.42.0" +russh-keys = "0.42.0" serde.workspace = true serde_json.workspace = true tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } From 563234e35ca18d0df64b51431bff10906ba69bf8 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sun, 18 Feb 2024 16:32:35 +0000 Subject: [PATCH 021/157] chore(deps): update rust crate textwrap to 0.16.1 (#5102) --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 884f398a73..20726b84fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5141,7 +5141,7 @@ dependencies = [ "strum 0.26.1", "subprocess", "tabled", - "textwrap 0.16.0", + "textwrap 0.16.1", "tokio", "unicode-width", "uuid", @@ -9014,9 +9014,9 @@ dependencies = [ [[package]] name = "textwrap" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" +checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" dependencies = [ "smawk", "unicode-linebreak", @@ -10338,7 +10338,7 @@ dependencies = [ 
"slog-term", "supports-color", "tempfile", - "textwrap 0.16.0", + "textwrap 0.16.1", "tokio", "tokio-util", "toml 0.8.10", diff --git a/Cargo.toml b/Cargo.toml index 61b9bb67fd..c7ef94eaa2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -387,7 +387,7 @@ tempdir = "0.3" tempfile = "3.10" term = "0.7" termios = "0.3" -textwrap = "0.16.0" +textwrap = "0.16.1" test-strategy = "0.3.1" thiserror = "1.0" tofino = { git = "http://github.com/oxidecomputer/tofino", branch = "main" } From 93de3fed3e82c39bed0cba9a6b128218e994af8f Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 20 Feb 2024 10:04:32 -0800 Subject: [PATCH 022/157] chore(deps): update rust crate serde_json to 1.0.114 (#5106) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20726b84fe..dce0d06c3d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7867,9 +7867,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "itoa", "ryu", diff --git a/Cargo.toml b/Cargo.toml index c7ef94eaa2..ed2e94f8eb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -342,7 +342,7 @@ semver = { version = "1.0.21", features = ["std", "serde"] } serde = { version = "1.0", default-features = false, features = [ "derive" ] } serde_derive = "1.0" serde_human_bytes = { git = "http://github.com/oxidecomputer/serde_human_bytes", branch = "main" } -serde_json = "1.0.113" +serde_json = "1.0.114" serde_path_to_error = "0.1.15" serde_tokenstream = "0.2" serde_urlencoded = "0.7.1" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index bbb5191b4c..14e238ba61 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -90,7 +90,7 @@ ring = { version = "0.17.8", features = ["std"] } schemars = { version = "0.8.16", features = ["bytes", "chrono", "uuid1"] } semver = { version = "1.0.21", features = ["serde"] } serde = { version = "1.0.196", features = ["alloc", "derive", "rc"] } -serde_json = { version = "1.0.113", features = ["raw_value", "unbounded_depth"] } +serde_json = { version = "1.0.114", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } similar = { version = "2.3.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } @@ -197,7 +197,7 @@ ring = { version = "0.17.8", features = ["std"] } schemars = { version = "0.8.16", features = ["bytes", "chrono", "uuid1"] } semver = { version = "1.0.21", features = ["serde"] } serde = { version = "1.0.196", features = ["alloc", "derive", "rc"] } -serde_json = { version = "1.0.113", features = ["raw_value", "unbounded_depth"] } +serde_json = { version = "1.0.114", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } similar = { version = "2.3.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } From 172cf38036ce7ffd4a603c8f57b6172d3d3706d6 Mon Sep 17 
00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 20 Feb 2024 10:04:41 -0800 Subject: [PATCH 023/157] chore(deps): update rust crate semver to 1.0.22 (#5103) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 24 ++++++++++++------------ Cargo.toml | 2 +- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dce0d06c3d..0f901bdade 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -803,7 +803,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_json", "thiserror", @@ -2786,7 +2786,7 @@ dependencies = [ "once_cell", "pathdiff", "petgraph", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_json", "smallvec 1.13.1", @@ -4334,7 +4334,7 @@ dependencies = [ "rand 0.8.5", "ref-cast", "schemars", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_json", "sled-agent-client", @@ -4885,7 +4885,7 @@ dependencies = [ "regress", "reqwest", "schemars", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_human_bytes", "serde_json", @@ -5066,7 +5066,7 @@ dependencies = [ "rustls-pemfile 2.1.0", "samael", "schemars", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_json", "serde_urlencoded", @@ -5165,7 +5165,7 @@ dependencies = [ "rayon", "reqwest", "ring 0.17.8", - "semver 1.0.21", + "semver 1.0.22", "serde", "sled-hardware", "slog", @@ -5266,7 +5266,7 @@ dependencies = [ "rcgen", "reqwest", "schemars", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_human_bytes", "serde_json", @@ -5420,7 +5420,7 @@ dependencies = [ "ring 0.17.8", "rustix", "schemars", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_json", "sha2", @@ -5475,7 +5475,7 @@ dependencies = [ "hex", "reqwest", "ring 0.16.20", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_derive", "serde_json", @@ -7412,7 +7412,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.21", + "semver 1.0.22", ] [[package]] @@ -7780,9 +7780,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" dependencies = [ "serde", ] diff --git a/Cargo.toml b/Cargo.toml index ed2e94f8eb..d3f099589f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -338,7 +338,7 @@ rustyline = "13.0.0" samael = { version = "0.0.14", features = ["xmlsec"] } schemars = "0.8.16" secrecy = "0.8.0" -semver = { version = "1.0.21", features = ["std", "serde"] } +semver = { version = "1.0.22", features = ["std", "serde"] } serde = { version = "1.0", default-features = false, features = [ "derive" ] } serde_derive = "1.0" serde_human_bytes = { git = "http://github.com/oxidecomputer/serde_human_bytes", branch = "main" } diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 14e238ba61..ebe683e51a 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -88,7 +88,7 @@ regex-syntax = { version = "0.8.2" } reqwest = { version = "0.11.22", features = ["blocking", "json", "rustls-tls", "stream"] } ring = { version = "0.17.8", features = ["std"] } 
schemars = { version = "0.8.16", features = ["bytes", "chrono", "uuid1"] } -semver = { version = "1.0.21", features = ["serde"] } +semver = { version = "1.0.22", features = ["serde"] } serde = { version = "1.0.196", features = ["alloc", "derive", "rc"] } serde_json = { version = "1.0.114", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } @@ -195,7 +195,7 @@ regex-syntax = { version = "0.8.2" } reqwest = { version = "0.11.22", features = ["blocking", "json", "rustls-tls", "stream"] } ring = { version = "0.17.8", features = ["std"] } schemars = { version = "0.8.16", features = ["bytes", "chrono", "uuid1"] } -semver = { version = "1.0.21", features = ["serde"] } +semver = { version = "1.0.22", features = ["serde"] } serde = { version = "1.0.196", features = ["alloc", "derive", "rc"] } serde_json = { version = "1.0.114", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } From 915276d47096a9ce076453eb75aaa9de29477e80 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 20 Feb 2024 19:06:11 +0000 Subject: [PATCH 024/157] chore(deps): update rust crate assert_cmd to 2.0.14 (#5105) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0f901bdade..bcbcb3e9c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -236,9 +236,9 @@ dependencies = [ [[package]] name = "assert_cmd" -version = "2.0.13" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00ad3f3a942eee60335ab4342358c161ee296829e0d16ff42fc1d6cb07815467" +checksum = "ed72493ac66d5804837f480ab3766c72bdfab91a65e565fc54fa9e42db0073a8" dependencies = [ "anstyle", "bstr 1.6.0", diff --git a/Cargo.toml b/Cargo.toml index d3f099589f..db37547ea0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -155,7 +155,7 @@ anyhow = "1.0" api_identity = { path = "api_identity" } approx = "0.5.1" assert_matches = "1.5.0" -assert_cmd = "2.0.13" +assert_cmd = "2.0.14" async-bb8-diesel = { git = "https://github.com/oxidecomputer/async-bb8-diesel", rev = "ed7ab5ef0513ba303d33efd41d3e9e381169d59b" } async-trait = "0.1.77" atomicwrites = "0.4.3" From 1e76acade0bcac1415e8709ec52d23515164489c Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Tue, 20 Feb 2024 18:44:12 -0800 Subject: [PATCH 025/157] update internal DNS during blueprint execution (#4989) --- Cargo.lock | 8 + clients/dns-service-client/Cargo.toml | 1 + clients/dns-service-client/src/diff.rs | 230 ++++++ clients/dns-service-client/src/lib.rs | 31 +- common/src/api/external/mod.rs | 6 + dev-tools/omdb/src/bin/omdb/nexus.rs | 2 +- internal-dns/src/config.rs | 102 ++- internal-dns/src/names.rs | 6 - nexus/blueprint-execution/Cargo.toml | 11 +- nexus/blueprint-execution/src/dns.rs | 745 ++++++++++++++++++ nexus/blueprint-execution/src/lib.rs | 76 +- .../blueprint-execution/src/omicron_zones.rs | 192 ++--- nexus/db-model/src/deployment.rs | 3 + nexus/db-model/src/schema.rs | 4 +- nexus/db-queries/src/context.rs | 59 ++ .../db-queries/src/db/datastore/deployment.rs | 67 +- nexus/db-queries/src/db/datastore/dns.rs | 277 ++++++- nexus/db-queries/src/db/datastore/mod.rs | 9 + nexus/db-queries/src/db/datastore/silo.rs | 5 +- nexus/db-queries/src/db/datastore/sled.rs | 76 ++ nexus/deployment/src/blueprint_builder.rs | 109 ++- nexus/deployment/src/planner.rs | 47 
+-
 .../src/app/background/blueprint_execution.rs |  71 +-
 nexus/src/app/background/blueprint_load.rs    |   2 +
 nexus/src/app/background/init.rs              |   3 +-
 nexus/src/app/deployment.rs                   |  35 +-
 nexus/test-utils/src/lib.rs                   |  23 -
 nexus/types/src/deployment.rs                 |  18 +-
 nexus/types/src/inventory.rs                  |   8 +
 openapi/nexus-internal.json                   |  18 +
 schema/crdb/36.0.0/up1.sql                    |   8 +
 schema/crdb/36.0.0/up2.sql                    |   2 +
 schema/crdb/dbinit.sql                        |   7 +-
 sled-agent/src/rack_setup/plan/service.rs     |  95 ++-
 34 files changed, 1975 insertions(+), 381 deletions(-)
 create mode 100644 clients/dns-service-client/src/diff.rs
 create mode 100644 nexus/blueprint-execution/src/dns.rs
 create mode 100644 schema/crdb/36.0.0/up1.sql
 create mode 100644 schema/crdb/36.0.0/up2.sql

diff --git a/Cargo.lock b/Cargo.lock
index bcbcb3e9c8..e15afdfbab 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1887,6 +1887,7 @@ dependencies = [
 name = "dns-service-client"
 version = "0.1.0"
 dependencies = [
+ "anyhow",
 "chrono",
 "http 0.2.11",
 "omicron-workspace-hack",
@@ -4267,21 +4268,28 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "chrono",
+ "dns-service-client",
  "futures",
  "httptest",
+ "internal-dns",
+ "ipnet",
  "nexus-db-model",
  "nexus-db-queries",
+ "nexus-deployment",
+ "nexus-inventory",
  "nexus-test-utils",
  "nexus-test-utils-macros",
  "nexus-types",
  "omicron-common",
  "omicron-nexus",
  "omicron-rpaths",
+ "omicron-test-utils",
  "omicron-workspace-hack",
  "pq-sys",
  "reqwest",
  "sled-agent-client",
  "slog",
+ "slog-error-chain",
  "tokio",
  "uuid",
 ]
diff --git a/clients/dns-service-client/Cargo.toml b/clients/dns-service-client/Cargo.toml
index 6132222b8a..27ffb66d88 100644
--- a/clients/dns-service-client/Cargo.toml
+++ b/clients/dns-service-client/Cargo.toml
@@ -5,6 +5,7 @@ edition = "2021"
 license = "MPL-2.0"
 
 [dependencies]
+anyhow.workspace = true
 chrono.workspace = true
 http.workspace = true
 progenitor.workspace = true
diff --git a/clients/dns-service-client/src/diff.rs b/clients/dns-service-client/src/diff.rs
new file mode 100644
index 0000000000..39d51cc974
--- /dev/null
+++ b/clients/dns-service-client/src/diff.rs
@@ -0,0 +1,230 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use crate::types::DnsConfigParams;
+use crate::types::DnsRecord;
+use crate::DnsRecords;
+use anyhow::ensure;
+use anyhow::Context;
+
+/// Compare the DNS records contained in two sets of DNS configuration
+#[derive(Debug)]
+pub struct DnsDiff<'a> {
+    left: &'a DnsRecords,
+    right: &'a DnsRecords,
+}
+
+impl<'a> DnsDiff<'a> {
+    /// Compare the DNS records contained in two sets of DNS configuration
+    ///
+    /// Both configurations are expected to contain exactly one zone and they
+    /// should have the same name.
+    pub fn new(
+        left: &'a DnsConfigParams,
+        right: &'a DnsConfigParams,
+    ) -> Result<DnsDiff<'a>, anyhow::Error> {
+        let left_zone = left.sole_zone().context("left side of diff")?;
+        let right_zone = right.sole_zone().context("right side of diff")?;
+
+        ensure!(
+            left_zone.zone_name == right_zone.zone_name,
+            "cannot compare DNS configuration from zones with different names: \
+            {:?} vs. 
{:?}", left_zone.zone_name, right_zone.zone_name,
+        );
+
+        Ok(DnsDiff { left: &left_zone.records, right: &right_zone.records })
+    }
+
+    /// Iterate over the names that are present in the `right` config but
+    /// absent in the `left` one (i.e., added between `left` and `right`)
+    pub fn names_added(&self) -> impl Iterator<Item = (&str, &[DnsRecord])> {
+        self.right
+            .iter()
+            .filter(|(k, _)| !self.left.contains_key(*k))
+            .map(|(k, v)| (k.as_ref(), v.as_ref()))
+    }
+
+    /// Iterate over the names that are present in the `left` config but
+    /// absent in the `right` one (i.e., removed between `left` and `right`)
+    pub fn names_removed(&self) -> impl Iterator<Item = (&str, &[DnsRecord])> {
+        self.left
+            .iter()
+            .filter(|(k, _)| !self.right.contains_key(*k))
+            .map(|(k, v)| (k.as_ref(), v.as_ref()))
+    }
+
+    /// Iterate over the names whose records changed between `left` and `right`.
+    pub fn names_changed(
+        &self,
+    ) -> impl Iterator<Item = (&str, &[DnsRecord], &[DnsRecord])> {
+        self.left.iter().filter_map(|(k, v1)| match self.right.get(k) {
+            Some(v2) if v1 != v2 => {
+                Some((k.as_ref(), v1.as_ref(), v2.as_ref()))
+            }
+            _ => None,
+        })
+    }
+
+    /// Returns true iff there are no differences in the DNS names and records
+    /// described by the given configurations
+    pub fn is_empty(&self) -> bool {
+        self.names_added().next().is_none()
+            && self.names_removed().next().is_none()
+            && self.names_changed().next().is_none()
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::DnsDiff;
+    use crate::types::DnsConfigParams;
+    use crate::types::DnsConfigZone;
+    use crate::types::DnsRecord;
+    use chrono::Utc;
+    use std::collections::HashMap;
+    use std::net::Ipv4Addr;
+
+    const ZONE_NAME: &str = "dummy";
+
+    fn example() -> DnsConfigParams {
+        DnsConfigParams {
+            generation: 4,
+            time_created: Utc::now(),
+            zones: vec![DnsConfigZone {
+                zone_name: ZONE_NAME.to_string(),
+                records: HashMap::from([
+                    (
+                        "ex1".to_string(),
+                        vec![DnsRecord::A(Ipv4Addr::LOCALHOST)],
+                    ),
+                    (
+                        "ex2".to_string(),
+                        vec![DnsRecord::A("192.168.1.3".parse().unwrap())],
+                    ),
+                ]),
+            }],
+        }
+    }
+
+    #[test]
+    fn diff_invalid() {
+        let example_empty = DnsConfigParams {
+            generation: 3,
+            time_created: Utc::now(),
+            zones: vec![],
+        };
+
+        // Configs must have at least one zone.
+        let error = DnsDiff::new(&example_empty, &example_empty)
+            .expect_err("unexpectedly succeeded comparing two empty configs");
+        assert!(
+            format!("{:#}", error).contains("expected exactly one DNS zone")
+        );
+
+        let example = example();
+        let error = DnsDiff::new(&example_empty, &example)
+            .expect_err("unexpectedly succeeded comparing an empty config");
+        assert!(
+            format!("{:#}", error).contains("expected exactly one DNS zone")
+        );
+
+        // Configs must not have more than one zone.
+        let example_multiple = DnsConfigParams {
+            generation: 3,
+            time_created: Utc::now(),
+            zones: vec![
+                DnsConfigZone {
+                    zone_name: ZONE_NAME.to_string(),
+                    records: HashMap::new(),
+                },
+                DnsConfigZone {
+                    zone_name: "two".to_string(),
+                    records: HashMap::new(),
+                },
+            ],
+        };
+        let error = DnsDiff::new(&example_multiple, &example).expect_err(
+            "unexpectedly succeeded comparing config with multiple zones",
+        );
+        assert!(
+            format!("{:#}", error).contains("expected exactly one DNS zone")
+        );
+
+        // Cannot compare different zone names
+        let example_different_zone = DnsConfigParams {
+            generation: 3,
+            time_created: Utc::now(),
+            zones: vec![DnsConfigZone {
+                zone_name: format!("{}-other", ZONE_NAME),
+                records: HashMap::new(),
+            }],
+        };
+        let error = DnsDiff::new(&example_different_zone, &example).expect_err(
+            "unexpectedly succeeded comparing configs with \
+            different zone names",
+        );
+        assert_eq!(
+            format!("{:#}", error),
+            "cannot compare DNS configuration from zones with different \
+            names: \"dummy-other\" vs. \"dummy\""
+        );
+    }
+
+    #[test]
+    fn diff_equivalent() {
+        let example = example();
+        let diff = DnsDiff::new(&example, &example).unwrap();
+        assert!(diff.is_empty());
+        assert_eq!(diff.names_removed().count(), 0);
+        assert_eq!(diff.names_added().count(), 0);
+        assert_eq!(diff.names_changed().count(), 0);
+    }
+
+    #[test]
+    fn diff_different() {
+        let example = example();
+        let example2 = DnsConfigParams {
+            generation: 4,
+            time_created: Utc::now(),
+            zones: vec![DnsConfigZone {
+                zone_name: ZONE_NAME.to_string(),
+                records: HashMap::from([
+                    (
+                        "ex2".to_string(),
+                        vec![DnsRecord::A("192.168.1.4".parse().unwrap())],
+                    ),
+                    (
+                        "ex3".to_string(),
+                        vec![DnsRecord::A(std::net::Ipv4Addr::LOCALHOST)],
+                    ),
+                ]),
+            }],
+        };
+
+        let diff = DnsDiff::new(&example, &example2).unwrap();
+        assert!(!diff.is_empty());
+
+        let removed = diff.names_removed().collect::<Vec<_>>();
+        assert_eq!(removed.len(), 1);
+        assert_eq!(removed[0].0, "ex1");
+        assert_eq!(removed[0].1, vec![DnsRecord::A(Ipv4Addr::LOCALHOST)]);
+
+        let added = diff.names_added().collect::<Vec<_>>();
+        assert_eq!(added.len(), 1);
+        assert_eq!(added[0].0, "ex3");
+        assert_eq!(added[0].1, vec![DnsRecord::A(Ipv4Addr::LOCALHOST)]);
+
+        let changed = diff.names_changed().collect::<Vec<_>>();
+        assert_eq!(changed.len(), 1);
+        assert_eq!(changed[0].0, "ex2");
+        assert_eq!(
+            changed[0].1,
+            vec![DnsRecord::A("192.168.1.3".parse().unwrap())]
+        );
+        assert_eq!(
+            changed[0].2,
+            vec![DnsRecord::A("192.168.1.4".parse().unwrap())]
+        );
+    }
+}
diff --git a/clients/dns-service-client/src/lib.rs b/clients/dns-service-client/src/lib.rs
index 52c2b8bcd2..cd17a1559c 100644
--- a/clients/dns-service-client/src/lib.rs
+++ b/clients/dns-service-client/src/lib.rs
@@ -2,10 +2,17 @@
 // License, v. 2.0. If a copy of the MPL was not distributed with this
 // file, You can obtain one at https://mozilla.org/MPL/2.0/.
+mod diff;
+
+use crate::Error as DnsConfigError;
+use anyhow::ensure;
+pub use diff::DnsDiff;
+use std::collections::HashMap;
+
 progenitor::generate_api!(
     spec = "../../openapi/dns-server.json",
     inner_type = slog::Logger,
-    derives = [schemars::JsonSchema, Eq, PartialEq],
+    derives = [schemars::JsonSchema, Clone, Eq, PartialEq],
     pre_hook = (|log: &slog::Logger, request: &reqwest::Request| {
         slog::debug!(log, "client request";
             "method" => %request.method(),
@@ -22,8 +29,6 @@ pub const ERROR_CODE_UPDATE_IN_PROGRESS: &'static str = "UpdateInProgress";
 pub const ERROR_CODE_BAD_UPDATE_GENERATION: &'static str =
     "BadUpdateGeneration";
 
-use crate::Error as DnsConfigError;
-
 /// Returns whether an error from this client should be retried
 pub fn is_retryable(error: &DnsConfigError) -> bool {
     let response_value = match error {
@@ -84,3 +89,23 @@ pub fn is_retryable(error: &DnsConfigError) -> bool {
 
     false
 }
+
+type DnsRecords = HashMap<String, Vec<types::DnsRecord>>;
+
+impl types::DnsConfigParams {
+    /// Given a high-level DNS configuration, return a reference to its sole
+    /// DNS zone.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if there are 0 or more than one zones in this
+    /// configuration.
+    pub fn sole_zone(&self) -> Result<&types::DnsConfigZone, anyhow::Error> {
+        ensure!(
+            self.zones.len() == 1,
+            "expected exactly one DNS zone, but found {}",
+            self.zones.len()
+        );
+        Ok(&self.zones[0])
+    }
+}
diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs
index cdc929d89b..17b4826f8c 100644
--- a/common/src/api/external/mod.rs
+++ b/common/src/api/external/mod.rs
@@ -679,6 +679,12 @@ impl From<&Generation> for i64 {
     }
 }
 
+impl From<Generation> for u64 {
+    fn from(g: Generation) -> Self {
+        g.0
+    }
+}
+
 impl From<u32> for Generation {
     fn from(value: u32) -> Self {
         Generation(u64::from(value))
diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs
index 9904263067..aed7d86ba0 100644
--- a/dev-tools/omdb/src/bin/omdb/nexus.rs
+++ b/dev-tools/omdb/src/bin/omdb/nexus.rs
@@ -870,7 +870,7 @@ async fn cmd_nexus_blueprints_diff(
     let b2 = client.blueprint_view(&args.blueprint2_id).await.with_context(
         || format!("fetching blueprint {}", args.blueprint2_id),
     )?;
-    println!("{}", b1.diff(&b2));
+    println!("{}", b1.diff_sleds(&b2));
     Ok(())
 }
 
diff --git a/internal-dns/src/config.rs b/internal-dns/src/config.rs
index bf1d9b763b..5eee34bd51 100644
--- a/internal-dns/src/config.rs
+++ b/internal-dns/src/config.rs
@@ -63,9 +63,9 @@
 use crate::names::{ServiceName, DNS_ZONE};
 use anyhow::{anyhow, ensure};
 use dns_service_client::types::{DnsConfigParams, DnsConfigZone, DnsRecord};
-use omicron_common::api::internal::shared::SwitchLocation;
+use omicron_common::api::external::Generation;
 use std::collections::BTreeMap;
-use std::net::{Ipv6Addr, SocketAddrV6};
+use std::net::Ipv6Addr;
 use uuid::Uuid;
 
 /// Zones that can be referenced within the internal DNS system.
@@ -147,8 +147,6 @@ pub struct DnsConfigBuilder {
     /// network
     sleds: BTreeMap<Uuid, Sled>,
 
-    scrimlets: BTreeMap<SwitchLocation, SocketAddrV6>,
-
     /// set of hosts of type "zone" that have been configured so far, mapping
     /// each zone's unique uuid to its sole IPv6 address on the control plane
     /// network
@@ -163,6 +161,9 @@ pub struct DnsConfigBuilder {
 
     /// similar to service_instances_zones, but for services that run on sleds
     service_instances_sleds: BTreeMap<ServiceName, BTreeMap<Uuid, u16>>,
+
+    /// generation number for this config
+    generation: Generation,
 }
 
 /// Describes a host of type "sled" in the control plane DNS zone
@@ -192,16 +193,17 @@ impl DnsConfigBuilder {
         DnsConfigBuilder {
             sleds: BTreeMap::new(),
             zones: BTreeMap::new(),
-            scrimlets: BTreeMap::new(),
             service_instances_zones: BTreeMap::new(),
             service_instances_sleds: BTreeMap::new(),
+            generation: Generation::new(),
         }
     }
 
     /// Add a new host of type "sled" to the configuration
     ///
-    /// Returns a [`Sled`] that can be used with [`Self::service_backend_sled()`] to
-    /// specify that this sled is a backend for some higher-level service.
+    /// Returns a [`Sled`] that can be used with
+    /// [`Self::service_backend_sled()`] to specify that this sled is a backend
+    /// for some higher-level service.
     ///
     /// # Errors
     ///
@@ -223,19 +225,11 @@ impl DnsConfigBuilder {
         }
     }
 
-    pub fn host_scrimlet(
-        &mut self,
-        switch_location: SwitchLocation,
-        addr: SocketAddrV6,
-    ) -> anyhow::Result<()> {
-        self.scrimlets.insert(switch_location, addr);
-        Ok(())
-    }
-
     /// Add a new dendrite host of type "zone" to the configuration
     ///
-    /// Returns a [`Zone`] that can be used with [`Self::service_backend_zone()`] to
-    /// specify that this zone is a backend for some higher-level service.
+    /// Returns a [`Zone`] that can be used with
+    /// [`Self::service_backend_zone()`] to specify that this zone is a backend
+    /// for some higher-level service.
    ///
    /// # Errors
    ///
@@ -251,8 +245,9 @@ impl DnsConfigBuilder {
 
     /// Add a new host of type "zone" to the configuration
     ///
-    /// Returns a [`Zone`] that can be used with [`Self::service_backend_zone()`] to
-    /// specify that this zone is a backend for some higher-level service.
+    /// Returns a [`Zone`] that can be used with
+    /// [`Self::service_backend_zone()`] to specify that this zone is a backend
+    /// for some higher-level service.
     ///
     /// # Errors
     ///
@@ -363,6 +358,52 @@ impl DnsConfigBuilder {
         }
     }
 
+    /// Higher-level shorthand for adding a zone with a single backend service
+    ///
+    /// # Errors
+    ///
+    /// This function fails only if the given zone has already been added to the
+    /// configuration.
+    pub fn host_zone_with_one_backend(
+        &mut self,
+        zone_id: Uuid,
+        addr: Ipv6Addr,
+        service: ServiceName,
+        port: u16,
+    ) -> anyhow::Result<()> {
+        let zone = self.host_zone(zone_id, addr)?;
+        self.service_backend_zone(service, &zone, port)
+    }
+
+    /// Higher-level shorthand for adding a "switch" zone with its usual set of
+    /// backend services
+    ///
+    /// # Errors
+    ///
+    /// This function fails only if the given zone has already been added to the
+    /// configuration.
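+    ///
+    /// Concretely, this adds the switch zone's AAAA record plus SRV records
+    /// for the Dendrite, MGS, and mgd services at the given ports.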
+ pub fn host_zone_switch( + &mut self, + sled_id: Uuid, + switch_zone_ip: Ipv6Addr, + dendrite_port: u16, + mgs_port: u16, + mgd_port: u16, + ) -> anyhow::Result<()> { + let zone = self.host_dendrite(sled_id, switch_zone_ip)?; + self.service_backend_zone(ServiceName::Dendrite, &zone, dendrite_port)?; + self.service_backend_zone( + ServiceName::ManagementGatewayService, + &zone, + mgs_port, + )?; + self.service_backend_zone(ServiceName::Mgd, &zone, mgd_port) + } + + pub fn generation(&mut self, generation: Generation) { + self.generation = generation; + } + /// Construct a complete [`DnsConfigParams`] (suitable for propagating to /// our DNS servers) for the control plane DNS zone described up to this /// point @@ -378,23 +419,6 @@ impl DnsConfigBuilder { (zone.dns_name(), vec![DnsRecord::Aaaa(zone_ip)]) }); - let scrimlet_srv_records = - self.scrimlets.clone().into_iter().map(|(location, addr)| { - let srv = DnsRecord::Srv(dns_service_client::types::Srv { - prio: 0, - weight: 0, - port: addr.port(), - target: format!("{location}.scrimlet.{}", DNS_ZONE), - }); - (ServiceName::Scrimlet(location).dns_name(), vec![srv]) - }); - - let scrimlet_aaaa_records = - self.scrimlets.into_iter().map(|(location, addr)| { - let aaaa = DnsRecord::Aaaa(*addr.ip()); - (format!("{location}.scrimlet"), vec![aaaa]) - }); - // Assemble the set of SRV records, which implicitly point back at // zones' AAAA records. let srv_records_zones = self.service_instances_zones.into_iter().map( @@ -439,12 +463,10 @@ impl DnsConfigBuilder { .chain(zone_records) .chain(srv_records_sleds) .chain(srv_records_zones) - .chain(scrimlet_aaaa_records) - .chain(scrimlet_srv_records) .collect(); DnsConfigParams { - generation: 1, + generation: u64::from(self.generation), time_created: chrono::Utc::now(), zones: vec![DnsConfigZone { zone_name: DNS_ZONE.to_owned(), diff --git a/internal-dns/src/names.rs b/internal-dns/src/names.rs index bffe6e829a..8cafe4ac97 100644 --- a/internal-dns/src/names.rs +++ b/internal-dns/src/names.rs @@ -4,7 +4,6 @@ //! Well-known DNS names and related types for internal DNS (see RFD 248) -use omicron_common::api::internal::shared::SwitchLocation; use uuid::Uuid; /// Name for the control plane DNS zone @@ -35,7 +34,6 @@ pub enum ServiceName { InternalNtp, Maghemite, //TODO change to Dpd - maghemite has several services. Mgd, - Scrimlet(SwitchLocation), } impl ServiceName { @@ -59,7 +57,6 @@ impl ServiceName { ServiceName::InternalNtp => "internal-ntp", ServiceName::Maghemite => "maghemite", ServiceName::Mgd => "mgd", - ServiceName::Scrimlet(_) => "scrimlet", } } @@ -91,9 +88,6 @@ impl ServiceName { ServiceName::Crucible(id) => { format!("_{}._tcp.{}", self.service_kind(), id) } - ServiceName::Scrimlet(location) => { - format!("_{location}._scrimlet._tcp") - } } } diff --git a/nexus/blueprint-execution/Cargo.toml b/nexus/blueprint-execution/Cargo.toml index 11d8003599..3284bda27e 100644 --- a/nexus/blueprint-execution/Cargo.toml +++ b/nexus/blueprint-execution/Cargo.toml @@ -8,12 +8,17 @@ omicron-rpaths.workspace = true [dependencies] anyhow.workspace = true +dns-service-client.workspace = true futures.workspace = true +internal-dns.workspace = true +nexus-db-model.workspace = true nexus-db-queries.workspace = true nexus-types.workspace = true +omicron-common.workspace = true reqwest.workspace = true sled-agent-client.workspace = true slog.workspace = true +slog-error-chain.workspace = true uuid.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. 
This is needed
@@ -26,9 +31,11 @@ omicron-workspace-hack.workspace = true
 
 [dev-dependencies]
 chrono.workspace = true
 httptest.workspace = true
-nexus-db-model.workspace = true
+ipnet.workspace = true
+nexus-deployment.workspace = true
+nexus-inventory.workspace = true
 nexus-test-utils.workspace = true
 nexus-test-utils-macros.workspace = true
-omicron-common.workspace = true
 omicron-nexus.workspace = true
+omicron-test-utils.workspace = true
 tokio.workspace = true
diff --git a/nexus/blueprint-execution/src/dns.rs b/nexus/blueprint-execution/src/dns.rs
new file mode 100644
index 0000000000..6dd9266f32
--- /dev/null
+++ b/nexus/blueprint-execution/src/dns.rs
@@ -0,0 +1,745 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Propagates internal DNS changes in a given blueprint
+
+use crate::Sled;
+use dns_service_client::DnsDiff;
+use internal_dns::DnsConfigBuilder;
+use internal_dns::ServiceName;
+use nexus_db_model::DnsGroup;
+use nexus_db_queries::context::OpContext;
+use nexus_db_queries::db::datastore::DnsVersionUpdateBuilder;
+use nexus_db_queries::db::DataStore;
+use nexus_types::deployment::Blueprint;
+use nexus_types::deployment::OmicronZoneType;
+use nexus_types::internal_api::params::DnsConfigParams;
+use omicron_common::address::get_switch_zone_address;
+use omicron_common::address::CLICKHOUSE_KEEPER_PORT;
+use omicron_common::address::CLICKHOUSE_PORT;
+use omicron_common::address::COCKROACH_PORT;
+use omicron_common::address::CRUCIBLE_PANTRY_PORT;
+use omicron_common::address::CRUCIBLE_PORT;
+use omicron_common::address::DENDRITE_PORT;
+use omicron_common::address::DNS_HTTP_PORT;
+use omicron_common::address::MGD_PORT;
+use omicron_common::address::MGS_PORT;
+use omicron_common::address::NEXUS_INTERNAL_PORT;
+use omicron_common::address::NTP_PORT;
+use omicron_common::address::OXIMETER_PORT;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::Generation;
+use omicron_common::api::external::InternalContext;
+use slog::{debug, info, o};
+use std::collections::BTreeMap;
+use uuid::Uuid;
+
+pub(crate) async fn deploy_dns(
+    opctx: &OpContext,
+    datastore: &DataStore,
+    creator: String,
+    blueprint: &Blueprint,
+    sleds_by_id: &BTreeMap<Uuid, Sled>,
+) -> Result<(), Error> {
+    // First, fetch the current DNS config.
+    let dns_config_current = datastore
+        .dns_config_read(opctx, DnsGroup::Internal)
+        .await
+        .internal_context("reading current DNS")?;
+
+    // We could check here that the DNS version we found isn't newer than when
+    // the blueprint was generated.  But we have to check later when we try to
+    // update the database anyway.  And we're not wasting much effort allowing
+    // this to proceed for now.  This way, we have only one code path for this
+    // and we know it's being hit when we exercise this condition.
+
+    // Next, construct the DNS config represented by the blueprint.
+    let dns_config_blueprint = blueprint_dns_config(blueprint, sleds_by_id);
+
+    // Looking at the current contents of DNS, prepare an update that will make
+    // it match what it should be.
+    let log = opctx.log.new(o!("blueprint_execution" => "DNS"));
+    let comment = format!("blueprint {} ({})", blueprint.id, blueprint.comment);
+    let maybe_update = dns_compute_update(
+        &log,
+        comment,
+        creator,
+        &dns_config_current,
+        &dns_config_blueprint,
+    )?;
+    let Some(update) = maybe_update else {
+        // Nothing to do.
+        return Ok(());
+    };
+
+    // Our goal here is to update the DNS configuration stored in the database
+    // to match the blueprint.  But it's always possible that we're executing a
+    // blueprint that's no longer the current target.  In that case, we want to
+    // fail without making any changes.  We definitely don't want to
+    // accidentally clobber changes that have been made by another instance
+    // executing a newer target blueprint.
+    //
+    // To avoid this problem, before generating a blueprint, Nexus fetches the
+    // current internal DNS generation and stores that into the blueprint
+    // itself.  Here, when we execute the blueprint, we make our database update
+    // conditional on that still being the current internal DNS generation.
+    // If some other instance has already come along and updated the database,
+    // whether for this same blueprint or a newer one, our attempt to update the
+    // database will fail.
+    //
+    // Let's look at a tricky example.  Suppose:
+    //
+    // 1. The system starts with some initial blueprint B1 with DNS version 3.
+    //    The blueprint has been fully executed and all is well.
+    //
+    // 2. Blueprint B2 gets generated.  It stores DNS version 3.  It's made the
+    //    current target.  Execution has not started yet.
+    //
+    // 3. Blueprint B3 gets generated.  It also stores DNS version 3 because
+    //    that's still the current version in DNS.  B3 is made the current
+    //    target.
+    //
+    // Assume B2 and B3 specify different internal DNS contents (e.g., have a
+    // different set of Omicron zones in them).
+    //
+    // 4. Nexus instance N1 finds B2 to be the current target and starts
+    //    executing it.  (Assume it found this between 2 and 3 above.)
+    //
+    // 5. Nexus instance N2 finds B3 to be the current target and starts
+    //    executing it.
+    //
+    // During execution:
+    //
+    // * N1 will assemble a new version of DNS called version 4, generate a diff
+    //   between version 3 (which is fixed) and version 4, and attempt to apply
+    //   this to the database conditional on the current version being version
+    //   3.
+    //
+    // * N2 will do the same, but its version 4 will look different.
+    //
+    // Now, one of two things could happen:
+    //
+    // 1. N1 wins.  Its database update applies successfully.  In the database,
+    //    the internal DNS version becomes version 4.  In this case, N2 loses.
+    //    Its database operation fails altogether.  At this point, any
+    //    subsequent attempt to execute blueprint B3 will fail because any DNS
+    //    update will be conditional on the database having version 3.  The only
+    //    way out of this is for the planner to generate a new blueprint B4
+    //    that's exactly equivalent to B3 except that the stored internal DNS
+    //    version is 4.  Then we'll be able to execute that.
+    //
+    // 2. N2 wins.  Its database update applies successfully.  In the database,
+    //    the internal DNS version becomes version 4.  In this case, N1 loses.
+    //    Its database operation fails altogether.  At this point, any
+    //    subsequent attempt to execute blueprint B2 will fail because any DNS
+    //    update will be conditional on the database having version 3.  No
+    //    further action is needed, though, because we've successfully executed
+    //    the latest target blueprint.
+    //
+    // In both cases, the system will (1) converge to having successfully
+    // executed the target blueprint, and (2) never have rolled any changes back
+    // -- DNS only ever moves forward, closer to the latest desired state.
+    info!(
+        log,
+        "attempting to update from generation {} to generation {}",
+        dns_config_current.generation,
+        dns_config_blueprint.generation,
+    );
+    let generation_u32 =
+        u32::try_from(dns_config_current.generation).map_err(|e| {
+            Error::internal_error(&format!(
+                "internal DNS generation got too large: {}",
+                e,
+            ))
+        })?;
+    let generation =
+        nexus_db_model::Generation::from(Generation::from(generation_u32));
+    datastore.dns_update_from_version(opctx, update, generation).await
+}
+
+/// Returns the expected contents of internal DNS based on the given blueprint
+pub fn blueprint_dns_config(
+    blueprint: &Blueprint,
+    sleds_by_id: &BTreeMap<Uuid, Sled>,
+) -> DnsConfigParams {
+    // The DNS names configured here should match what RSS configures for the
+    // same zones.  It's tricky to have RSS share the same code because it uses
+    // Sled Agent's _internal_ `OmicronZoneConfig` (and friends), whereas we're
+    // using `sled-agent-client`'s version of that type.  However, the
+    // DnsConfigBuilder's interface is high-level enough that it handles most of
+    // the details.
+    let mut dns_builder = DnsConfigBuilder::new();
+
+    // The code below assumes that all zones are using the default port numbers.
+    // That should be true, as those are the only ports ever used today.
+    // In an ideal world, the correct port would be pulled out of the
+    // `OmicronZoneType` variant instead.  Although that information is present,
+    // it's irritatingly non-trivial to do right now because SocketAddrs are
+    // represented as strings, so we'd need to parse all of them and handle all
+    // the errors, even though they should never happen.
+    // See oxidecomputer/omicron#4988.
+    for (_, omicron_zone) in blueprint.all_omicron_zones() {
+        if !blueprint.zones_in_service.contains(&omicron_zone.id) {
+            continue;
+        }
+
+        let (service_name, port) = match omicron_zone.zone_type {
+            OmicronZoneType::BoundaryNtp { .. } => {
+                (ServiceName::BoundaryNtp, NTP_PORT)
+            }
+            OmicronZoneType::InternalNtp { .. } => {
+                (ServiceName::InternalNtp, NTP_PORT)
+            }
+            OmicronZoneType::Clickhouse { .. } => {
+                (ServiceName::Clickhouse, CLICKHOUSE_PORT)
+            }
+            OmicronZoneType::ClickhouseKeeper { .. } => {
+                (ServiceName::ClickhouseKeeper, CLICKHOUSE_KEEPER_PORT)
+            }
+            OmicronZoneType::CockroachDb { .. } => {
+                (ServiceName::Cockroach, COCKROACH_PORT)
+            }
+            OmicronZoneType::Nexus { .. } => {
+                (ServiceName::Nexus, NEXUS_INTERNAL_PORT)
+            }
+            OmicronZoneType::Crucible { .. } => {
+                (ServiceName::Crucible(omicron_zone.id), CRUCIBLE_PORT)
+            }
+            OmicronZoneType::CruciblePantry { .. } => {
+                (ServiceName::CruciblePantry, CRUCIBLE_PANTRY_PORT)
+            }
+            OmicronZoneType::Oximeter { .. } => {
+                (ServiceName::Oximeter, OXIMETER_PORT)
+            }
+            OmicronZoneType::ExternalDns { .. } => {
+                (ServiceName::ExternalDns, DNS_HTTP_PORT)
+            }
+            OmicronZoneType::InternalDns { .. } => {
+                (ServiceName::InternalDns, DNS_HTTP_PORT)
+            }
+        };
+
+        // This unwrap is safe because this function only fails if we provide
+        // the same zone id twice, which should not be possible here.
+        dns_builder
+            .host_zone_with_one_backend(
+                omicron_zone.id,
+                omicron_zone.underlay_address,
+                service_name,
+                port,
+            )
+            .unwrap();
+    }
+
+    let scrimlets = sleds_by_id.values().filter(|sled| sled.is_scrimlet);
+    for scrimlet in scrimlets {
+        let sled_subnet = scrimlet.subnet();
+        let switch_zone_ip = get_switch_zone_address(sled_subnet);
+        // unwrap(): see above.
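+        // host_zone_switch() can only fail for a duplicate sled id, which
+        // can't happen here because `sleds_by_id` is keyed by sled id.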
+ dns_builder
+ .host_zone_switch(
+ scrimlet.id,
+ switch_zone_ip,
+ DENDRITE_PORT,
+ MGS_PORT,
+ MGD_PORT,
+ )
+ .unwrap();
+ }
+
+ // We set the generation number for the internal DNS to be newer than
+ // whatever it was when this blueprint was generated. This will only be
+ // used if the generated DNS contents are different from what's current.
+ dns_builder.generation(blueprint.internal_dns_version.next());
+ dns_builder.build()
+}
+
+fn dns_compute_update(
+ log: &slog::Logger,
+ comment: String,
+ creator: String,
+ current_config: &DnsConfigParams,
+ new_config: &DnsConfigParams,
+) -> Result<Option<DnsVersionUpdateBuilder>, Error> {
+ let mut update =
+ DnsVersionUpdateBuilder::new(DnsGroup::Internal, comment, creator);
+
+ let diff = DnsDiff::new(&current_config, &new_config)
+ .map_err(|e| Error::internal_error(&format!("{:#}", e)))?;
+ if diff.is_empty() {
+ info!(log, "no changes");
+ return Ok(None);
+ }
+
+ for (name, new_records) in diff.names_added() {
+ debug!(
+ log,
+ "adding name";
+ "name" => name,
+ "new_records" => ?new_records,
+ );
+ update.add_name(
+ name.to_string(),
+ new_records.into_iter().cloned().collect(),
+ )?;
+ }
+
+ for (name, old_records) in diff.names_removed() {
+ debug!(
+ log,
+ "removing name";
+ "name" => name,
+ "old_records" => ?old_records,
+ );
+ update.remove_name(name.to_string())?;
+ }
+
+ for (name, old_records, new_records) in diff.names_changed() {
+ debug!(
+ log,
+ "updating name";
+ "name" => name,
+ "old_records" => ?old_records,
+ "new_records" => ?new_records,
+ );
+ update.remove_name(name.to_string())?;
+ update.add_name(
+ name.to_string(),
+ new_records.into_iter().cloned().collect(),
+ )?;
+ }
+
+ Ok(Some(update))
+}
+
+#[cfg(test)]
+mod test {
+ use super::blueprint_dns_config;
+ use super::dns_compute_update;
+ use crate::Sled;
+ use internal_dns::ServiceName;
+ use internal_dns::DNS_ZONE;
+ use nexus_deployment::blueprint_builder::BlueprintBuilder;
+ use nexus_inventory::CollectionBuilder;
+ use nexus_types::deployment::Blueprint;
+ use nexus_types::deployment::OmicronZoneConfig;
+ use nexus_types::deployment::OmicronZoneType;
+ use nexus_types::deployment::Policy;
+ use nexus_types::deployment::SledResources;
+ use nexus_types::deployment::ZpoolName;
+ use nexus_types::external_api::views::SledProvisionState;
+ use nexus_types::internal_api::params::DnsConfigParams;
+ use nexus_types::internal_api::params::DnsConfigZone;
+ use nexus_types::internal_api::params::DnsRecord;
+ use omicron_common::address::get_sled_address;
+ use omicron_common::address::get_switch_zone_address;
+ use omicron_common::address::Ipv6Subnet;
+ use omicron_common::address::RACK_PREFIX;
+ use omicron_common::address::SLED_PREFIX;
+ use omicron_common::api::external::Generation;
+ use omicron_test_utils::dev::test_setup_log;
+ use std::collections::BTreeMap;
+ use std::collections::BTreeSet;
+ use std::collections::HashMap;
+ use std::net::Ipv4Addr;
+ use std::net::Ipv6Addr;
+ use std::net::SocketAddrV6;
+ use std::str::FromStr;
+ use uuid::Uuid;
+
+ fn blueprint_empty() -> Blueprint {
+ let builder = CollectionBuilder::new("test-suite");
+ let collection = builder.build();
+ let policy = Policy {
+ sleds: BTreeMap::new(),
+ service_ip_pool_ranges: vec![],
+ target_nexus_zone_count: 3,
+ };
+ BlueprintBuilder::build_initial_from_collection(
+ &collection,
+ Generation::new(),
+ &policy,
+ "test-suite",
+ )
+ .expect("failed to generate empty blueprint")
+ }
+
+ fn dns_config_empty() -> DnsConfigParams {
+ DnsConfigParams {
+ generation: 1,
+ time_created: chrono::Utc::now(),
+
zones: vec![DnsConfigZone { + zone_name: String::from("internal"), + records: HashMap::new(), + }], + } + } + + /// test blueprint_dns_config(): trivial case of an empty blueprint + #[test] + fn test_blueprint_dns_empty() { + let blueprint = blueprint_empty(); + let blueprint_dns = blueprint_dns_config(&blueprint, &BTreeMap::new()); + assert!(blueprint_dns.sole_zone().unwrap().records.is_empty()); + } + + /// test blueprint_dns_config(): exercise various different conditions + /// - one of each type of zone in service + /// - some zones not in service + #[test] + fn test_blueprint_dns_basic() { + // We'll use the standard representative inventory collection to build a + // blueprint. The main thing we care about here is that we have at + // least one zone of each type. Later, we'll mark a couple of the sleds + // as Scrimlets to exercise that case. + let representative = nexus_inventory::examples::representative(); + let collection = representative.builder.build(); + let rack_subnet_base: Ipv6Addr = + "fd00:1122:3344:0100::".parse().unwrap(); + let rack_subnet = + ipnet::Ipv6Net::new(rack_subnet_base, RACK_PREFIX).unwrap(); + let possible_sled_subnets = rack_subnet.subnets(SLED_PREFIX).unwrap(); + // Ignore sleds with no associated zones in the inventory. + // This is included in the "representative" collection, but it's + // not allowed by BlueprintBuilder::build_initial_from_collection(). + let policy_sleds = collection + .omicron_zones + .keys() + .zip(possible_sled_subnets) + .map(|(sled_id, subnet)| { + let sled_resources = SledResources { + provision_state: SledProvisionState::Provisionable, + zpools: BTreeSet::from([ZpoolName::from_str(&format!( + "oxp_{}", + Uuid::new_v4() + )) + .unwrap()]), + subnet: Ipv6Subnet::new(subnet.network()), + }; + (*sled_id, sled_resources) + }) + .collect(); + + let policy = Policy { + sleds: policy_sleds, + service_ip_pool_ranges: vec![], + target_nexus_zone_count: 3, + }; + let dns_empty = dns_config_empty(); + let initial_dns_generation = + Generation::from(u32::try_from(dns_empty.generation).unwrap()); + let mut blueprint = BlueprintBuilder::build_initial_from_collection( + &collection, + initial_dns_generation, + &policy, + "test-suite", + ) + .expect("failed to build initial blueprint"); + + // To make things slightly more interesting, let's add a zone that's + // not currently in service. + let out_of_service_id = Uuid::new_v4(); + let out_of_service_addr = Ipv6Addr::LOCALHOST; + blueprint.omicron_zones.values_mut().next().unwrap().zones.push( + OmicronZoneConfig { + id: out_of_service_id, + underlay_address: out_of_service_addr, + zone_type: OmicronZoneType::Oximeter { + address: SocketAddrV6::new( + out_of_service_addr, + 12345, + 0, + 0, + ) + .to_string(), + }, + }, + ); + + // To generate the blueprint's DNS config, we need to make up a + // different set of information about the sleds in our fake system. + let sleds_by_id = policy + .sleds + .iter() + .enumerate() + .map(|(i, (sled_id, sled_resources))| { + let sled_info = Sled { + id: *sled_id, + sled_agent_address: get_sled_address(sled_resources.subnet), + // The first two of these (arbitrarily) will be marked + // Scrimlets. 
+ is_scrimlet: i < 2,
+ };
+ (*sled_id, sled_info)
+ })
+ .collect();
+
+ let dns_config_blueprint =
+ blueprint_dns_config(&blueprint, &sleds_by_id);
+ assert_eq!(
+ dns_config_blueprint.generation,
+ u64::from(initial_dns_generation.next())
+ );
+ let blueprint_dns_zone = dns_config_blueprint.sole_zone().unwrap();
+ assert_eq!(blueprint_dns_zone.zone_name, DNS_ZONE);
+
+ // Now, verify a few different properties about the generated DNS
+ // configuration:
+ //
+ // 1. Every zone (except for the one that we added not-in-service)
+ // should have some DNS name with a AAAA record that points at the
+ // zone's underlay IP. (i.e., every Omicron zone is _in_ DNS)
+ //
+ // 2. Every SRV record that we find should have a "target" that points
+ // to another name within the DNS configuration, and that name should
+ // be one of the ones with a AAAA record pointing to an Omicron zone.
+ //
+ // 3. There is at least one SRV record for each service that we expect
+ // to appear in the representative system that we're working with.
+ //
+ // 4. Our out-of-service zone does *not* appear in the DNS config,
+ // neither with an AAAA record nor in an SRV record.
+ //
+ // Together, this tells us that we have SRV records for all services,
+ // that those SRV records all point to at least one of the Omicron zones
+ // for that service, and that we correctly ignored zones that were not
+ // in service.
+
+ // To start, we need a mapping from underlay IP to the corresponding
+ // Omicron zone.
+ let mut omicron_zones_by_ip: BTreeMap<_, _> = blueprint
+ .all_omicron_zones()
+ .filter(|(_, zone)| zone.id != out_of_service_id)
+ .map(|(_, zone)| (zone.underlay_address, zone.id))
+ .collect();
+ println!("omicron zones by IP: {:#?}", omicron_zones_by_ip);
+
+ // We also want a mapping from underlay IP to the corresponding switch
+ // zone. In this case, the value is the Scrimlet's sled id.
+ let mut switch_sleds_by_ip: BTreeMap<_, _> = sleds_by_id
+ .iter()
+ .filter_map(|(sled_id, sled)| {
+ if sled.is_scrimlet {
+ let sled_subnet = policy.sleds.get(sled_id).unwrap().subnet;
+ let switch_zone_ip = get_switch_zone_address(sled_subnet);
+ Some((switch_zone_ip, *sled_id))
+ } else {
+ None
+ }
+ })
+ .collect();
+
+ // Now go through all the DNS names that have AAAA records and remove
+ // any corresponding Omicron zone. While doing this, construct a set of
+ // the fully-qualified DNS names (i.e., with the zone name suffix
+ // appended) that had AAAA records. We'll use this later to make sure
+ // all the SRV records' targets that we find are valid.
+ let mut expected_srv_targets: BTreeSet<_> = BTreeSet::new();
+ for (name, records) in &blueprint_dns_zone.records {
+ let addrs: Vec<_> = records
+ .iter()
+ .filter_map(|dns_record| match dns_record {
+ DnsRecord::Aaaa(addr) => Some(addr),
+ _ => None,
+ })
+ .collect();
+ for addr in addrs {
+ if let Some(zone_id) = omicron_zones_by_ip.remove(addr) {
+ println!(
+ "IP {} found in DNS corresponds with zone {}",
+ addr, zone_id
+ );
+ expected_srv_targets.insert(format!(
+ "{}.{}",
+ name, blueprint_dns_zone.zone_name
+ ));
+ continue;
+ }
+
+ if let Some(scrimlet_id) = switch_sleds_by_ip.remove(addr) {
+ println!(
+ "IP {} found in DNS corresponds with switch zone \
+ for Scrimlet {}",
+ addr, scrimlet_id
+ );
+ expected_srv_targets.insert(format!(
+ "{}.{}",
+ name, blueprint_dns_zone.zone_name
+ ));
+ continue;
+ }
+
+ println!(
+ "note: found IP ({}) not corresponding to any \
+ Omicron zone or switch zone (name {:?})",
+ addr, name
+ );
+ }
+ }
+
+ println!(
+ "Omicron zones whose IPs were not found in DNS: {:?}",
+ omicron_zones_by_ip,
+ );
+ assert!(
+ omicron_zones_by_ip.is_empty(),
+ "some Omicron zones' IPs were not found in DNS"
+ );
+
+ println!(
+ "Scrimlets whose switch zone IPs were not found in DNS: {:?}",
+ switch_sleds_by_ip,
+ );
+ assert!(
+ switch_sleds_by_ip.is_empty(),
+ "some switch zones' IPs were not found in DNS"
+ );
+
+ // Now go through all DNS names that have SRV records. For each one,
+ //
+ // 1. If its name corresponds to the name of one of the SRV services
+ // that we expect the system to have, record that fact. At the end
+ // we'll verify that we found at least one SRV record for each such
+ // service.
+ //
+ // 2. Make sure that the SRV record points at a name that we found in
+ // the previous pass (i.e., that corresponds to an Omicron zone).
+ //
+ // There are some ServiceNames missing here because they are not part of
+ // our representative config (e.g., ClickhouseKeeper) or they don't
+ // currently have DNS records at all (e.g., SledAgent, Maghemite, Mgd,
+ // Tfport).
+ let mut srv_kinds_expected = BTreeSet::from([
+ ServiceName::Clickhouse,
+ ServiceName::Cockroach,
+ ServiceName::InternalDns,
+ ServiceName::ExternalDns,
+ ServiceName::Nexus,
+ ServiceName::Oximeter,
+ ServiceName::Dendrite,
+ ServiceName::CruciblePantry,
+ ServiceName::BoundaryNtp,
+ ServiceName::InternalNtp,
+ ]);
+
+ for (name, records) in &blueprint_dns_zone.records {
+ let srvs: Vec<_> = records
+ .iter()
+ .filter_map(|dns_record| match dns_record {
+ DnsRecord::Srv(srv) => Some(srv),
+ _ => None,
+ })
+ .collect();
+ for srv in srvs {
+ assert!(
+ expected_srv_targets.contains(&srv.target),
+ "found SRV record with target {:?} that does not \
+ correspond to a name that points to any Omicron zone",
+ srv.target
+ );
+ }
+
+ let kinds_left: Vec<_> =
+ srv_kinds_expected.iter().copied().collect();
+ for kind in kinds_left {
+ if kind.dns_name() == *name {
+ srv_kinds_expected.remove(&kind);
+ }
+ }
+ }
+
+ println!("SRV kinds with no records found: {:?}", srv_kinds_expected);
+ assert!(srv_kinds_expected.is_empty());
+ }
+
+ #[test]
+ fn test_dns_compute_update() {
+ let logctx = test_setup_log("dns_compute_update");
+
+ // Start with an empty DNS config. There's no database update needed
+ // when updating the DNS config to itself.
+ let dns_empty = dns_config_empty();
+ match dns_compute_update(
+ &logctx.log,
+ "test-suite".to_string(),
+ "test-suite".to_string(),
+ &dns_empty,
+ &dns_empty,
+ ) {
+ Ok(None) => (),
+ Err(error) => {
+ panic!("unexpected error generating update: {:?}", error)
+ }
+ Ok(Some(diff)) => panic!("unexpected delta: {:?}", diff),
+ };
+
+ // Now let's do something a little less trivial. Set up two slightly
+ // different DNS configurations, compute the database update, and make
+ // sure it matches what we expect.
+ let dns_config1 = DnsConfigParams {
+ generation: 4,
+ time_created: chrono::Utc::now(),
+ zones: vec![DnsConfigZone {
+ zone_name: "my-zone".to_string(),
+ records: HashMap::from([
+ (
+ "ex1".to_string(),
+ vec![DnsRecord::A(Ipv4Addr::LOCALHOST)],
+ ),
+ (
+ "ex2".to_string(),
+ vec![DnsRecord::A("192.168.1.3".parse().unwrap())],
+ ),
+ ]),
+ }],
+ };
+
+ let dns_config2 = DnsConfigParams {
+ generation: 4,
+ time_created: chrono::Utc::now(),
+ zones: vec![DnsConfigZone {
+ zone_name: "my-zone".to_string(),
+ records: HashMap::from([
+ (
+ "ex2".to_string(),
+ vec![DnsRecord::A("192.168.1.4".parse().unwrap())],
+ ),
+ (
+ "ex3".to_string(),
+ vec![DnsRecord::A(Ipv4Addr::LOCALHOST)],
+ ),
+ ]),
+ }],
+ };
+
+ let update = dns_compute_update(
+ &logctx.log,
+ "test-suite".to_string(),
+ "test-suite".to_string(),
+ &dns_config1,
+ &dns_config2,
+ )
+ .expect("failed to compute update")
+ .expect("unexpectedly produced no update");
+
+ let mut removed: Vec<_> = update.names_removed().collect();
+ removed.sort();
+ assert_eq!(removed, vec!["ex1", "ex2"]);
+
+ let mut added: Vec<_> = update.names_added().collect();
+ added.sort_by_key(|n| n.0);
+ assert_eq!(
+ added,
+ vec![
+ (
+ "ex2",
+ [DnsRecord::A("192.168.1.4".parse().unwrap())].as_ref()
+ ),
+ ("ex3", [DnsRecord::A(Ipv4Addr::LOCALHOST)].as_ref()),
+ ]
+ );
+
+ logctx.cleanup_successful();
+ }
+}
diff --git a/nexus/blueprint-execution/src/lib.rs b/nexus/blueprint-execution/src/lib.rs
index f7bfd7d30c..a13acdf265 100644
--- a/nexus/blueprint-execution/src/lib.rs
+++ b/nexus/blueprint-execution/src/lib.rs
@@ -6,29 +6,87 @@
 //!
 //! See `nexus_deployment` crate-level docs for background.
 
+use anyhow::{anyhow, Context};
 use nexus_db_queries::context::OpContext;
 use nexus_db_queries::db::DataStore;
 use nexus_types::deployment::Blueprint;
-use slog::o;
+use nexus_types::identity::Asset;
+use omicron_common::address::Ipv6Subnet;
+use omicron_common::address::SLED_PREFIX;
+use slog::info;
+use slog_error_chain::InlineErrorChain;
+use std::collections::BTreeMap;
+use std::net::SocketAddrV6;
+use uuid::Uuid;
 
+mod dns;
 mod omicron_zones;
 
+struct Sled {
+ id: Uuid,
+ sled_agent_address: SocketAddrV6,
+ is_scrimlet: bool,
+}
+
+impl Sled {
+ pub fn subnet(&self) -> Ipv6Subnet<SLED_PREFIX> {
+ Ipv6Subnet::<SLED_PREFIX>::new(*self.sled_agent_address.ip())
+ }
+}
+
+impl From<nexus_db_model::Sled> for Sled {
+ fn from(value: nexus_db_model::Sled) -> Self {
+ Sled {
+ id: value.id(),
+ sled_agent_address: value.address(),
+ is_scrimlet: value.is_scrimlet(),
+ }
+ }
+}
+
 /// Make one attempt to realize the given blueprint, meaning to take actions to
 /// alter the real system to match the blueprint
 ///
 /// The assumption is that callers are running this periodically or in a loop to
 /// deal with transient errors or changes in the underlying system state.
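[Editor's aside, not part of the patch] A hedged sketch of the call pattern
this doc comment describes: a periodic driver (in Nexus, a background task)
retries the attempt on a timer and relies on idempotence to converge. The
names here and the tokio dependency are assumptions for illustration.

async fn converge_loop<F, Fut>(mut attempt: F, period: std::time::Duration)
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<(), Vec<anyhow::Error>>>,
{
    loop {
        if let Err(errors) = attempt().await {
            // Errors are treated as transient; the next tick tries again.
            eprintln!("blueprint execution: {} error(s); will retry", errors.len());
        }
        tokio::time::sleep(period).await;
    }
}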
-pub async fn realize_blueprint(
+pub async fn realize_blueprint<S>(
 opctx: &OpContext,
 datastore: &DataStore,
 blueprint: &Blueprint,
-) -> Result<(), Vec<anyhow::Error>> {
- let log = opctx.log.new(o!("comment" => blueprint.comment.clone()));
- omicron_zones::deploy_zones(
- &log,
- opctx,
- datastore,
- &blueprint.omicron_zones,
+ nexus_label: S,
+) -> Result<(), Vec<anyhow::Error>>
+where
+ String: From<S>,
+{
+ let opctx = opctx.child(BTreeMap::from([(
+ "comment".to_string(),
+ blueprint.comment.clone(),
+ )]));
+
+ info!(
+ opctx.log,
+ "attempting to realize blueprint";
+ "blueprint_id" => ?blueprint.id
+ );
+
+ let sleds_by_id: BTreeMap<Uuid, Sled> = datastore
+ .sled_list_all_batched(&opctx)
+ .await
+ .context("listing all sleds")
+ .map_err(|e| vec![e])?
+ .into_iter()
+ .map(|db_sled| (db_sled.id(), Sled::from(db_sled)))
+ .collect();
+ omicron_zones::deploy_zones(&opctx, &sleds_by_id, &blueprint.omicron_zones)
+ .await?;
+
+ dns::deploy_dns(
+ &opctx,
+ &datastore,
+ String::from(nexus_label),
+ &blueprint,
+ &sleds_by_id,
 )
 .await
+ .map_err(|e| vec![anyhow!("{}", InlineErrorChain::new(&e))])
 }
diff --git a/nexus/blueprint-execution/src/omicron_zones.rs b/nexus/blueprint-execution/src/omicron_zones.rs
index f3e81d283d..1d5c4444b1 100644
--- a/nexus/blueprint-execution/src/omicron_zones.rs
+++ b/nexus/blueprint-execution/src/omicron_zones.rs
@@ -4,50 +4,49 @@
 
 //! Manages deployment of Omicron zones to Sled Agents
 
+use crate::Sled;
+use anyhow::anyhow;
 use anyhow::Context;
 use futures::stream;
 use futures::StreamExt;
 use nexus_db_queries::context::OpContext;
-use nexus_db_queries::db::lookup::LookupPath;
-use nexus_db_queries::db::DataStore;
 use nexus_types::deployment::OmicronZonesConfig;
 use sled_agent_client::Client as SledAgentClient;
 use slog::info;
 use slog::warn;
-use slog::Logger;
 use std::collections::BTreeMap;
 use uuid::Uuid;
 
 /// Idempotently ensure that the specified Omicron zones are deployed to the
 /// corresponding sleds
 pub(crate) async fn deploy_zones(
- log: &Logger,
 opctx: &OpContext,
- datastore: &DataStore,
+ sleds_by_id: &BTreeMap<Uuid, Sled>,
 zones: &BTreeMap<Uuid, OmicronZonesConfig>,
 ) -> Result<(), Vec<anyhow::Error>> {
 let errors: Vec<_> = stream::iter(zones)
 .filter_map(|(sled_id, config)| async move {
- let client = match sled_client(opctx, datastore, *sled_id).await {
- Ok(client) => client,
- Err(err) => {
- warn!(log, "{err:#}");
+ let db_sled = match sleds_by_id.get(sled_id) {
+ Some(sled) => sled,
+ None => {
+ let err = anyhow!("sled not found in db list: {}", sled_id);
+ warn!(opctx.log, "{err:#}");
 return Some(err);
 }
 };
+ let client = sled_client(opctx, &db_sled);
 let result =
 client.omicron_zones_put(&config).await.with_context(|| {
 format!("Failed to put {config:#?} to sled {sled_id}")
 });
-
 match result {
 Err(error) => {
- warn!(log, "{error:#}");
+ warn!(opctx.log, "{error:#}");
 Some(error)
 }
 Ok(_) => {
 info!(
- log,
+ opctx.log,
 "Successfully deployed zones for sled agent";
 "sled_id" => %sled_id,
 "generation" => %config.generation,
@@ -71,43 +70,27 @@ pub(crate) async fn deploy_zones(
 // method on the `Nexus` type. We want to have a more constrained type we can
 // pass into background tasks for this type of functionality, but for now we
 // just copy the functionality.
-async fn sled_client(
- opctx: &OpContext,
- datastore: &DataStore,
- sled_id: Uuid,
-) -> Result<SledAgentClient, anyhow::Error> {
- let (.., sled) = LookupPath::new(opctx, datastore)
- .sled_id(sled_id)
- .fetch()
- .await
- .with_context(|| {
- format!(
- "Failed to create sled_agent::Client for sled_id: {}",
- sled_id
- )
- })?;
+fn sled_client(opctx: &OpContext, sled: &Sled) -> SledAgentClient {
 let dur = std::time::Duration::from_secs(60);
 let client = reqwest::ClientBuilder::new()
 .connect_timeout(dur)
 .timeout(dur)
 .build()
 .unwrap();
- Ok(SledAgentClient::new_with_client(
- &format!("http://{}", sled.address()),
+ SledAgentClient::new_with_client(
+ &format!("http://{}", sled.sled_agent_address),
 client,
 opctx.log.clone(),
- ))
+ )
 }
 
 #[cfg(test)]
 mod test {
 use super::deploy_zones;
+ use crate::Sled;
 use httptest::matchers::{all_of, json_decoded, request};
 use httptest::responders::status_code;
 use httptest::Expectation;
- use nexus_db_model::{
- ByteCount, SledBaseboard, SledSystemHardware, SledUpdate,
- };
 use nexus_db_queries::context::OpContext;
 use nexus_test_utils_macros::nexus_test;
 use nexus_types::deployment::OmicronZonesConfig;
@@ -140,6 +123,7 @@ mod test {
 omicron_zones,
 zones_in_service: BTreeSet::new(),
 parent_blueprint_id: None,
+ internal_dns_version: Generation::new(),
 time_created: chrono::Utc::now(),
 creator: "test".to_string(),
 comment: "test blueprint".to_string(),
@@ -155,14 +139,6 @@ mod test {
 cptestctx.logctx.log.clone(),
 datastore.clone(),
 );
- let log = opctx.log.clone();
-
- // Get a success result back when the blueprint has an empty set of
- // zones.
- let blueprint = Arc::new(create_blueprint(BTreeMap::new()));
- deploy_zones(&log, &opctx, &datastore, &blueprint.1.omicron_zones)
- .await
- .expect("failed to deploy no zones");
 
 // Create some fake sled-agent servers to respond to zone puts and add
 // sleds to CRDB.
@@ -170,67 +146,61 @@ mod test {
 let mut s2 = httptest::Server::run();
 let sled_id1 = Uuid::new_v4();
 let sled_id2 = Uuid::new_v4();
- let rack_id = Uuid::new_v4();
- for (i, (sled_id, server)) in
- [(sled_id1, &s1), (sled_id2, &s2)].iter().enumerate()
- {
- let SocketAddr::V6(addr) = server.addr() else {
- panic!("Expected Ipv6 address. Got {}", server.addr());
- };
- let update = SledUpdate::new(
- *sled_id,
- addr,
- SledBaseboard {
- serial_number: i.to_string(),
- part_number: "test".into(),
- revision: 1,
- },
- SledSystemHardware {
- is_scrimlet: false,
- usable_hardware_threads: 4,
- usable_physical_ram: ByteCount(1000.into()),
- reservoir_size: ByteCount(999.into()),
- },
- rack_id,
- );
- datastore
- .sled_upsert(update)
- .await
- .expect("Failed to insert sled to db");
- }
-
- // The particular dataset doesn't matter for this test.
- // We re-use the same one to not obfuscate things
- let dataset = OmicronZoneDataset {
- pool_name: format!("oxp_{}", Uuid::new_v4()).parse().unwrap(),
- };
+ let sleds_by_id: BTreeMap<Uuid, Sled> =
+ [(sled_id1, &s1), (sled_id2, &s2)]
+ .into_iter()
+ .map(|(sled_id, server)| {
+ let SocketAddr::V6(addr) = server.addr() else {
+ panic!("Expected Ipv6 address. Got {}", server.addr());
+ };
+ let sled = Sled {
+ id: sled_id,
+ sled_agent_address: addr,
+ is_scrimlet: false,
+ };
+ (sled_id, sled)
+ })
+ .collect();
 
- let generation = Generation::new();
+ // Get a success result back when the blueprint has an empty set of
+ // zones.
+ let blueprint = Arc::new(create_blueprint(BTreeMap::new())); + deploy_zones(&opctx, &sleds_by_id, &blueprint.1.omicron_zones) + .await + .expect("failed to deploy no zones"); // Zones are updated in a particular order, but each request contains // the full set of zones that must be running. // See `rack_setup::service::ServiceInner::run` for more details. - let mut zones = OmicronZonesConfig { - generation, - zones: vec![OmicronZoneConfig { - id: Uuid::new_v4(), - underlay_address: "::1".parse().unwrap(), - zone_type: OmicronZoneType::InternalDns { - dataset, - dns_address: "oh-hello-internal-dns".into(), - gz_address: "::1".parse().unwrap(), - gz_address_index: 0, - http_address: "some-ipv6-address".into(), - }, - }], - }; + fn make_zones() -> OmicronZonesConfig { + OmicronZonesConfig { + generation: Generation::new(), + zones: vec![OmicronZoneConfig { + id: Uuid::new_v4(), + underlay_address: "::1".parse().unwrap(), + zone_type: OmicronZoneType::InternalDns { + dataset: OmicronZoneDataset { + pool_name: format!("oxp_{}", Uuid::new_v4()) + .parse() + .unwrap(), + }, + dns_address: "oh-hello-internal-dns".into(), + gz_address: "::1".parse().unwrap(), + gz_address_index: 0, + http_address: "some-ipv6-address".into(), + }, + }], + } + } // Create a blueprint with only the `InternalDns` zone for both servers // We reuse the same `OmicronZonesConfig` because the details don't // matter for this test. + let mut zones1 = make_zones(); + let mut zones2 = make_zones(); let blueprint = Arc::new(create_blueprint(BTreeMap::from([ - (sled_id1, zones.clone()), - (sled_id2, zones.clone()), + (sled_id1, zones1.clone()), + (sled_id2, zones2.clone()), ]))); // Set expectations for the initial requests sent to the fake @@ -250,7 +220,7 @@ mod test { } // Execute it. 
- deploy_zones(&log, &opctx, &datastore, &blueprint.1.omicron_zones)
+ deploy_zones(&opctx, &sleds_by_id, &blueprint.1.omicron_zones)
 .await
 .expect("failed to deploy initial zones");
 
@@ -267,7 +237,7 @@
 .respond_with(status_code(204)),
 );
 }
- deploy_zones(&log, &opctx, &datastore, &blueprint.1.omicron_zones)
+ deploy_zones(&opctx, &sleds_by_id, &blueprint.1.omicron_zones)
 .await
 .expect("failed to deploy same zones");
 s1.verify_and_clear();
 s2.verify_and_clear();
@@ -291,7 +261,7 @@
 );
 
 let errors =
- deploy_zones(&log, &opctx, &datastore, &blueprint.1.omicron_zones)
+ deploy_zones(&opctx, &sleds_by_id, &blueprint.1.omicron_zones)
 .await
 .expect_err("unexpectedly succeeded in deploying zones");
 
@@ -304,21 +274,25 @@
 s2.verify_and_clear();
 
 // Add an `InternalNtp` zone for our next update
- zones.generation = generation.next();
- zones.zones.push(OmicronZoneConfig {
- id: Uuid::new_v4(),
- underlay_address: "::1".parse().unwrap(),
- zone_type: OmicronZoneType::InternalNtp {
- address: "::1".into(),
- dns_servers: vec!["::1".parse().unwrap()],
- domain: None,
- ntp_servers: vec!["some-ntp-server-addr".into()],
- },
- });
+ fn append_zone(zones: &mut OmicronZonesConfig) {
+ zones.generation = zones.generation.next();
+ zones.zones.push(OmicronZoneConfig {
+ id: Uuid::new_v4(),
+ underlay_address: "::1".parse().unwrap(),
+ zone_type: OmicronZoneType::InternalNtp {
+ address: "::1".into(),
+ dns_servers: vec!["::1".parse().unwrap()],
+ domain: None,
+ ntp_servers: vec!["some-ntp-server-addr".into()],
+ },
+ });
+ }
+ append_zone(&mut zones1);
+ append_zone(&mut zones2);
 let blueprint = Arc::new(create_blueprint(BTreeMap::from([
- (sled_id1, zones.clone()),
- (sled_id2, zones.clone()),
+ (sled_id1, zones1),
+ (sled_id2, zones2),
 ])));
 
 // Set our new expectations
@@ -337,7 +311,7 @@
 }
 
 // Activate the task
- deploy_zones(&log, &opctx, &datastore, &blueprint.1.omicron_zones)
+ deploy_zones(&opctx, &sleds_by_id, &blueprint.1.omicron_zones)
 .await
 .expect("failed to deploy last round of zones");
 s1.verify_and_clear();
diff --git a/nexus/db-model/src/deployment.rs b/nexus/db-model/src/deployment.rs
index 34fe08d78c..a1f285fbef 100644
--- a/nexus/db-model/src/deployment.rs
+++ b/nexus/db-model/src/deployment.rs
@@ -23,6 +23,7 @@ use uuid::Uuid;
 pub struct Blueprint {
 pub id: Uuid,
 pub parent_blueprint_id: Option<Uuid>,
+ pub internal_dns_version: Generation,
 pub time_created: DateTime<Utc>,
 pub creator: String,
 pub comment: String,
@@ -33,6 +34,7 @@ impl From<&'_ nexus_types::deployment::Blueprint> for Blueprint {
 Self {
 id: bp.id,
 parent_blueprint_id: bp.parent_blueprint_id,
+ internal_dns_version: Generation(bp.internal_dns_version),
 time_created: bp.time_created,
 creator: bp.creator.clone(),
 comment: bp.comment.clone(),
@@ -45,6 +47,7 @@ impl From<Blueprint> for nexus_types::deployment::BlueprintMetadata {
 Self {
 id: value.id,
 parent_blueprint_id: value.parent_blueprint_id,
+ internal_dns_version: *value.internal_dns_version,
 time_created: value.time_created,
 creator: value.creator,
 comment: value.comment,
diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs
index 3d9050b1af..54755486e5 100644
--- a/nexus/db-model/src/schema.rs
+++ b/nexus/db-model/src/schema.rs
@@ -13,7 +13,7 @@ use omicron_common::api::external::SemverVersion;
 ///
 /// This should be updated whenever the schema is changed.
For more details,
 /// refer to: schema/crdb/README.adoc
-pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(35, 0, 0);
+pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(36, 0, 0);
 
 table! {
 disk (id) {
@@ -1421,6 +1421,8 @@ table! {
 time_created -> Timestamptz,
 creator -> Text,
 comment -> Text,
+
+ internal_dns_version -> Int8,
 }
 }
 
diff --git a/nexus/db-queries/src/context.rs b/nexus/db-queries/src/context.rs
index 835256fc58..72da01a5f1 100644
--- a/nexus/db-queries/src/context.rs
+++ b/nexus/db-queries/src/context.rs
@@ -257,6 +257,65 @@ impl OpContext {
 );
 result
 }
+
+ /// Returns an error if we're currently in a context where expensive or
+ /// complex operations should not be allowed
+ ///
+ /// This is intended for checking that we're not trying to perform expensive
+ /// or complex (multi-step) operations from HTTP request handlers.
+ /// Generally, expensive or complex operations should be broken up into
+ /// multiple requests (e.g., pagination). That's for a variety of reasons:
+ ///
+ /// - DoS mitigation: requests that kick off an arbitrarily-large amount of
+ /// work can tie up server resources without requiring commensurate
+ /// resources from the client, which makes it very easy to attack the
+ /// system
+ ///
+ /// - monitoring: it's easier to reason about metrics for operations that
+ /// are roughly bounded in size (otherwise, things like "requests per
+ /// second" can become meaningless)
+ ///
+ /// - stability: very large database queries have outsize effects on the
+ /// rest of the system (potentially blocking other clients for extended
+ /// periods of time, or preventing the database from timely cleanup of
+ /// invalidated data, etc.)
+ ///
+ /// - throttling: separate requests give us an opportunity to dynamically
+ /// throttle clients that are hitting the system hard
+ ///
+ /// - failure transparency: when one request kicks off a complex
+ /// (multi-step) operation, it's harder to communicate programmatically
+ /// why the request failed
+ ///
+ /// - retries: when failures happen during smaller operations, clients can
+ /// retry only the part that failed. When failures happen during complex
+ /// (multi-step) operations, the client has to retry the whole thing.
+ /// This is much worse than it sounds: it means that for the request to
+ /// succeed, _all_ of the suboperations have to succeed. During a period
+ /// of transient failures, that could be extremely unlikely. With smaller
+ /// requests, clients can just retry each one until it succeeds without
+ /// having to retry the requests that already succeeded (whose failures
+ /// would trigger another attempt at the whole thing).
+ ///
+ /// - Simple request-response HTTP is not well-suited to long-running
+ /// operations. There's no way to communicate progress or even that the
+ /// request is still going. There's no good way to cancel such a request,
+ /// either. Clients and proxies often don't expect long requests and
+ /// apply aggressive timeouts. Depending on the HTTP version, a
+ /// long-running request can tie up the TCP connection.
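[Editor's aside, not part of the patch] A self-contained sketch of the
guard-then-paginate shape this check enables; the names are hypothetical, and
the real instance is `sled_list_all_batched()` later in this patch.

fn list_all_batched<T>(
    guard: impl Fn() -> Result<(), String>,
    mut fetch_page: impl FnMut() -> Vec<T>,
) -> Result<Vec<T>, String> {
    // Refuse to run in latency-sensitive contexts (e.g., API handlers).
    guard()?;
    // Otherwise accumulate bounded batches until an empty page signals the end.
    let mut all = Vec::new();
    loop {
        let batch = fetch_page();
        if batch.is_empty() {
            break;
        }
        all.extend(batch);
    }
    Ok(all)
}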
+ pub fn check_complex_operations_allowed(&self) -> Result<(), Error> { + let api_handler = match self.kind { + OpKind::ExternalApiRequest | OpKind::InternalApiRequest => true, + OpKind::Saga | OpKind::Background | OpKind::Test => false, + }; + if api_handler { + Err(Error::internal_error( + "operation not allowed from API handlers", + )) + } else { + Ok(()) + } + } } impl Session for ConsoleSessionWithSiloId { diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index cde6e7e8c6..d9df143022 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -233,7 +233,13 @@ impl DataStore { // Read the metadata from the primary blueprint row, and ensure that it // exists. - let (parent_blueprint_id, time_created, creator, comment) = { + let ( + parent_blueprint_id, + internal_dns_version, + time_created, + creator, + comment, + ) = { use db::schema::blueprint::dsl; let Some(blueprint) = dsl::blueprint @@ -251,6 +257,7 @@ impl DataStore { ( blueprint.parent_blueprint_id, + *blueprint.internal_dns_version, blueprint.time_created, blueprint.creator, blueprint.comment, @@ -479,6 +486,7 @@ impl DataStore { omicron_zones, zones_in_service, parent_blueprint_id, + internal_dns_version, time_created, creator, comment, @@ -1056,6 +1064,7 @@ mod tests { use nexus_types::external_api::views::SledProvisionState; use nexus_types::inventory::Collection; use omicron_common::address::Ipv6Subnet; + use omicron_common::api::external::Generation; use omicron_test_utils::dev; use rand::thread_rng; use rand::Rng; @@ -1174,6 +1183,7 @@ mod tests { let policy = policy_from_collection(&collection); let blueprint = BlueprintBuilder::build_initial_from_collection( &collection, + Generation::new(), &policy, "test", ) @@ -1207,6 +1217,7 @@ mod tests { nexus_inventory::CollectionBuilder::new("test").build(); let blueprint1 = BlueprintBuilder::build_initial_from_collection( &collection, + Generation::new(), &EMPTY_POLICY, "test", ) @@ -1332,10 +1343,16 @@ mod tests { policy.sleds.insert(new_sled_id, fake_sled_resources(None)); let new_sled_zpools = &policy.sleds.get(&new_sled_id).unwrap().zpools; - // Create a builder for a child blueprint. - let mut builder = - BlueprintBuilder::new_based_on(&blueprint1, &policy, "test") - .expect("failed to create builder"); + // Create a builder for a child blueprint. While we're at it, use a + // different DNS version to test that that works. + let new_dns_version = blueprint1.internal_dns_version.next(); + let mut builder = BlueprintBuilder::new_based_on( + &blueprint1, + new_dns_version, + &policy, + "test", + ) + .expect("failed to create builder"); // Add zones to our new sled. 
assert_eq!( @@ -1381,8 +1398,9 @@ mod tests { .blueprint_read(&opctx, &authz_blueprint2) .await .expect("failed to read collection back"); - println!("diff: {}", blueprint2.diff(&blueprint_read)); + println!("diff: {}", blueprint2.diff_sleds(&blueprint_read)); assert_eq!(blueprint2, blueprint_read); + assert_eq!(blueprint2.internal_dns_version, new_dns_version); { let mut expected_ids = [blueprint1.id, blueprint2.id]; expected_ids.sort(); @@ -1474,18 +1492,27 @@ mod tests { nexus_inventory::CollectionBuilder::new("test").build(); let blueprint1 = BlueprintBuilder::build_initial_from_collection( &collection, + Generation::new(), &EMPTY_POLICY, "test1", ) .unwrap(); - let blueprint2 = - BlueprintBuilder::new_based_on(&blueprint1, &EMPTY_POLICY, "test2") - .expect("failed to create builder") - .build(); - let blueprint3 = - BlueprintBuilder::new_based_on(&blueprint1, &EMPTY_POLICY, "test3") - .expect("failed to create builder") - .build(); + let blueprint2 = BlueprintBuilder::new_based_on( + &blueprint1, + Generation::new(), + &EMPTY_POLICY, + "test2", + ) + .expect("failed to create builder") + .build(); + let blueprint3 = BlueprintBuilder::new_based_on( + &blueprint1, + Generation::new(), + &EMPTY_POLICY, + "test3", + ) + .expect("failed to create builder") + .build(); assert_eq!(blueprint1.parent_blueprint_id, None); assert_eq!(blueprint2.parent_blueprint_id, Some(blueprint1.id)); assert_eq!(blueprint3.parent_blueprint_id, Some(blueprint1.id)); @@ -1574,10 +1601,14 @@ mod tests { // Create a child of blueprint3, and ensure when we set it as the target // with enabled=false, that status is serialized. - let blueprint4 = - BlueprintBuilder::new_based_on(&blueprint3, &EMPTY_POLICY, "test3") - .expect("failed to create builder") - .build(); + let blueprint4 = BlueprintBuilder::new_based_on( + &blueprint3, + Generation::new(), + &EMPTY_POLICY, + "test3", + ) + .expect("failed to create builder") + .build(); assert_eq!(blueprint4.parent_blueprint_id, Some(blueprint3.id)); datastore.blueprint_insert(&opctx, &blueprint4).await.unwrap(); let bp4_target = BlueprintTarget { diff --git a/nexus/db-queries/src/db/datastore/dns.rs b/nexus/db-queries/src/db/datastore/dns.rs index 552ad31487..180764d38c 100644 --- a/nexus/db-queries/src/db/datastore/dns.rs +++ b/nexus/db-queries/src/db/datastore/dns.rs @@ -346,8 +346,77 @@ impl DataStore { Ok(()) } + /// Update the configuration of a DNS zone as specified in `update`, + /// conditional on the _current_ DNS version being `old_version`. + /// + /// Unlike `dns_update_incremental()`, this function assumes the caller has + /// already constructed `update` based on a specific DNS version + /// (`old_version`) and only wants to apply these changes if the DNS version + /// in the database has not changed. + /// + /// Also unlike `dns_update_incremental()`, this function creates its own + /// transaction to apply the update. 
+ /// + /// Like `dns_update_incremental()`, **callers almost certainly want to wake + /// up the corresponding Nexus background task to cause these changes to be + /// propagated to the corresponding DNS servers.** + pub async fn dns_update_from_version( + &self, + opctx: &OpContext, + update: DnsVersionUpdateBuilder, + old_version: Generation, + ) -> Result<(), Error> { + opctx.authorize(authz::Action::Modify, &authz::DNS_CONFIG).await?; + let conn = self.pool_connection_authorized(opctx).await?; + conn.transaction_async(|c| async move { + let zones = self + .dns_zones_list_all_on_connection(opctx, &c, update.dns_group) + .await?; + // This looks like a time-of-check-to-time-of-use race, but this + // approach works because we're inside a transaction and the + // isolation level is SERIALIZABLE. + let version = self + .dns_group_latest_version_conn(opctx, &c, update.dns_group) + .await?; + if version.version != old_version { + return Err(TransactionError::CustomError(Error::conflict( + format!( + "expected current DNS version to be {}, found {}", + *old_version, *version.version, + ), + ))); + } + + self.dns_write_version_internal( + &c, + update, + zones, + Generation(old_version.next()), + ) + .await + }) + .await + .map_err(|e| match e { + TransactionError::CustomError(e) => e, + TransactionError::Database(e) => { + public_error_from_diesel(e, ErrorHandler::Server) + } + }) + } + /// Update the configuration of a DNS zone as specified in `update` /// + /// Unlike `dns_update_from_version()`, this function assumes that the + /// caller's changes are valid regardless of the current DNS configuration. + /// This function fetches the latest version and always writes the updated + /// config as the next version. This is appropriate if the caller is adding + /// something wholly new or removing something that it knows should be + /// present (as in the case when we add or remove DNS names for a Silo, + /// since we control exactly when that happens). This is _not_ appropriate + /// if the caller is making arbitrary changes that might conflict with a + /// concurrent caller. For that, you probably want + /// `dns_update_from_version()`. + /// /// This function runs the body inside a transaction (if no transaction is /// open) or a nested transaction (savepoint, if a transaction is already /// open). Generally, the caller should invoke this function while already @@ -358,14 +427,14 @@ impl DataStore { /// /// It's recommended to put this step last in any transaction because the /// more time elapses between running this function and attempting to commit - /// the transaction, the greater the change of either transaction failure + /// the transaction, the greater the chance of either transaction failure /// due to a conflict error (if some other caller attempts to update the /// same DNS group) or another client blocking (for the same reason). 
/// /// **Callers almost certainly want to wake up the corresponding Nexus /// background task to cause these changes to be propagated to the /// corresponding DNS servers.** - pub async fn dns_update( + pub async fn dns_update_incremental( &self, opctx: &OpContext, conn: &async_bb8_diesel::Connection, @@ -378,19 +447,32 @@ impl DataStore { .await?; conn.transaction_async(|c| async move { - self.dns_update_internal(opctx, &c, update, zones).await + let version = self + .dns_group_latest_version_conn(opctx, conn, update.dns_group) + .await?; + self.dns_write_version_internal( + &c, + update, + zones, + Generation(version.version.next()), + ) + .await }) .await } // This must only be used inside a transaction. Otherwise, it may make - // invalid changes to the database state. Use `dns_update()` instead. - async fn dns_update_internal( + // invalid changes to the database state. Use one of the `dns_update_*()` + // functions instead. + // + // The caller should already have checked (in the same transaction) that + // their version number is the correct next version. + async fn dns_write_version_internal( &self, - opctx: &OpContext, conn: &async_bb8_diesel::Connection, update: DnsVersionUpdateBuilder, zones: Vec, + new_version_num: Generation, ) -> Result<(), TransactionError> { // TODO-scalability TODO-performance This would be much better as a CTE // for all the usual reasons described in RFD 192. Using an interactive @@ -401,11 +483,6 @@ impl DataStore { // operations fail spuriously as far as the client is concerned). We // expect these problems to be small or unlikely at small scale but // significant as the system scales up. - let dns_group = update.dns_group; - let version = - self.dns_group_latest_version_conn(opctx, conn, dns_group).await?; - let new_version_num = - nexus_db_model::Generation(version.version.next()); let new_version = DnsVersion { dns_group: update.dns_group, version: new_version_num, @@ -498,15 +575,16 @@ impl DataStore { /// and add the name back with different records. /// /// You use this object to build up a _description_ of the changes to the DNS -/// zone's configuration. Then you call [`DataStore::dns_update()`] to apply -/// these changes transactionally to the database. The changes are then -/// propagated asynchronously to the DNS servers. No changes are made (to -/// either the database or the DNS servers) while you modify this object. +/// zone's configuration. Then you call [`DataStore::dns_update_incremental()`] +/// or [`DataStore::dns_update_from_version()`] to apply these changes +/// transactionally to the database. The changes are then propagated +/// asynchronously to the DNS servers. No changes are made (to either the +/// database or the DNS servers) while you modify this object. /// /// This object changes all of the zones associated with a particular DNS group /// because the assumption right now is that they're equivalent. (In practice, /// we should only ever have one zone in each group right now.) 
-#[derive(Clone)] +#[derive(Clone, Debug)] pub struct DnsVersionUpdateBuilder { dns_group: DnsGroup, comment: String, @@ -596,6 +674,16 @@ impl DnsVersionUpdateBuilder { Ok(()) } } + + pub fn names_removed(&self) -> impl Iterator { + self.names_removed.iter().map(AsRef::as_ref) + } + + pub fn names_added(&self) -> impl Iterator { + self.names_added + .iter() + .map(|(name, list)| (name.as_ref(), list.as_ref())) + } } #[cfg(test)] @@ -1360,8 +1448,8 @@ mod test { } #[tokio::test] - async fn test_dns_update() { - let logctx = dev::test_setup_log("test_dns_update"); + async fn test_dns_update_incremental() { + let logctx = dev::test_setup_log("test_dns_update_incremental"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; let now = Utc::now(); @@ -1457,7 +1545,10 @@ mod test { update.add_name(String::from("n2"), records2.clone()).unwrap(); let conn = datastore.pool_connection_for_tests().await.unwrap(); - datastore.dns_update(&opctx, &conn, update).await.unwrap(); + datastore + .dns_update_incremental(&opctx, &conn, update) + .await + .unwrap(); } // Verify the new config. @@ -1490,7 +1581,10 @@ mod test { update.add_name(String::from("n1"), records12.clone()).unwrap(); let conn = datastore.pool_connection_for_tests().await.unwrap(); - datastore.dns_update(&opctx, &conn, update).await.unwrap(); + datastore + .dns_update_incremental(&opctx, &conn, update) + .await + .unwrap(); } let dns_config = datastore @@ -1520,7 +1614,10 @@ mod test { update.remove_name(String::from("n1")).unwrap(); let conn = datastore.pool_connection_for_tests().await.unwrap(); - datastore.dns_update(&opctx, &conn, update).await.unwrap(); + datastore + .dns_update_incremental(&opctx, &conn, update) + .await + .unwrap(); } let dns_config = datastore @@ -1547,7 +1644,10 @@ mod test { update.add_name(String::from("n1"), records2.clone()).unwrap(); let conn = datastore.pool_connection_for_tests().await.unwrap(); - datastore.dns_update(&opctx, &conn, update).await.unwrap(); + datastore + .dns_update_incremental(&opctx, &conn, update) + .await + .unwrap(); } let dns_config = datastore @@ -1586,7 +1686,9 @@ mod test { let copctx = opctx.child(std::collections::BTreeMap::new()); let mut fut = conn1 .transaction_async(|c1| async move { - cds.dns_update(&copctx, &c1, update1).await.unwrap(); + cds.dns_update_incremental(&copctx, &c1, update1) + .await + .unwrap(); // Let the outside scope know we've done the update, but we // haven't committed the transaction yet. Wait for them to // tell us to proceed. @@ -1613,7 +1715,10 @@ mod test { String::from("the test suite"), ); update2.add_name(String::from("n1"), records1.clone()).unwrap(); - datastore.dns_update(&opctx, &conn2, update2).await.unwrap(); + datastore + .dns_update_incremental(&opctx, &conn2, update2) + .await + .unwrap(); // Now let the first one finish. 
wait2_tx.send(()).unwrap(); @@ -1657,8 +1762,10 @@ mod test { update.remove_name(String::from("n4")).unwrap(); let conn = datastore.pool_connection_for_tests().await.unwrap(); - let error = - datastore.dns_update(&opctx, &conn, update).await.unwrap_err(); + let error = datastore + .dns_update_incremental(&opctx, &conn, update) + .await + .unwrap_err(); let error = match error { TransactionError::CustomError(err) => err, _ => panic!("Unexpected error: {:?}", error), @@ -1687,7 +1794,10 @@ mod test { let conn = datastore.pool_connection_for_tests().await.unwrap(); let error = Error::from( - datastore.dns_update(&opctx, &conn, update).await.unwrap_err(), + datastore + .dns_update_incremental(&opctx, &conn, update) + .await + .unwrap_err(), ); let msg = error.to_string(); assert!(msg.starts_with("Internal Error: "), "Message: {msg:}"); @@ -1714,4 +1824,117 @@ mod test { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + #[tokio::test] + async fn test_dns_update_from_version() { + let logctx = dev::test_setup_log("test_dns_update_from_version"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // The guts of `dns_update_from_version()` are shared with + // `dns_update_incremental()`. The main cases worth testing here are + // (1) quick check that the happy path works, plus (2) make sure it + // fails when the precondition fails (current version doesn't match + // what's expected). + // + // Start by loading some initial data. + let conn = datastore.pool_connection_for_tests().await.unwrap(); + let initial_data = InitialDnsGroup::new( + DnsGroup::Internal, + "my-zone", + "test-suite", + "test-suite", + HashMap::from([ + ( + "wendell".to_string(), + vec![DnsRecord::Aaaa(Ipv6Addr::LOCALHOST)], + ), + ( + "krabappel".to_string(), + vec![DnsRecord::Aaaa(Ipv6Addr::LOCALHOST)], + ), + ]), + ); + DataStore::load_dns_data(&conn, initial_data) + .await + .expect("failed to insert initial data"); + + // Construct an update and apply it conditional on the current + // generation matching the initial one. This should succeed. + let mut update1 = DnsVersionUpdateBuilder::new( + DnsGroup::Internal, + String::from("test-suite-1"), + String::from("test-suite-1"), + ); + update1.remove_name(String::from("wendell")).unwrap(); + update1 + .add_name( + String::from("nelson"), + vec![DnsRecord::Aaaa(Ipv6Addr::LOCALHOST)], + ) + .unwrap(); + let gen1 = Generation::new(); + datastore + .dns_update_from_version(&opctx, update1, gen1) + .await + .expect("failed to update from first generation"); + + // Now construct another update based on the _first_ version and try to + // apply that. It should not work because the version has changed from + // under us. + let mut update2 = DnsVersionUpdateBuilder::new( + DnsGroup::Internal, + String::from("test-suite-2"), + String::from("test-suite-2"), + ); + update2.remove_name(String::from("krabappel")).unwrap(); + update2 + .add_name( + String::from("hoover"), + vec![DnsRecord::Aaaa(Ipv6Addr::LOCALHOST)], + ) + .unwrap(); + let error = datastore + .dns_update_from_version(&opctx, update2.clone(), gen1) + .await + .expect_err("update unexpectedly succeeded"); + assert!(error + .to_string() + .contains("expected current DNS version to be 1, found 2")); + + // At this point, the database state should reflect the first update but + // not the second. 
+ let config = datastore
+ .dns_config_read(&opctx, DnsGroup::Internal)
+ .await
+ .expect("failed to read config");
+ let gen2 = nexus_db_model::Generation(gen1.next());
+ assert_eq!(u64::from(*gen2), config.generation);
+ assert_eq!(1, config.zones.len());
+ let records = &config.zones[0].records;
+ assert!(records.contains_key("nelson"));
+ assert!(!records.contains_key("wendell"));
+ assert!(records.contains_key("krabappel"));
+
+ // We can apply the second update, as long as we say it's conditional on
+ // the current generation.
+ datastore
+ .dns_update_from_version(&opctx, update2, gen2)
+ .await
+ .expect("failed to update from second generation");
+ let config = datastore
+ .dns_config_read(&opctx, DnsGroup::Internal)
+ .await
+ .expect("failed to read config");
+ assert_eq!(u64::from(gen2.next()), config.generation);
+ assert_eq!(1, config.zones.len());
+ let records = &config.zones[0].records;
+ assert!(records.contains_key("nelson"));
+ assert!(!records.contains_key("wendell"));
+ assert!(!records.contains_key("krabappel"));
+ assert!(records.contains_key("hoover"));
+
+ db.cleanup().await.unwrap();
+ logctx.cleanup_successful();
+ }
 }
diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs
index f9e0be81c1..5f05aa1760 100644
--- a/nexus/db-queries/src/db/datastore/mod.rs
+++ b/nexus/db-queries/src/db/datastore/mod.rs
@@ -105,6 +105,7 @@ pub use instance::InstanceAndActiveVmm;
 pub use inventory::DataStoreInventoryTest;
 pub use rack::RackInit;
 pub use silo::Discoverability;
+use std::num::NonZeroU32;
 pub use switch_port::SwitchPortSettingsCombinedResult;
 pub use virtual_provisioning_collection::StorageType;
 pub use volume::read_only_resources_associated_with_volume;
@@ -121,6 +122,14 @@ pub const SERVICE_IP_POOL_NAME: &str = "oxide-service-pool";
 /// The name of the built-in Project and VPC for Oxide services.
 pub const SERVICES_DB_NAME: &str = "oxide-services";
 
+/// "limit" to be used in SQL queries that paginate through large result sets
+///
+/// This value is chosen to be small enough to avoid any queries being too
+/// expensive.
+// unsafe: `new_unchecked` is only unsound if the argument is 0.
+pub const SQL_BATCH_SIZE: NonZeroU32 =
+ unsafe { NonZeroU32::new_unchecked(1000) };
+
 // Represents a query that is ready to be executed.
 //
 // This helper trait lets the statement either be executed or explained.
diff --git a/nexus/db-queries/src/db/datastore/silo.rs b/nexus/db-queries/src/db/datastore/silo.rs
index 6cfda3c093..df10b1c072 100644
--- a/nexus/db-queries/src/db/datastore/silo.rs
+++ b/nexus/db-queries/src/db/datastore/silo.rs
@@ -282,7 +282,8 @@ impl DataStore {
 .await?;
 }
 
- self.dns_update(nexus_opctx, &conn, dns_update).await?;
+ self.dns_update_incremental(nexus_opctx, &conn, dns_update)
+ .await?;
 
 self.silo_quotas_create(
 &conn,
@@ -420,7 +421,7 @@
 )
 .await?;
 
- self.dns_update(dns_opctx, &conn, dns_update).await?;
+ self.dns_update_incremental(dns_opctx, &conn, dns_update).await?;
 
 info!(opctx.log, "deleted silo {}", id);
 
diff --git a/nexus/db-queries/src/db/datastore/sled.rs b/nexus/db-queries/src/db/datastore/sled.rs
index 7b94d64418..eb50061272 100644
--- a/nexus/db-queries/src/db/datastore/sled.rs
+++ b/nexus/db-queries/src/db/datastore/sled.rs
@@ -5,6 +5,7 @@
 
 //! [`DataStore`] methods on [`Sled`]s.
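[Editor's aside, not part of the patch] On the `SQL_BATCH_SIZE` constant added
to datastore/mod.rs above: `NonZeroU32::new` is a `const fn`, so the same
constant can also be built without `unsafe`, rejecting the impossible zero
case at compile time. A sketch:

use std::num::NonZeroU32;

pub const SQL_BATCH_SIZE: NonZeroU32 = match NonZeroU32::new(1000) {
    Some(n) => n,
    None => panic!("batch size must be non-zero"),
};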
use super::DataStore;
+use super::SQL_BATCH_SIZE;
 use crate::authz;
 use crate::context::OpContext;
 use crate::db;
@@ -14,11 +15,13 @@ use crate::db::model::Sled;
 use crate::db::model::SledResource;
 use crate::db::model::SledUpdate;
 use crate::db::pagination::paginated;
+use crate::db::pagination::Paginator;
 use crate::db::update_and_check::UpdateAndCheck;
 use crate::transaction_retry::OptionalError;
 use async_bb8_diesel::AsyncRunQueryDsl;
 use chrono::Utc;
 use diesel::prelude::*;
+use nexus_types::identity::Asset;
 use omicron_common::api::external;
 use omicron_common::api::external::CreateResult;
 use omicron_common::api::external::DataPageParams;
@@ -77,6 +80,29 @@ impl DataStore {
 .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
 }
 
+ /// List all sleds, making as many queries as needed to get them all
+ ///
+ /// This should generally not be used in API handlers or other
+ /// latency-sensitive contexts, but it can make sense in saga actions or
+ /// background tasks.
+ pub async fn sled_list_all_batched(
+ &self,
+ opctx: &OpContext,
+ ) -> ListResultVec<Sled> {
+ opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;
+ opctx.check_complex_operations_allowed()?;
+
+ let mut all_sleds = Vec::new();
+ let mut paginator = Paginator::new(SQL_BATCH_SIZE);
+ while let Some(p) = paginator.next() {
+ let batch = self.sled_list(opctx, &p.current_pagparams()).await?;
+ paginator =
+ p.found_batch(&batch, &|s: &nexus_db_model::Sled| s.id());
+ all_sleds.extend(batch);
+ }
+ Ok(all_sleds)
+ }
+
 pub async fn sled_reservation_create(
 &self,
 opctx: &OpContext,
@@ -465,4 +491,54 @@ mod test {
 limit,
 }
 }
+
+ /// Tests listing large numbers of sleds via the batched interface
+ #[tokio::test]
+ async fn sled_list_batch() {
+ let logctx =
+ dev::test_setup_log("sled_list_batch");
+ let mut db = test_setup_database(&logctx.log).await;
+ let (opctx, datastore) = datastore_test(&logctx, &db).await;
+
+ let size = usize::try_from(2 * SQL_BATCH_SIZE.get()).unwrap();
+ let mut new_sleds = Vec::with_capacity(size);
+ new_sleds.resize_with(size, test_new_sled_update);
+ let mut expected_ids: Vec<_> =
+ new_sleds.iter().map(|s| s.id()).collect();
+ expected_ids.sort();
+
+ // This is essentially the same as `sled_upsert()`. But since we know
+ // none of these exist already, we can just insert them. And that means
+ // we can do them all in one SQL statement. This is considerably
+ // faster.
+ let values_to_insert: Vec<_> =
+ new_sleds.into_iter().map(|s| s.into_insertable()).collect();
+ let ninserted = {
+ use db::schema::sled::dsl;
+ diesel::insert_into(dsl::sled)
+ .values(values_to_insert)
+ .execute_async(
+ &*datastore
+ .pool_connection_for_tests()
+ .await
+ .expect("failed to get connection"),
+ )
+ .await
+ .expect("failed to insert sled")
+ };
+ assert_eq!(ninserted, size);
+
+ let sleds = datastore
+ .sled_list_all_batched(&opctx)
+ .await
+ .expect("failed to list all sleds");
+ // We don't need to sort these ids because the sleds are enumerated in
+ // id order.
+ let found_ids: Vec<_> = sleds.into_iter().map(|s| s.id()).collect();
+ assert_eq!(expected_ids, found_ids);
+ assert_eq!(found_ids.len(), size);
+
+ db.cleanup().await.unwrap();
+ logctx.cleanup_successful();
+ }
 }
diff --git a/nexus/deployment/src/blueprint_builder.rs b/nexus/deployment/src/blueprint_builder.rs
index 1bf46d34b2..86a9b8da6e 100644
--- a/nexus/deployment/src/blueprint_builder.rs
+++ b/nexus/deployment/src/blueprint_builder.rs
@@ -100,6 +100,7 @@ pub enum EnsureMultiple {
 pub struct BlueprintBuilder<'a> {
 /// previous blueprint, on which this one will be based
 parent_blueprint: &'a Blueprint,
+ internal_dns_version: Generation,
 
 // These fields are used to allocate resources from sleds.
 policy: &'a Policy,
@@ -128,6 +129,7 @@ impl<'a> BlueprintBuilder<'a> {
 /// collection (representing no changes from the collection state)
 pub fn build_initial_from_collection(
 collection: &'a Collection,
+ internal_dns_version: Generation,
 policy: &'a Policy,
 creator: &str,
 ) -> Result<Blueprint, Error> {
@@ -174,6 +176,7 @@
 omicron_zones,
 zones_in_service,
 parent_blueprint_id: None,
+ internal_dns_version,
 time_created: now_db_precision(),
 creator: creator.to_owned(),
 comment: format!("from collection {}", collection.id),
@@ -184,6 +187,7 @@
 /// starting with no changes from that state
 pub fn new_based_on(
 parent_blueprint: &'a Blueprint,
+ internal_dns_version: Generation,
 policy: &'a Policy,
 creator: &str,
 ) -> anyhow::Result<BlueprintBuilder<'a>> {
@@ -284,6 +288,7 @@
 
 Ok(BlueprintBuilder {
 parent_blueprint,
+ internal_dns_version,
 policy,
 sled_ip_allocators: BTreeMap::new(),
 zones: BlueprintZones::new(parent_blueprint),
@@ -307,6 +312,7 @@
 omicron_zones,
 zones_in_service: self.zones_in_service,
 parent_blueprint_id: Some(self.parent_blueprint.id),
+ internal_dns_version: self.internal_dns_version,
 time_created: now_db_precision(),
 creator: self.creator,
 comment: self.comments.join(", "),
@@ -929,6 +935,7 @@
 let blueprint_initial =
 BlueprintBuilder::build_initial_from_collection(
 &collection,
+ Generation::new(),
 &policy,
 "the_test",
 )
@@ -939,7 +946,7 @@
 // provide that ourselves. For our purposes though we don't care.
 let zones_in_service = blueprint_initial.zones_in_service.clone();
 let diff = blueprint_initial
- .diff_from_collection(&collection, &zones_in_service);
+ .diff_sleds_from_collection(&collection, &zones_in_service);
 println!(
 "collection -> initial blueprint (expected no changes):\n{}",
 diff
 );
@@ -951,13 +958,14 @@
 
 // Test a no-op blueprint.
let builder = BlueprintBuilder::new_based_on( &blueprint_initial, + Generation::new(), &policy, "test_basic", ) .expect("failed to create builder"); let blueprint = builder.build(); verify_blueprint(&blueprint); - let diff = blueprint_initial.diff(&blueprint); + let diff = blueprint_initial.diff_sleds(&blueprint); println!( "initial blueprint -> next blueprint (expected no changes):\n{}", diff @@ -972,15 +980,20 @@ pub mod test { let (collection, mut policy) = example(); let blueprint1 = BlueprintBuilder::build_initial_from_collection( &collection, + Generation::new(), &policy, "the_test", ) .expect("failed to create initial blueprint"); verify_blueprint(&blueprint1); - let mut builder = - BlueprintBuilder::new_based_on(&blueprint1, &policy, "test_basic") - .expect("failed to create builder"); + let mut builder = BlueprintBuilder::new_based_on( + &blueprint1, + Generation::new(), + &policy, + "test_basic", + ) + .expect("failed to create builder"); // The initial blueprint should have internal NTP zones on all the // existing sleds, plus Crucible zones on all pools. So if we ensure @@ -996,7 +1009,7 @@ pub mod test { let blueprint2 = builder.build(); verify_blueprint(&blueprint2); - let diff = blueprint1.diff(&blueprint2); + let diff = blueprint1.diff_sleds(&blueprint2); println!( "initial blueprint -> next blueprint (expected no changes):\n{}", diff @@ -1008,9 +1021,13 @@ pub mod test { // The next step is adding these zones to a new sled. let new_sled_id = Uuid::new_v4(); let _ = policy_add_sled(&mut policy, new_sled_id); - let mut builder = - BlueprintBuilder::new_based_on(&blueprint2, &policy, "test_basic") - .expect("failed to create builder"); + let mut builder = BlueprintBuilder::new_based_on( + &blueprint2, + Generation::new(), + &policy, + "test_basic", + ) + .expect("failed to create builder"); builder.sled_ensure_zone_ntp(new_sled_id).unwrap(); let new_sled_resources = policy.sleds.get(&new_sled_id).unwrap(); for pool_name in &new_sled_resources.zpools { @@ -1021,7 +1038,7 @@ pub mod test { let blueprint3 = builder.build(); verify_blueprint(&blueprint3); - let diff = blueprint2.diff(&blueprint3); + let diff = blueprint2.diff_sleds(&blueprint3); println!("expecting new NTP and Crucible zones:\n{}", diff); // No sleds were changed or removed. @@ -1081,6 +1098,9 @@ pub mod test { fn test_add_nexus_with_no_existing_nexus_zones() { let (mut collection, policy) = example(); + // We don't care about the internal DNS version here. + let internal_dns_version = Generation::new(); + // Adding a new Nexus zone currently requires copying settings from an // existing Nexus zone. If we remove all Nexus zones from the // collection, create a blueprint, then try to add a Nexus zone, it @@ -1093,14 +1113,19 @@ pub mod test { let parent = BlueprintBuilder::build_initial_from_collection( &collection, + internal_dns_version, &policy, "test", ) .expect("failed to create initial blueprint"); - let mut builder = - BlueprintBuilder::new_based_on(&parent, &policy, "test") - .expect("failed to create builder"); + let mut builder = BlueprintBuilder::new_based_on( + &parent, + internal_dns_version, + &policy, + "test", + ) + .expect("failed to create builder"); let err = builder .sled_ensure_zone_multiple_nexus( @@ -1124,6 +1149,9 @@ pub mod test { fn test_add_nexus_error_cases() { let (mut collection, policy) = example(); + // We don't care about the internal DNS version here. 
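+        // (`Generation::new()` is the first generation value; any valid
+        // generation would do for these tests.)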
+ let internal_dns_version = Generation::new(); + // Remove the Nexus zone from one of the sleds so that // `sled_ensure_zone_nexus` can attempt to add a Nexus zone to // `sled_id`. @@ -1144,6 +1172,7 @@ pub mod test { let parent = BlueprintBuilder::build_initial_from_collection( &collection, + Generation::new(), &policy, "test", ) @@ -1152,9 +1181,13 @@ pub mod test { { // Attempting to add Nexus to the sled we removed it from (with no // other changes to the environment) should succeed. - let mut builder = - BlueprintBuilder::new_based_on(&parent, &policy, "test") - .expect("failed to create builder"); + let mut builder = BlueprintBuilder::new_based_on( + &parent, + internal_dns_version, + &policy, + "test", + ) + .expect("failed to create builder"); let added = builder .sled_ensure_zone_multiple_nexus(sled_id, 1) .expect("failed to ensure nexus zone"); @@ -1166,9 +1199,13 @@ pub mod test { // Attempting to add multiple Nexus zones to the sled we removed it // from (with no other changes to the environment) should also // succeed. - let mut builder = - BlueprintBuilder::new_based_on(&parent, &policy, "test") - .expect("failed to create builder"); + let mut builder = BlueprintBuilder::new_based_on( + &parent, + internal_dns_version, + &policy, + "test", + ) + .expect("failed to create builder"); let added = builder .sled_ensure_zone_multiple_nexus(sled_id, 3) .expect("failed to ensure nexus zone"); @@ -1194,9 +1231,13 @@ pub mod test { assert!(!used_ip_ranges.is_empty()); policy.service_ip_pool_ranges = used_ip_ranges; - let mut builder = - BlueprintBuilder::new_based_on(&parent, &policy, "test") - .expect("failed to create builder"); + let mut builder = BlueprintBuilder::new_based_on( + &parent, + internal_dns_version, + &policy, + "test", + ) + .expect("failed to create builder"); let err = builder .sled_ensure_zone_multiple_nexus(sled_id, 1) .unwrap_err(); @@ -1247,12 +1288,18 @@ pub mod test { let parent = BlueprintBuilder::build_initial_from_collection( &collection, + Generation::new(), &policy, "test", ) .unwrap(); - match BlueprintBuilder::new_based_on(&parent, &policy, "test") { + match BlueprintBuilder::new_based_on( + &parent, + Generation::new(), + &policy, + "test", + ) { Ok(_) => panic!("unexpected success"), Err(err) => assert!( err.to_string().contains("duplicate external IP"), @@ -1290,12 +1337,18 @@ pub mod test { let parent = BlueprintBuilder::build_initial_from_collection( &collection, + Generation::new(), &policy, "test", ) .unwrap(); - match BlueprintBuilder::new_based_on(&parent, &policy, "test") { + match BlueprintBuilder::new_based_on( + &parent, + Generation::new(), + &policy, + "test", + ) { Ok(_) => panic!("unexpected success"), Err(err) => assert!( err.to_string().contains("duplicate Nexus NIC IP"), @@ -1333,12 +1386,18 @@ pub mod test { let parent = BlueprintBuilder::build_initial_from_collection( &collection, + Generation::new(), &policy, "test", ) .unwrap(); - match BlueprintBuilder::new_based_on(&parent, &policy, "test") { + match BlueprintBuilder::new_based_on( + &parent, + Generation::new(), + &policy, + "test", + ) { Ok(_) => panic!("unexpected success"), Err(err) => assert!( err.to_string().contains("duplicate service vNIC MAC"), diff --git a/nexus/deployment/src/planner.rs b/nexus/deployment/src/planner.rs index cbdcfd80c0..7973157068 100644 --- a/nexus/deployment/src/planner.rs +++ b/nexus/deployment/src/planner.rs @@ -14,8 +14,8 @@ use nexus_types::deployment::Blueprint; use nexus_types::deployment::Policy; use 
nexus_types::external_api::views::SledProvisionState; use nexus_types::inventory::Collection; -use slog::warn; -use slog::{info, Logger}; +use omicron_common::api::external::Generation; +use slog::{info, warn, Logger}; use std::collections::BTreeMap; use std::collections::BTreeSet; use uuid::Uuid; @@ -40,14 +40,19 @@ impl<'a> Planner<'a> { pub fn new_based_on( log: Logger, parent_blueprint: &'a Blueprint, + internal_dns_version: Generation, policy: &'a Policy, creator: &str, // NOTE: Right now, we just assume that this is the latest inventory // collection. See the comment on the corresponding field in `Planner`. inventory: &'a Collection, ) -> anyhow::Result> { - let blueprint = - BlueprintBuilder::new_based_on(parent_blueprint, policy, creator)?; + let blueprint = BlueprintBuilder::new_based_on( + parent_blueprint, + internal_dns_version, + policy, + creator, + )?; Ok(Planner { log, policy, blueprint, inventory }) } @@ -319,6 +324,9 @@ mod test { fn test_basic_add_sled() { let logctx = test_setup_log("planner_basic_add_sled"); + // For our purposes, we don't care about the internal DNS generation. + let internal_dns_version = Generation::new(); + // Use our example inventory collection. let (mut collection, mut policy) = example(); @@ -326,6 +334,7 @@ mod test { // because there's a separate test for that. let blueprint1 = BlueprintBuilder::build_initial_from_collection( &collection, + internal_dns_version, &policy, "the_test", ) @@ -338,6 +347,7 @@ mod test { let blueprint2 = Planner::new_based_on( logctx.log.clone(), &blueprint1, + internal_dns_version, &policy, "no-op?", &collection, @@ -346,7 +356,7 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint1.diff(&blueprint2); + let diff = blueprint1.diff_sleds(&blueprint2); println!("1 -> 2 (expected no changes):\n{}", diff); assert_eq!(diff.sleds_added().count(), 0); assert_eq!(diff.sleds_removed().count(), 0); @@ -362,6 +372,7 @@ mod test { let blueprint3 = Planner::new_based_on( logctx.log.clone(), &blueprint2, + internal_dns_version, &policy, "test: add NTP?", &collection, @@ -370,7 +381,7 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint2.diff(&blueprint3); + let diff = blueprint2.diff_sleds(&blueprint3); println!("2 -> 3 (expect new NTP zone on new sled):\n{}", diff); let sleds = diff.sleds_added().collect::>(); let (sled_id, sled_zones) = sleds[0]; @@ -394,6 +405,7 @@ mod test { let blueprint4 = Planner::new_based_on( logctx.log.clone(), &blueprint3, + internal_dns_version, &policy, "test: add nothing more", &collection, @@ -401,7 +413,7 @@ mod test { .expect("failed to create planner") .plan() .expect("failed to plan"); - let diff = blueprint3.diff(&blueprint4); + let diff = blueprint3.diff_sleds(&blueprint4); println!("3 -> 4 (expected no changes):\n{}", diff); assert_eq!(diff.sleds_added().count(), 0); assert_eq!(diff.sleds_removed().count(), 0); @@ -430,6 +442,7 @@ mod test { let blueprint5 = Planner::new_based_on( logctx.log.clone(), &blueprint3, + internal_dns_version, &policy, "test: add Crucible zones?", &collection, @@ -438,7 +451,7 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint3.diff(&blueprint5); + let diff = blueprint3.diff_sleds(&blueprint5); println!("3 -> 5 (expect Crucible zones):\n{}", diff); assert_eq!(diff.sleds_added().count(), 0); assert_eq!(diff.sleds_removed().count(), 0); @@ -465,6 +478,7 @@ mod test { let blueprint6 = Planner::new_based_on( logctx.log.clone(), &blueprint5, + internal_dns_version, &policy, "test: no-op?", &collection, @@ 
-473,7 +487,7 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint5.diff(&blueprint6); + let diff = blueprint5.diff_sleds(&blueprint6); println!("5 -> 6 (expect no changes):\n{}", diff); assert_eq!(diff.sleds_added().count(), 0); assert_eq!(diff.sleds_removed().count(), 0); @@ -489,6 +503,9 @@ mod test { fn test_add_multiple_nexus_to_one_sled() { let logctx = test_setup_log("planner_add_multiple_nexus_to_one_sled"); + // For our purposes, we don't care about the internal DNS generation. + let internal_dns_version = Generation::new(); + // Use our example inventory collection as a starting point, but strip // it down to just one sled. let (sled_id, collection, mut policy) = { @@ -510,6 +527,7 @@ mod test { // Build the initial blueprint. let blueprint1 = BlueprintBuilder::build_initial_from_collection( &collection, + internal_dns_version, &policy, "the_test", ) @@ -536,6 +554,7 @@ mod test { let blueprint2 = Planner::new_based_on( logctx.log.clone(), &blueprint1, + internal_dns_version, &policy, "add more Nexus", &collection, @@ -544,7 +563,7 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint1.diff(&blueprint2); + let diff = blueprint1.diff_sleds(&blueprint2); println!("1 -> 2 (added additional Nexus zones):\n{}", diff); assert_eq!(diff.sleds_added().count(), 0); assert_eq!(diff.sleds_removed().count(), 0); @@ -579,6 +598,7 @@ mod test { // Build the initial blueprint. let blueprint1 = BlueprintBuilder::build_initial_from_collection( &collection, + Generation::new(), &policy, "the_test", ) @@ -602,6 +622,7 @@ mod test { let blueprint2 = Planner::new_based_on( logctx.log.clone(), &blueprint1, + Generation::new(), &policy, "add more Nexus", &collection, @@ -610,7 +631,7 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint1.diff(&blueprint2); + let diff = blueprint1.diff_sleds(&blueprint2); println!("1 -> 2 (added additional Nexus zones):\n{}", diff); assert_eq!(diff.sleds_added().count(), 0); assert_eq!(diff.sleds_removed().count(), 0); @@ -658,6 +679,7 @@ mod test { // Build the initial blueprint. 
let blueprint1 = BlueprintBuilder::build_initial_from_collection( &collection, + Generation::new(), &policy, "the_test", ) @@ -689,6 +711,7 @@ mod test { let blueprint2 = Planner::new_based_on( logctx.log.clone(), &blueprint1, + Generation::new(), &policy, "add more Nexus", &collection, @@ -697,7 +720,7 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint1.diff(&blueprint2); + let diff = blueprint1.diff_sleds(&blueprint2); println!("1 -> 2 (added additional Nexus zones):\n{}", diff); assert_eq!(diff.sleds_added().count(), 0); assert_eq!(diff.sleds_removed().count(), 0); diff --git a/nexus/src/app/background/blueprint_execution.rs b/nexus/src/app/background/blueprint_execution.rs index 84d4cef212..32797facbf 100644 --- a/nexus/src/app/background/blueprint_execution.rs +++ b/nexus/src/app/background/blueprint_execution.rs @@ -19,6 +19,7 @@ use tokio::sync::watch; pub struct BlueprintExecutor { datastore: Arc, rx_blueprint: watch::Receiver>>, + nexus_label: String, } impl BlueprintExecutor { @@ -27,8 +28,9 @@ impl BlueprintExecutor { rx_blueprint: watch::Receiver< Option>, >, + nexus_label: String, ) -> BlueprintExecutor { - BlueprintExecutor { datastore, rx_blueprint } + BlueprintExecutor { datastore, rx_blueprint, nexus_label } } } @@ -65,6 +67,7 @@ impl BackgroundTask for BlueprintExecutor { opctx, &self.datastore, blueprint, + &self.nexus_label, ) .await; @@ -119,6 +122,7 @@ mod test { fn create_blueprint( omicron_zones: BTreeMap, + internal_dns_version: Generation, ) -> (BlueprintTarget, Blueprint) { let id = Uuid::new_v4(); ( @@ -132,6 +136,7 @@ mod test { omicron_zones, zones_in_service: BTreeSet::new(), parent_blueprint_id: None, + internal_dns_version, time_created: chrono::Utc::now(), creator: "test".to_string(), comment: "test blueprint".to_string(), @@ -185,7 +190,11 @@ mod test { } let (blueprint_tx, blueprint_rx) = watch::channel(None); - let mut task = BlueprintExecutor::new(datastore.clone(), blueprint_rx); + let mut task = BlueprintExecutor::new( + datastore.clone(), + blueprint_rx, + String::from("test-suite"), + ); // Now we're ready. // @@ -196,7 +205,8 @@ mod test { // With a target blueprint having no zones, the task should trivially // complete and report a successful (empty) summary. - let blueprint = Arc::new(create_blueprint(BTreeMap::new())); + let generation = Generation::new(); + let blueprint = Arc::new(create_blueprint(BTreeMap::new(), generation)); blueprint_tx.send(Some(blueprint)).unwrap(); let value = task.activate(&opctx).await; println!("activating with no zones: {:?}", value); @@ -204,32 +214,35 @@ mod test { // Create a non-empty blueprint describing two servers and verify that // the task correctly winds up making requests to both of them and - // reporting success. We reuse the same `OmicronZonesConfig` in - // constructing the blueprint because the details don't matter for this - // test. - let zones = OmicronZonesConfig { - generation: Generation::new(), - zones: vec![OmicronZoneConfig { - id: Uuid::new_v4(), - underlay_address: "::1".parse().unwrap(), - zone_type: OmicronZoneType::InternalDns { - dataset: OmicronZoneDataset { - pool_name: format!("oxp_{}", Uuid::new_v4()) - .parse() - .unwrap(), + // reporting success. 
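+        // Each call to `make_zones()` mints a zone with a fresh UUID, so the
+        // two sleds below get distinct zone IDs.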
+ fn make_zones() -> OmicronZonesConfig { + OmicronZonesConfig { + generation: Generation::new(), + zones: vec![OmicronZoneConfig { + id: Uuid::new_v4(), + underlay_address: "::1".parse().unwrap(), + zone_type: OmicronZoneType::InternalDns { + dataset: OmicronZoneDataset { + pool_name: format!("oxp_{}", Uuid::new_v4()) + .parse() + .unwrap(), + }, + dns_address: "oh-hello-internal-dns".into(), + gz_address: "::1".parse().unwrap(), + gz_address_index: 0, + http_address: "some-ipv6-address".into(), }, - dns_address: "oh-hello-internal-dns".into(), - gz_address: "::1".parse().unwrap(), - gz_address_index: 0, - http_address: "some-ipv6-address".into(), - }, - }], - }; - - let mut blueprint = create_blueprint(BTreeMap::from([ - (sled_id1, zones.clone()), - (sled_id2, zones.clone()), - ])); + }], + } + } + let generation = generation.next(); + let mut blueprint = create_blueprint( + BTreeMap::from([ + (sled_id1, make_zones()), + (sled_id2, make_zones()), + ]), + generation, + ); blueprint_tx.send(Some(Arc::new(blueprint.clone()))).unwrap(); @@ -255,6 +268,8 @@ mod test { // Now, disable the target and make sure that we _don't_ invoke the sled // agent. It's enough to just not set expectations. + blueprint.1.internal_dns_version = + blueprint.1.internal_dns_version.next(); blueprint.0.enabled = false; blueprint_tx.send(Some(Arc::new(blueprint.clone()))).unwrap(); let value = task.activate(&opctx).await; diff --git a/nexus/src/app/background/blueprint_load.rs b/nexus/src/app/background/blueprint_load.rs index c34d2ab103..8886df81cd 100644 --- a/nexus/src/app/background/blueprint_load.rs +++ b/nexus/src/app/background/blueprint_load.rs @@ -184,6 +184,7 @@ mod test { use nexus_inventory::now_db_precision; use nexus_test_utils_macros::nexus_test; use nexus_types::deployment::{Blueprint, BlueprintTarget}; + use omicron_common::api::external::Generation; use serde::Deserialize; use std::collections::{BTreeMap, BTreeSet}; use uuid::Uuid; @@ -206,6 +207,7 @@ mod test { omicron_zones: BTreeMap::new(), zones_in_service: BTreeSet::new(), parent_blueprint_id, + internal_dns_version: Generation::new(), time_created: now_db_precision(), creator: "test".to_string(), comment: "test blueprint".to_string(), diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index 27e58a298c..846051a068 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -228,6 +228,7 @@ impl BackgroundTasks { let blueprint_executor = blueprint_execution::BlueprintExecutor::new( datastore.clone(), rx_blueprint.clone(), + nexus_id.to_string(), ); let task_blueprint_executor = driver.register( String::from("blueprint_executor"), @@ -628,7 +629,7 @@ pub mod test { ) { let conn = datastore.pool_connection_for_tests().await.unwrap(); info!(opctx.log, "writing DNS update..."); - datastore.dns_update(opctx, &conn, update).await.unwrap(); + datastore.dns_update_incremental(opctx, &conn, update).await.unwrap(); } pub(crate) async fn write_test_dns_generation( diff --git a/nexus/src/app/deployment.rs b/nexus/src/app/deployment.rs index b8cb6deabf..61ce803d13 100644 --- a/nexus/src/app/deployment.rs +++ b/nexus/src/app/deployment.rs @@ -4,8 +4,10 @@ //! 
Configuration of the deployment system +use nexus_db_model::DnsGroup; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::datastore::SQL_BATCH_SIZE; use nexus_db_queries::db::pagination::Paginator; use nexus_deployment::blueprint_builder::BlueprintBuilder; use nexus_deployment::planner::Planner; @@ -26,6 +28,7 @@ use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; +use omicron_common::api::external::Generation; use omicron_common::api::external::InternalContext; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; @@ -33,19 +36,15 @@ use omicron_common::api::external::LookupType; use slog_error_chain::InlineErrorChain; use std::collections::BTreeMap; use std::collections::BTreeSet; -use std::num::NonZeroU32; use std::str::FromStr; use uuid::Uuid; -/// "limit" used in SQL queries that paginate through all sleds, zpools, etc. -// unsafe: `new_unchecked` is only unsound if the argument is 0. -const SQL_BATCH_SIZE: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1000) }; - /// Common structure for collecting information that the planner needs struct PlanningContext { policy: Policy, creator: String, inventory: Option, + internal_dns_version: Generation, } impl super::Nexus { @@ -118,18 +117,7 @@ impl super::Nexus { let creator = self.id.to_string(); let datastore = self.datastore(); - let sled_rows = { - let mut all_sleds = Vec::new(); - let mut paginator = Paginator::new(SQL_BATCH_SIZE); - while let Some(p) = paginator.next() { - let batch = - datastore.sled_list(opctx, &p.current_pagparams()).await?; - paginator = - p.found_batch(&batch, &|s: &nexus_db_model::Sled| s.id()); - all_sleds.extend(batch); - } - all_sleds - }; + let sled_rows = datastore.sled_list_all_batched(opctx).await?; let mut zpools_by_sled_id = { let mut zpools = BTreeMap::new(); @@ -222,6 +210,16 @@ impl super::Nexus { "fetching latest inventory collection for blueprint planner", )?; + // Fetch the current internal DNS version. This could be made part of + // inventory, but it's enough of a one-off that there's no particular + // advantage to doing that work now. 
+ let dns_version = datastore + .dns_group_latest_version(opctx, DnsGroup::Internal) + .await + .internal_context( + "fetching internal DNS version for blueprint planning", + )?; + Ok(PlanningContext { creator, policy: Policy { @@ -230,6 +228,7 @@ impl super::Nexus { target_nexus_zone_count: NEXUS_REDUNDANCY, }, inventory, + internal_dns_version: *dns_version.version, }) } @@ -253,6 +252,7 @@ impl super::Nexus { let planning_context = self.blueprint_planning_context(opctx).await?; let blueprint = BlueprintBuilder::build_initial_from_collection( &collection, + planning_context.internal_dns_version, &planning_context.policy, &planning_context.creator, ) @@ -287,6 +287,7 @@ impl super::Nexus { let planner = Planner::new_based_on( opctx.log.clone(), &parent_blueprint, + planning_context.internal_dns_version, &planning_context.policy, &planning_context.creator, &inventory, diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 2a37721bb0..271025f7a7 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -781,25 +781,6 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { ); } - pub async fn scrimlet_dns_setup(&mut self) { - let sled_agent = self - .sled_agent - .as_ref() - .expect("Cannot set up scrimlet DNS without sled agent"); - - let sa = match sled_agent.http_server.local_addr() { - SocketAddr::V6(sa) => sa, - SocketAddr::V4(_) => panic!("expected SocketAddrV6 for sled agent"), - }; - - for loc in [SwitchLocation::Switch0, SwitchLocation::Switch1] { - self.rack_init_builder - .internal_dns_config - .host_scrimlet(loc, sa) - .expect("add switch0 scrimlet dns entry"); - } - } - // Set up an external DNS server. pub async fn start_external_dns(&mut self) { let log = self.logctx.log.new(o!("component" => "external_dns_server")); @@ -1063,10 +1044,6 @@ async fn setup_with_config_impl( "start_crucible_pantry", Box::new(|builder| builder.start_crucible_pantry().boxed()), ), - ( - "scrimlet_dns_setup", - Box::new(|builder| builder.scrimlet_dns_setup().boxed()), - ), ( "populate_internal_dns", Box::new(|builder| builder.populate_internal_dns().boxed()), diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 490097d46d..2e683878be 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -135,6 +135,10 @@ pub struct Blueprint { /// which blueprint this blueprint is based on pub parent_blueprint_id: Option, + /// internal DNS version when this blueprint was created + // See blueprint generation for more on this. 
+ pub internal_dns_version: Generation, + /// when this blueprint was generated (for debugging) pub time_created: chrono::DateTime, /// identity of the component that generated the blueprint (for debugging) @@ -161,8 +165,11 @@ impl Blueprint { self.omicron_zones.keys().copied() } - /// Summarize the difference between two blueprints - pub fn diff<'a>(&'a self, other: &'a Blueprint) -> OmicronZonesDiff<'a> { + /// Summarize the difference between sleds and zones between two blueprints + pub fn diff_sleds<'a>( + &'a self, + other: &'a Blueprint, + ) -> OmicronZonesDiff<'a> { OmicronZonesDiff { before_label: format!("blueprint {}", self.id), before_zones: self.omicron_zones.clone(), @@ -173,14 +180,15 @@ impl Blueprint { } } - /// Summarize the difference between a collection and a blueprint + /// Summarize the differences in sleds and zones between a collection and a + /// blueprint /// /// This gives an idea about what would change about a running system if one /// were to execute the blueprint. /// /// Note that collections do not currently include information about what /// zones are in-service, so the caller must provide that information. - pub fn diff_from_collection<'a>( + pub fn diff_sleds_from_collection<'a>( &'a self, collection: &'a Collection, before_zones_in_service: &'a BTreeSet, @@ -212,6 +220,8 @@ pub struct BlueprintMetadata { /// which blueprint this blueprint is based on pub parent_blueprint_id: Option, + /// internal DNS version when this blueprint was created + pub internal_dns_version: Generation, /// when this blueprint was generated (for debugging) pub time_created: chrono::DateTime, diff --git a/nexus/types/src/inventory.rs b/nexus/types/src/inventory.rs index 50e8b380b3..71e8e64d97 100644 --- a/nexus/types/src/inventory.rs +++ b/nexus/types/src/inventory.rs @@ -134,6 +134,14 @@ impl Collection { ) -> impl Iterator { self.omicron_zones.values().flat_map(|z| z.zones.zones.iter()) } + + /// Iterate over the sled ids of sleds identified as Scrimlets + pub fn scrimlets(&self) -> impl Iterator + '_ { + self.sled_agents + .iter() + .filter(|(_, inventory)| inventory.sled_role == SledRole::Scrimlet) + .map(|(sled_id, _)| *sled_id) + } } /// A unique baseboard id found during a collection diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index b8b45aa08e..53a53fb219 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -2124,6 +2124,14 @@ "type": "string", "format": "uuid" }, + "internal_dns_version": { + "description": "internal DNS version when this blueprint was created", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, "omicron_zones": { "description": "mapping: sled id -> zones deployed on each sled A sled is considered part of the control plane cluster iff it has an entry in this map.", "type": "object", @@ -2156,6 +2164,7 @@ "comment", "creator", "id", + "internal_dns_version", "omicron_zones", "time_created", "zones_in_service" @@ -2178,6 +2187,14 @@ "type": "string", "format": "uuid" }, + "internal_dns_version": { + "description": "internal DNS version when this blueprint was created", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, "parent_blueprint_id": { "nullable": true, "description": "which blueprint this blueprint is based on", @@ -2194,6 +2211,7 @@ "comment", "creator", "id", + "internal_dns_version", "time_created" ] }, diff --git a/schema/crdb/36.0.0/up1.sql b/schema/crdb/36.0.0/up1.sql new file mode 100644 index 0000000000..38e3843bfc --- /dev/null +++ 
b/schema/crdb/36.0.0/up1.sql @@ -0,0 +1,8 @@ +-- Add the "internal_dns_version" column to the "blueprint" table. +-- This query will end up setting the internal DNS version for any existing +-- blueprints to 1. This is always safe because it's the smallest possible +-- value and if a value is too small, the end result is simply needing to +-- regenerate the blueprint in order to be able to execute it. (On the other +-- hand, using a value that's too large could cause corruption.) +ALTER TABLE omicron.public.blueprint + ADD COLUMN IF NOT EXISTS internal_dns_version INT8 NOT NULL DEFAULT 1; diff --git a/schema/crdb/36.0.0/up2.sql b/schema/crdb/36.0.0/up2.sql new file mode 100644 index 0000000000..7c0082fbb0 --- /dev/null +++ b/schema/crdb/36.0.0/up2.sql @@ -0,0 +1,2 @@ +ALTER TABLE omicron.public.blueprint + ALTER COLUMN internal_dns_version DROP DEFAULT; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 6c82b63e6e..87a22d1adc 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3096,7 +3096,10 @@ CREATE TABLE IF NOT EXISTS omicron.public.blueprint ( -- These fields are for debugging only. time_created TIMESTAMPTZ NOT NULL, creator TEXT NOT NULL, - comment TEXT NOT NULL + comment TEXT NOT NULL, + + -- identifies the latest internal DNS version when blueprint planning began + internal_dns_version INT8 NOT NULL ); -- table describing both the current and historical target blueprints of the @@ -3515,7 +3518,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '35.0.0', NULL) + ( TRUE, NOW(), NOW(), '36.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index 0b633c2057..77fd8a39de 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -19,7 +19,6 @@ use omicron_common::address::{ RSS_RESERVED_ADDRESSES, SLED_PREFIX, }; use omicron_common::api::external::{MacAddr, Vni}; -use omicron_common::api::internal::shared::SwitchLocation; use omicron_common::api::internal::shared::{ NetworkInterface, NetworkInterfaceKind, SourceNatConfig, }; @@ -295,36 +294,17 @@ impl Plan { "No scrimlets observed".to_string(), )); } - for (i, sled) in scrimlets.iter().enumerate() { + for sled in scrimlets.iter() { let address = get_switch_zone_address(sled.subnet); - let zone = - dns_builder.host_dendrite(sled.sled_id, address).unwrap(); dns_builder - .service_backend_zone( - ServiceName::Dendrite, - &zone, + .host_zone_switch( + sled.sled_id, + address, DENDRITE_PORT, - ) - .unwrap(); - dns_builder - .service_backend_zone( - ServiceName::ManagementGatewayService, - &zone, MGS_PORT, + MGD_PORT, ) .unwrap(); - dns_builder - .service_backend_zone(ServiceName::Mgd, &zone, MGD_PORT) - .unwrap(); - - // TODO only works for single rack - let sled_address = get_sled_address(sled.subnet); - let switch_location = if i == 0 { - SwitchLocation::Switch0 - } else { - SwitchLocation::Switch1 - }; - dns_builder.host_scrimlet(switch_location, sled_address).unwrap(); } // We'll stripe most services across all available Sleds, round-robin @@ -354,11 +334,11 @@ impl Plan { let dns_address = SocketAddrV6::new(ip, DNS_PORT, 0, 0); let id = Uuid::new_v4(); - let zone = dns_builder.host_zone(id, ip).unwrap(); dns_builder - .service_backend_zone( + .host_zone_with_one_backend( + id, + ip, ServiceName::InternalDns, - &zone, DNS_HTTP_PORT, ) .unwrap(); @@ -391,9 +371,13 @@ impl Plan { let ip = 
sled.addr_alloc.next().expect("Not enough addrs"); let port = omicron_common::address::COCKROACH_PORT; let address = SocketAddrV6::new(ip, port, 0, 0); - let zone = dns_builder.host_zone(id, ip).unwrap(); dns_builder - .service_backend_zone(ServiceName::Cockroach, &zone, port) + .host_zone_with_one_backend( + id, + ip, + ServiceName::Cockroach, + port, + ) .unwrap(); let dataset_name = sled.alloc_from_u2_zpool(DatasetKind::CockroachDb)?; @@ -427,11 +411,11 @@ impl Plan { let internal_ip = sled.addr_alloc.next().expect("Not enough addrs"); let http_port = omicron_common::address::DNS_HTTP_PORT; let http_address = SocketAddrV6::new(internal_ip, http_port, 0, 0); - let zone = dns_builder.host_zone(id, internal_ip).unwrap(); dns_builder - .service_backend_zone( + .host_zone_with_one_backend( + id, + internal_ip, ServiceName::ExternalDns, - &zone, http_port, ) .unwrap(); @@ -463,11 +447,11 @@ impl Plan { }; let id = Uuid::new_v4(); let address = sled.addr_alloc.next().expect("Not enough addrs"); - let zone = dns_builder.host_zone(id, address).unwrap(); dns_builder - .service_backend_zone( + .host_zone_with_one_backend( + id, + address, ServiceName::Nexus, - &zone, omicron_common::address::NEXUS_INTERNAL_PORT, ) .unwrap(); @@ -506,11 +490,11 @@ impl Plan { }; let id = Uuid::new_v4(); let address = sled.addr_alloc.next().expect("Not enough addrs"); - let zone = dns_builder.host_zone(id, address).unwrap(); dns_builder - .service_backend_zone( + .host_zone_with_one_backend( + id, + address, ServiceName::Oximeter, - &zone, omicron_common::address::OXIMETER_PORT, ) .unwrap(); @@ -540,9 +524,13 @@ impl Plan { let ip = sled.addr_alloc.next().expect("Not enough addrs"); let port = omicron_common::address::CLICKHOUSE_PORT; let address = SocketAddrV6::new(ip, port, 0, 0); - let zone = dns_builder.host_zone(id, ip).unwrap(); dns_builder - .service_backend_zone(ServiceName::Clickhouse, &zone, port) + .host_zone_with_one_backend( + id, + ip, + ServiceName::Clickhouse, + port, + ) .unwrap(); let dataset_name = sled.alloc_from_u2_zpool(DatasetKind::Clickhouse)?; @@ -572,11 +560,11 @@ impl Plan { let ip = sled.addr_alloc.next().expect("Not enough addrs"); let port = omicron_common::address::CLICKHOUSE_KEEPER_PORT; let address = SocketAddrV6::new(ip, port, 0, 0); - let zone = dns_builder.host_zone(id, ip).unwrap(); dns_builder - .service_backend_zone( + .host_zone_with_one_backend( + id, + ip, ServiceName::ClickhouseKeeper, - &zone, port, ) .unwrap(); @@ -605,9 +593,13 @@ impl Plan { let address = sled.addr_alloc.next().expect("Not enough addrs"); let port = omicron_common::address::CRUCIBLE_PANTRY_PORT; let id = Uuid::new_v4(); - let zone = dns_builder.host_zone(id, address).unwrap(); dns_builder - .service_backend_zone(ServiceName::CruciblePantry, &zone, port) + .host_zone_with_one_backend( + id, + address, + ServiceName::CruciblePantry, + port, + ) .unwrap(); sled.request.zones.push(OmicronZoneConfig { id, @@ -626,11 +618,11 @@ impl Plan { let port = omicron_common::address::CRUCIBLE_PORT; let address = SocketAddrV6::new(ip, port, 0, 0); let id = Uuid::new_v4(); - let zone = dns_builder.host_zone(id, ip).unwrap(); dns_builder - .service_backend_zone( + .host_zone_with_one_backend( + id, + ip, ServiceName::Crucible(id), - &zone, port, ) .unwrap(); @@ -653,7 +645,6 @@ impl Plan { for (idx, sled) in sled_info.iter_mut().enumerate() { let id = Uuid::new_v4(); let address = sled.addr_alloc.next().expect("Not enough addrs"); - let zone = dns_builder.host_zone(id, address).unwrap(); let ntp_address = 
SocketAddrV6::new(address, NTP_PORT, 0, 0); let (zone_type, svcname) = if idx < BOUNDARY_NTP_COUNT { @@ -683,7 +674,9 @@ impl Plan { ) }; - dns_builder.service_backend_zone(svcname, &zone, NTP_PORT).unwrap(); + dns_builder + .host_zone_with_one_backend(id, address, svcname, NTP_PORT) + .unwrap(); sled.request.zones.push(OmicronZoneConfig { id, From e56e2394eb5f581a1957c47c520344d85533e704 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karen=20C=C3=A1rcamo?= Date: Wed, 21 Feb 2024 16:54:39 +1300 Subject: [PATCH 026/157] Create composite crucible and pantry packages (#4927) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Create new packages for crucible and pantry to include the zone network config service. Depends on https://github.com/oxidecomputer/crucible/pull/1096. These two PRs should be merged in coordination Related: https://github.com/oxidecomputer/omicron/issues/1898 ### Crucible updates This PR also merges a few changes from Crucible: * fe0c5c7 - [smf] Use new zone network config service * 3d48060 - (upstream/main) Move a few methods into downstairs * b01e15c - Remove extra clone in upstairs read * b4f37b4 - Make `crucible-downstairs` not depend on upstairs * 733b7f9 - Update Rust crate rusqlite to 0.31 * 961e971 - Update Rust crate reedline to 0.29.0 * b946a04 - Update Rust crate clap to 4.5 * 39f1f3f - Update Rust crate indicatif to 0.17.8 * 4ea9387 - Update progenitor to bc0bb4b * ace10f4 - Do not 500 on snapshot delete for deleted region * 4105133 - Drop jobs from Offline downstairs. * 43dace9 - `Mutex` → `Work` * a1f3207 - Added a contributing.md * 13b8669 - Remove ExtentFlushClose::source_downstairs * 9b3f366 - Remove unnecessary mutexes from Downstairs --- .github/buildomat/jobs/package.sh | 4 ++-- package-manifest.toml | 29 +++++++++++++++++++++++------ sled-agent/src/services.rs | 22 ++++++++++++++-------- 3 files changed, 39 insertions(+), 16 deletions(-) diff --git a/.github/buildomat/jobs/package.sh b/.github/buildomat/jobs/package.sh index 8546a67a4f..13f374779c 100755 --- a/.github/buildomat/jobs/package.sh +++ b/.github/buildomat/jobs/package.sh @@ -111,8 +111,8 @@ zones=( out/clickhouse.tar.gz out/clickhouse_keeper.tar.gz out/cockroachdb.tar.gz - out/crucible-pantry.tar.gz - out/crucible.tar.gz + out/crucible-pantry-zone.tar.gz + out/crucible-zone.tar.gz out/external-dns.tar.gz out/internal-dns.tar.gz out/omicron-nexus.tar.gz diff --git a/package-manifest.toml b/package-manifest.toml index 9b72dd7d18..d7f42794ee 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -389,13 +389,28 @@ source.packages = [ "sp-sim.tar.gz" ] output.type = "zone" output.intermediate_only = true +[package.crucible-zone] +service_name = "crucible" +only_for_targets.image = "standard" +source.type = "composite" +source.packages = [ "crucible.tar.gz", "zone-network-setup.tar.gz" ] +output.type = "zone" + + +[package.crucible-pantry-zone] +service_name = "crucible_pantry" +only_for_targets.image = "standard" +source.type = "composite" +source.packages = [ "crucible-pantry.tar.gz", "zone-network-setup.tar.gz" ] +output.type = "zone" + # Packages not built within Omicron, but which must be imported. # Refer to # https://github.com/oxidecomputer/crucible/blob/main/package/README.md # for instructions on building this manually. 
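# (The prebuilt `crucible` and `crucible-pantry` packages below are now marked
# `intermediate_only`; the composite `crucible-zone` and `crucible-pantry-zone`
# packages above bundle them with `zone-network-setup.tar.gz`.)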
[package.crucible] -service_name = "crucible" +service_name = "crucible_prebuilt" only_for_targets.image = "standard" # To manually override the package source (for example, to test a change in # both Crucible and Omicron simultaneously): @@ -405,22 +420,24 @@ only_for_targets.image = "standard" # 3. Use source.type = "manual" instead of "prebuilt" source.type = "prebuilt" source.repo = "crucible" -source.commit = "796dce526dd7ed7b52a0429a486ccba4a9da1ce5" +source.commit = "fe0c5c7909707a0f826025be4fe2bbf5f6e0206f" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible.sha256.txt -source.sha256 = "8b654627a4250e8d444133cf3130838d224b13e53f3e48cf0d031314d6f05ee0" +source.sha256 = "5da4f93b16fc7c0f3cc3a67919dbaa3f143cc07b703183a236f5c98b61504d15" output.type = "zone" +output.intermediate_only = true [package.crucible-pantry] -service_name = "crucible_pantry" +service_name = "crucible_pantry_prebuilt" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "crucible" -source.commit = "796dce526dd7ed7b52a0429a486ccba4a9da1ce5" +source.commit = "fe0c5c7909707a0f826025be4fe2bbf5f6e0206f" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-pantry.sha256.txt -source.sha256 = "8602b2d6e7beb0731ae2be481715c94795657306d6013cc6d81fd60c4784a6ed" +source.sha256 = "ed5027cc37c5ba4f2b9a568528f5bb49deedccaaa60bd770311c7bface6aa02b" output.type = "zone" +output.intermediate_only = true # Refer to # https://github.com/oxidecomputer/propolis/blob/master/package/README.md diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index bc40187b38..bcd648cd2d 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -1708,11 +1708,15 @@ impl ServiceManager { let Some(info) = self.inner.sled_info.get() else { return Err(Error::SledAgentNotReady); }; - let datalink = installed_zone.get_control_vnic_name(); - let gateway = &info.underlay_address.to_string(); let listen_addr = &underlay_address.to_string(); let listen_port = &CRUCIBLE_PORT.to_string(); + let nw_setup_service = Self::zone_network_setup_install( + info, + &installed_zone, + listen_addr, + )?; + let dataset_name = DatasetName::new( dataset.pool_name.clone(), DatasetKind::Crucible, @@ -1720,8 +1724,6 @@ impl ServiceManager { .full_name(); let uuid = &Uuid::new_v4().to_string(); let config = PropertyGroupBuilder::new("config") - .add_property("datalink", "astring", datalink) - .add_property("gateway", "astring", gateway) .add_property("dataset", "astring", &dataset_name) .add_property("listen_addr", "astring", listen_addr) .add_property("listen_port", "astring", listen_port) @@ -1729,6 +1731,7 @@ impl ServiceManager { .add_property("store", "astring", "/data"); let profile = ProfileBuilder::new("omicron") + .add_service(nw_setup_service) .add_service(disabled_ssh_service) .add_service( ServiceBuilder::new("oxide/crucible/agent") @@ -1759,18 +1762,21 @@ impl ServiceManager { return Err(Error::SledAgentNotReady); }; - let datalink = installed_zone.get_control_vnic_name(); - let gateway = &info.underlay_address.to_string(); let listen_addr = &underlay_address.to_string(); let listen_port = &CRUCIBLE_PANTRY_PORT.to_string(); + let nw_setup_service = Self::zone_network_setup_install( + info, + &installed_zone, + listen_addr, + )?; + let config = PropertyGroupBuilder::new("config") - .add_property("datalink", "astring", datalink) - 
.add_property("gateway", "astring", gateway) .add_property("listen_addr", "astring", listen_addr) .add_property("listen_port", "astring", listen_port); let profile = ProfileBuilder::new("omicron") + .add_service(nw_setup_service) .add_service(disabled_ssh_service) .add_service( ServiceBuilder::new("oxide/crucible/pantry") From 6993b410c04df7c9295720d6e68b297517be451b Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Wed, 21 Feb 2024 05:23:47 +0000 Subject: [PATCH 027/157] chore(deps): update taiki-e/install-action digest to 12af778 (#5109) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`6943331` -> `12af778`](https://togithub.com/taiki-e/install-action/compare/6943331...12af778) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. â™» **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 5927cb43e6..68bc323a07 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@6943331e01261cdff7420bbc2508cb463574e404 # v2 + uses: taiki-e/install-action@12af778b97addf4c562c75a0564dc7e7dc5339a5 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From 55ef8daa573669c69e9730ac6b18139f9a648698 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Wed, 21 Feb 2024 17:13:36 -0500 Subject: [PATCH 028/157] Reconfigurator: Record external networking allocations when realizing a blueprint (#5045) This PR expands blueprint execution to record external IPs and created NICs for Nexus, Boundary NTP, and External DNS in crdb _before_ sending new zone requests to sled-agents. The implementation has a very obvious TOCTOU race, but I think it's okay (as explained in the comments inline). If two Nexuses try to realize the same blueprint simultaneously and both see no records present, only one will succeed to insert, and the other will spuriously fail. Assuming that failure causes a retry, subsequent attempts to realize the same blueprint will succeed, as the required records will be present. If this seems wrong, please holler! I'd like to give this a spin on either madrid or one of the software testbeds before merging, but I think this is ready for review. 
---
 nexus/blueprint-execution/src/lib.rs          |  13 +-
 .../src/resource_allocation.rs                | 992 ++++++++++++++++++
 nexus/db-model/src/external_ip.rs             |  14 +-
 nexus/db-model/src/network_interface.rs       |   2 +-
 .../src/db/datastore/external_ip.rs           |  23 +-
 .../src/db/datastore/network_interface.rs     |  65 +-
 .../db-queries/src/db/queries/external_ip.rs  |  44 +
 7 files changed, 1140 insertions(+), 13 deletions(-)
 create mode 100644 nexus/blueprint-execution/src/resource_allocation.rs

diff --git a/nexus/blueprint-execution/src/lib.rs b/nexus/blueprint-execution/src/lib.rs
index a13acdf265..d6f5f8fc31 100644
--- a/nexus/blueprint-execution/src/lib.rs
+++ b/nexus/blueprint-execution/src/lib.rs
@@ -21,6 +21,7 @@ use uuid::Uuid;

 mod dns;
 mod omicron_zones;
+mod resource_allocation;

 struct Sled {
     id: Uuid,
@@ -69,6 +70,14 @@ where
         "blueprint_id" => ?blueprint.id
     );

+    resource_allocation::ensure_zone_resources_allocated(
+        &opctx,
+        datastore,
+        &blueprint.omicron_zones,
+    )
+    .await
+    .map_err(|err| vec![err])?;
+
     let sleds_by_id: BTreeMap<Uuid, Sled> = datastore
         .sled_list_all_batched(&opctx)
         .await
@@ -82,9 +91,9 @@
     dns::deploy_dns(
         &opctx,
-        &datastore,
+        datastore,
         String::from(nexus_label),
-        &blueprint,
+        blueprint,
         &sleds_by_id,
     )
     .await
diff --git a/nexus/blueprint-execution/src/resource_allocation.rs b/nexus/blueprint-execution/src/resource_allocation.rs
new file mode 100644
index 0000000000..7f3ebb9876
--- /dev/null
+++ b/nexus/blueprint-execution/src/resource_allocation.rs
@@ -0,0 +1,992 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Manages allocation of resources required for blueprint realization
+
+use anyhow::bail;
+use anyhow::Context;
+use nexus_db_model::IncompleteNetworkInterface;
+use nexus_db_model::Name;
+use nexus_db_model::SqlU16;
+use nexus_db_model::VpcSubnet;
+use nexus_db_queries::context::OpContext;
+use nexus_db_queries::db::fixed_data::vpc_subnet::DNS_VPC_SUBNET;
+use nexus_db_queries::db::fixed_data::vpc_subnet::NEXUS_VPC_SUBNET;
+use nexus_db_queries::db::fixed_data::vpc_subnet::NTP_VPC_SUBNET;
+use nexus_db_queries::db::DataStore;
+use nexus_types::deployment::NetworkInterface;
+use nexus_types::deployment::NetworkInterfaceKind;
+use nexus_types::deployment::OmicronZoneType;
+use nexus_types::deployment::OmicronZonesConfig;
+use nexus_types::deployment::SourceNatConfig;
+use omicron_common::api::external::IdentityMetadataCreateParams;
+use slog::info;
+use slog::warn;
+use std::collections::BTreeMap;
+use std::net::IpAddr;
+use std::net::SocketAddr;
+use uuid::Uuid;
+
+pub(crate) async fn ensure_zone_resources_allocated(
+    opctx: &OpContext,
+    datastore: &DataStore,
+    zones: &BTreeMap<Uuid, OmicronZonesConfig>,
+) -> anyhow::Result<()> {
+    let allocator = ResourceAllocator { opctx, datastore };
+
+    for config in zones.values() {
+        for z in &config.zones {
+            match &z.zone_type {
+                OmicronZoneType::Nexus { external_ip, nic, .. } => {
+                    allocator
+                        .ensure_nexus_external_networking_allocated(
+                            z.id,
+                            *external_ip,
+                            nic,
+                        )
+                        .await?;
+                }
+                OmicronZoneType::ExternalDns { dns_address, nic, .. } => {
+                    allocator
+                        .ensure_external_dns_external_networking_allocated(
+                            z.id,
+                            dns_address,
+                            nic,
+                        )
+                        .await?;
+                }
+                OmicronZoneType::BoundaryNtp { snat_cfg, nic, .. } => {
+                    allocator
+                        .ensure_boundary_ntp_external_networking_allocated(
+                            z.id, snat_cfg, nic,
+                        )
+                        .await?;
+                }
+                OmicronZoneType::InternalNtp { .. }
+                | OmicronZoneType::Clickhouse { ..
} + | OmicronZoneType::ClickhouseKeeper { .. } + | OmicronZoneType::CockroachDb { .. } + | OmicronZoneType::Crucible { .. } + | OmicronZoneType::CruciblePantry { .. } + | OmicronZoneType::InternalDns { .. } + | OmicronZoneType::Oximeter { .. } => (), + } + } + } + + Ok(()) +} + +struct ResourceAllocator<'a> { + opctx: &'a OpContext, + datastore: &'a DataStore, +} + +impl<'a> ResourceAllocator<'a> { + // Helper function to determine whether a given external IP address is + // already allocated to a specific service zone. + async fn is_external_ip_already_allocated( + &self, + zone_type: &'static str, + zone_id: Uuid, + external_ip: IpAddr, + port_range: Option<(u16, u16)>, + ) -> anyhow::Result { + let allocated_ips = self + .datastore + .service_lookup_external_ips(self.opctx, zone_id) + .await + .with_context(|| { + format!( + "failed to look up external IPs for {zone_type} {zone_id}" + ) + })?; + + if !allocated_ips.is_empty() { + // All the service zones that want external IP addresses only expect + // to have a single IP. This service already has (at least) one: + // make sure this list includes the one we want, or return an error. + for allocated_ip in &allocated_ips { + if allocated_ip.ip.ip() == external_ip + && port_range + .map(|(first, last)| { + allocated_ip.first_port == SqlU16(first) + && allocated_ip.last_port == SqlU16(last) + }) + .unwrap_or(true) + { + info!( + self.opctx.log, "found already-allocated external IP"; + "zone_type" => zone_type, + "zone_id" => %zone_id, + "ip" => %external_ip, + ); + return Ok(true); + } + } + + warn!( + self.opctx.log, "zone has unexpected IPs allocated"; + "zone_type" => zone_type, + "zone_id" => %zone_id, + "want_ip" => %external_ip, + "allocated_ips" => ?allocated_ips, + ); + bail!( + "zone {zone_id} already has {} non-matching IP(s) allocated", + allocated_ips.len() + ); + } + + info!( + self.opctx.log, "external IP allocation required for zone"; + "zone_type" => zone_type, + "zone_id" => %zone_id, + "ip" => %external_ip, + ); + + Ok(false) + } + + // Helper function to determine whether a given NIC is already allocated to + // a specific service zone. + async fn is_nic_already_allocated( + &self, + zone_type: &'static str, + zone_id: Uuid, + nic: &NetworkInterface, + ) -> anyhow::Result { + let allocated_nics = self + .datastore + .service_list_network_interfaces(self.opctx, zone_id) + .await + .with_context(|| { + format!("failed to look up NICs for {zone_type} {zone_id}") + })?; + + if !allocated_nics.is_empty() { + // All the service zones that want NICs only expect to have a single + // one. Bail out here if this zone already has one or more allocated + // NICs but not the one we think it needs. + // + // This doesn't check the allocated NIC's subnet against our NICs, + // because that would require an extra DB lookup. We'll assume if + // these main properties are correct, the subnet is too. 
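+            // A recorded NIC only counts as a match if its IP, MAC, slot,
+            // and primary flag all agree with what the blueprint wants;
+            // anything else is reported as an unexpected allocation below.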
+            for allocated_nic in &allocated_nics {
+                if allocated_nic.ip.ip() == nic.ip
+                    && *allocated_nic.mac == nic.mac
+                    && allocated_nic.slot == i16::from(nic.slot)
+                    && allocated_nic.primary == nic.primary
+                {
+                    info!(
+                        self.opctx.log, "found already-allocated NIC";
+                        "zone_type" => zone_type,
+                        "zone_id" => %zone_id,
+                        "nic" => ?allocated_nic,
+                    );
+                    return Ok(true);
+                }
+            }
+
+            warn!(
+                self.opctx.log, "zone has unexpected NICs allocated";
+                "zone_type" => zone_type,
+                "zone_id" => %zone_id,
+                "want_nic" => ?nic,
+                "allocated_nics" => ?allocated_nics,
+            );
+
+            bail!(
+                "zone {zone_id} already has {} non-matching NIC(s) allocated",
+                allocated_nics.len()
+            );
+        }
+
+        info!(
+            self.opctx.log, "NIC allocation required for zone";
+            "zone_type" => zone_type,
+            "zone_id" => %zone_id,
+            "nic" => ?nic,
+        );
+
+        Ok(false)
+    }
+
+    // Nexus and ExternalDns both use non-SNAT service IPs; this method is used
+    // to allocate external networking for both of them.
+    async fn ensure_external_service_ip(
+        &self,
+        zone_type: &'static str,
+        service_id: Uuid,
+        external_ip: IpAddr,
+        ip_name: &Name,
+    ) -> anyhow::Result<()> {
+        // Only attempt to allocate `external_ip` if it isn't already assigned
+        // to this zone.
+        //
+        // Checking for the existence of the external IP and then creating it
+        // if not found introduces a classic TOCTOU race: what if another
+        // Nexus is running concurrently, and we both check and see that the
+        // IP is not allocated, then both attempt to create it? We believe
+        // this is okay: the loser of the race (i.e., the one whose create
+        // tries to commit second) will fail to allocate the IP, which will
+        // bubble out and prevent realization of the current blueprint. That's
+        // exactly what we want if two Nexuses try to realize the same
+        // blueprint at the same time.
+        if self
+            .is_external_ip_already_allocated(
+                zone_type,
+                service_id,
+                external_ip,
+                None,
+            )
+            .await?
+        {
+            return Ok(());
+        }
+        let ip_id = Uuid::new_v4();
+        let description = zone_type;
+        self.datastore
+            .allocate_explicit_service_ip(
+                self.opctx,
+                ip_id,
+                ip_name,
+                description,
+                service_id,
+                external_ip,
+            )
+            .await
+            .with_context(|| {
+                format!(
+                    "failed to allocate IP to {zone_type} {service_id}: \
+                     {external_ip}"
+                )
+            })?;
+
+        info!(
+            self.opctx.log, "successfully allocated external IP";
+            "zone_type" => zone_type,
+            "zone_id" => %service_id,
+            "ip" => %external_ip,
+            "ip_id" => %ip_id,
+        );
+
+        Ok(())
+    }
+
+    // BoundaryNtp uses a SNAT service IP; this method is similar to
+    // `ensure_external_service_ip` but accounts for that.
+    async fn ensure_external_service_snat_ip(
+        &self,
+        zone_type: &'static str,
+        service_id: Uuid,
+        snat: &SourceNatConfig,
+    ) -> anyhow::Result<()> {
+        // Only attempt to allocate `external_ip` if it isn't already assigned
+        // to this zone.
+        //
+        // This is subject to the same kind of TOCTOU race as described for IP
+        // allocation in `ensure_external_service_ip`, and we believe it's okay
+        // for the same reasons as described there.
+        if self
+            .is_external_ip_already_allocated(
+                zone_type,
+                service_id,
+                snat.ip,
+                Some((snat.first_port, snat.last_port)),
+            )
+            .await?
+ { + return Ok(()); + } + + let ip_id = Uuid::new_v4(); + self.datastore + .allocate_explicit_service_snat_ip( + self.opctx, + ip_id, + service_id, + snat.ip, + (snat.first_port, snat.last_port), + ) + .await + .with_context(|| { + format!( + "failed to allocate snat IP to {zone_type} {service_id}: \ + {snat:?}" + ) + })?; + + info!( + self.opctx.log, "successfully allocated external SNAT IP"; + "zone_type" => zone_type, + "zone_id" => %service_id, + "snat" => ?snat, + "ip_id" => %ip_id, + ); + + Ok(()) + } + + // All service zones with external connectivity get service vNICs. + async fn ensure_service_nic( + &self, + zone_type: &'static str, + service_id: Uuid, + nic: &NetworkInterface, + nic_subnet: &VpcSubnet, + ) -> anyhow::Result<()> { + // We don't pass `nic.kind` into the database below, but instead + // explicitly call `service_create_network_interface`. Ensure this is + // indeed a service NIC. + match &nic.kind { + NetworkInterfaceKind::Instance { .. } => { + bail!("invalid NIC kind (expected service, got instance)") + } + NetworkInterfaceKind::Service { .. } => (), + } + + // Only attempt to allocate `nic` if it isn't already assigned to this + // zone. + // + // This is subject to the same kind of TOCTOU race as described for IP + // allocation in `ensure_external_service_ip`, and we believe it's okay + // for the same reasons as described there. + if self.is_nic_already_allocated(zone_type, service_id, nic).await? { + return Ok(()); + } + let nic_arg = IncompleteNetworkInterface::new_service( + nic.id, + service_id, + nic_subnet.clone(), + IdentityMetadataCreateParams { + name: nic.name.clone(), + description: format!("{zone_type} service vNIC"), + }, + nic.ip, + nic.mac, + nic.slot, + ) + .with_context(|| { + format!( + "failed to convert NIC into IncompleteNetworkInterface: {nic:?}" + ) + })?; + let created_nic = self + .datastore + .service_create_network_interface(self.opctx, nic_arg) + .await + .map_err(|err| err.into_external()) + .with_context(|| { + format!( + "failed to allocate NIC to {zone_type} {service_id}: \ + {nic:?}" + ) + })?; + + // We don't pass all the properties of `nic` into the create request + // above. Double-check that the properties the DB assigned match + // what we expect. + // + // We do not check `nic.vni`, because it's not stored in the + // database. (All services are given the constant vni + // `Vni::SERVICES_VNI`.) + if created_nic.primary != nic.primary + || created_nic.slot != i16::from(nic.slot) + { + warn!( + self.opctx.log, "unexpected property on allocated NIC"; + "db_primary" => created_nic.primary, + "expected_primary" => nic.primary, + "db_slot" => created_nic.slot, + "expected_slot" => nic.slot, + ); + + // Now what? We've allocated a NIC in the database but it's + // incorrect. Should we try to delete it? That would be best + // effort (we could fail to delete, or we could crash between + // creation and deletion). + // + // We only expect services to have one NIC, so the only way it + // should be possible to get a different primary/slot value is + // if somehow this same service got a _different_ NIC allocated + // to it in the TOCTOU race window above. That should be + // impossible with the way we generate blueprints, so we'll just + // return a scary error here and expect to never see it. 
+            bail!(
+                "database cleanup required: \
+                 unexpected NIC ({created_nic:?}) \
+                 allocated for {zone_type} {service_id}"
+            );
+        }
+
+        info!(
+            self.opctx.log, "successfully allocated service vNIC";
+            "zone_type" => zone_type,
+            "zone_id" => %service_id,
+            "nic" => ?nic,
+        );
+
+        Ok(())
+    }
+
+    async fn ensure_nexus_external_networking_allocated(
+        &self,
+        zone_id: Uuid,
+        external_ip: IpAddr,
+        nic: &NetworkInterface,
+    ) -> anyhow::Result<()> {
+        self.ensure_external_service_ip(
+            "nexus",
+            zone_id,
+            external_ip,
+            &Name(nic.name.clone()),
+        )
+        .await?;
+        self.ensure_service_nic("nexus", zone_id, nic, &NEXUS_VPC_SUBNET)
+            .await?;
+        Ok(())
+    }
+
+    async fn ensure_external_dns_external_networking_allocated(
+        &self,
+        zone_id: Uuid,
+        dns_address: &str,
+        nic: &NetworkInterface,
+    ) -> anyhow::Result<()> {
+        let dns_address =
+            dns_address.parse::<SocketAddr>().with_context(|| {
+                format!("failed to parse ExternalDns address {dns_address}")
+            })?;
+        self.ensure_external_service_ip(
+            "external_dns",
+            zone_id,
+            dns_address.ip(),
+            &Name(nic.name.clone()),
+        )
+        .await?;
+        self.ensure_service_nic("external_dns", zone_id, nic, &DNS_VPC_SUBNET)
+            .await?;
+        Ok(())
+    }
+
+    async fn ensure_boundary_ntp_external_networking_allocated(
+        &self,
+        zone_id: Uuid,
+        snat: &SourceNatConfig,
+        nic: &NetworkInterface,
+    ) -> anyhow::Result<()> {
+        self.ensure_external_service_snat_ip("ntp", zone_id, snat).await?;
+        self.ensure_service_nic("ntp", zone_id, nic, &NTP_VPC_SUBNET).await?;
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use nexus_test_utils_macros::nexus_test;
+    use nexus_types::deployment::OmicronZoneConfig;
+    use nexus_types::deployment::OmicronZoneDataset;
+    use nexus_types::identity::Resource;
+    use omicron_common::address::IpRange;
+    use omicron_common::address::DNS_OPTE_IPV4_SUBNET;
+    use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET;
+    use omicron_common::address::NTP_OPTE_IPV4_SUBNET;
+    use omicron_common::address::NUM_SOURCE_NAT_PORTS;
+    use omicron_common::api::external::Generation;
+    use omicron_common::api::external::IpNet;
+    use omicron_common::api::external::MacAddr;
+    use omicron_common::api::external::Vni;
+    use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES;
+    use std::net::IpAddr;
+    use std::net::Ipv6Addr;
+    use std::net::SocketAddrV6;
+
+    type ControlPlaneTestContext =
+        nexus_test_utils::ControlPlaneTestContext<omicron_nexus::Server>;
+
+    #[nexus_test]
+    async fn test_allocate_external_networking(
+        cptestctx: &ControlPlaneTestContext,
+    ) {
+        // Set up.
+        let nexus = &cptestctx.server.apictx().nexus;
+        let datastore = nexus.datastore();
+        let opctx = OpContext::for_tests(
+            cptestctx.logctx.log.clone(),
+            datastore.clone(),
+        );
+
+        // Create an external IP range we can use for our services.
+        let external_ip_range = IpRange::try_from((
+            "192.0.2.1".parse::<IpAddr>().unwrap(),
+            "192.0.2.100".parse::<IpAddr>().unwrap(),
+        ))
+        .expect("bad IP range");
+        let mut external_ips = external_ip_range.iter();
+
+        // Add the external IP range to the services IP pool.
+        let (ip_pool, _) = datastore
+            .ip_pools_service_lookup(&opctx)
+            .await
+            .expect("failed to find service IP pool");
+        datastore
+            .ip_pool_add_range(&opctx, &ip_pool, &external_ip_range)
+            .await
+            .expect("failed to expand service IP pool");
+
+        // Generate the values we care about. (Other required zone config params
+        // that we don't care about will be filled in below arbitrarily.)
+ + // Nexus: + let nexus_id = Uuid::new_v4(); + let nexus_external_ip = + external_ips.next().expect("exhausted external_ips"); + let nexus_nic = NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service(nexus_id), + name: "test-nexus".parse().expect("bad name"), + ip: NEXUS_OPTE_IPV4_SUBNET + .iter() + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES) + .unwrap() + .into(), + mac: MacAddr::random_system(), + subnet: IpNet::from(*NEXUS_OPTE_IPV4_SUBNET).into(), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + }; + + // External DNS: + let dns_id = Uuid::new_v4(); + let dns_external_ip = + external_ips.next().expect("exhausted external_ips"); + let dns_nic = NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service(dns_id), + name: "test-external-dns".parse().expect("bad name"), + ip: DNS_OPTE_IPV4_SUBNET + .iter() + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES) + .unwrap() + .into(), + mac: MacAddr::random_system(), + subnet: IpNet::from(*DNS_OPTE_IPV4_SUBNET).into(), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + }; + + // Boundary NTP: + let ntp_id = Uuid::new_v4(); + let ntp_snat = SourceNatConfig { + ip: external_ips.next().expect("exhausted external_ips"), + first_port: NUM_SOURCE_NAT_PORTS, + last_port: 2 * NUM_SOURCE_NAT_PORTS - 1, + }; + let ntp_nic = NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service(ntp_id), + name: "test-external-ntp".parse().expect("bad name"), + ip: NTP_OPTE_IPV4_SUBNET + .iter() + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES) + .unwrap() + .into(), + mac: MacAddr::random_system(), + subnet: IpNet::from(*NTP_OPTE_IPV4_SUBNET).into(), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + }; + + // Build the `zones` map needed by `ensure_zone_resources_allocated`, + // with an arbitrary sled_id. + let mut zones = BTreeMap::new(); + let sled_id = Uuid::new_v4(); + zones.insert( + sled_id, + OmicronZonesConfig { + generation: Generation::new().next(), + zones: vec![ + OmicronZoneConfig { + id: nexus_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: OmicronZoneType::Nexus { + internal_address: Ipv6Addr::LOCALHOST.to_string(), + external_ip: nexus_external_ip, + nic: nexus_nic.clone(), + external_tls: false, + external_dns_servers: Vec::new(), + }, + }, + OmicronZoneConfig { + id: dns_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: OmicronZoneType::ExternalDns { + dataset: OmicronZoneDataset { + pool_name: format!("oxp_{}", Uuid::new_v4()) + .parse() + .expect("bad name"), + }, + http_address: SocketAddrV6::new( + Ipv6Addr::LOCALHOST, + 0, + 0, + 0, + ) + .to_string(), + dns_address: SocketAddr::new(dns_external_ip, 0) + .to_string(), + nic: dns_nic.clone(), + }, + }, + OmicronZoneConfig { + id: ntp_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: OmicronZoneType::BoundaryNtp { + address: SocketAddr::new(dns_external_ip, 0) + .to_string(), + ntp_servers: Vec::new(), + dns_servers: Vec::new(), + domain: None, + nic: ntp_nic.clone(), + snat_cfg: ntp_snat, + }, + }, + ], + }, + ); + + // Initialize resource allocation: this should succeed and create all + // the relevant db records. + ensure_zone_resources_allocated(&opctx, datastore, &zones) + .await + .with_context(|| format!("{zones:#?}")) + .unwrap(); + + // Check that the external IP records were created. 
+ let db_nexus_ips = datastore + .service_lookup_external_ips(&opctx, nexus_id) + .await + .expect("failed to get external IPs"); + assert_eq!(db_nexus_ips.len(), 1); + assert!(db_nexus_ips[0].is_service); + assert_eq!(db_nexus_ips[0].parent_id, Some(nexus_id)); + assert_eq!(db_nexus_ips[0].ip, nexus_external_ip.into()); + assert_eq!(db_nexus_ips[0].first_port, SqlU16(0)); + assert_eq!(db_nexus_ips[0].last_port, SqlU16(65535)); + + let db_dns_ips = datastore + .service_lookup_external_ips(&opctx, dns_id) + .await + .expect("failed to get external IPs"); + assert_eq!(db_dns_ips.len(), 1); + assert!(db_dns_ips[0].is_service); + assert_eq!(db_dns_ips[0].parent_id, Some(dns_id)); + assert_eq!(db_dns_ips[0].ip, dns_external_ip.into()); + assert_eq!(db_dns_ips[0].first_port, SqlU16(0)); + assert_eq!(db_dns_ips[0].last_port, SqlU16(65535)); + + let db_ntp_ips = datastore + .service_lookup_external_ips(&opctx, ntp_id) + .await + .expect("failed to get external IPs"); + assert_eq!(db_ntp_ips.len(), 1); + assert!(db_ntp_ips[0].is_service); + assert_eq!(db_ntp_ips[0].parent_id, Some(ntp_id)); + assert_eq!(db_ntp_ips[0].ip, ntp_snat.ip.into()); + assert_eq!(db_ntp_ips[0].first_port, SqlU16(ntp_snat.first_port)); + assert_eq!(db_ntp_ips[0].last_port, SqlU16(ntp_snat.last_port)); + + // Check that the NIC records were created. + let db_nexus_nics = datastore + .service_list_network_interfaces(&opctx, nexus_id) + .await + .expect("failed to get NICs"); + assert_eq!(db_nexus_nics.len(), 1); + assert_eq!(db_nexus_nics[0].id(), nexus_nic.id); + assert_eq!(db_nexus_nics[0].service_id, nexus_id); + assert_eq!(db_nexus_nics[0].vpc_id, NEXUS_VPC_SUBNET.vpc_id); + assert_eq!(db_nexus_nics[0].subnet_id, NEXUS_VPC_SUBNET.id()); + assert_eq!(*db_nexus_nics[0].mac, nexus_nic.mac); + assert_eq!(db_nexus_nics[0].ip, nexus_nic.ip.into()); + assert_eq!(db_nexus_nics[0].slot, i16::from(nexus_nic.slot)); + assert_eq!(db_nexus_nics[0].primary, nexus_nic.primary); + + let db_dns_nics = datastore + .service_list_network_interfaces(&opctx, dns_id) + .await + .expect("failed to get NICs"); + assert_eq!(db_dns_nics.len(), 1); + assert_eq!(db_dns_nics[0].id(), dns_nic.id); + assert_eq!(db_dns_nics[0].service_id, dns_id); + assert_eq!(db_dns_nics[0].vpc_id, DNS_VPC_SUBNET.vpc_id); + assert_eq!(db_dns_nics[0].subnet_id, DNS_VPC_SUBNET.id()); + assert_eq!(*db_dns_nics[0].mac, dns_nic.mac); + assert_eq!(db_dns_nics[0].ip, dns_nic.ip.into()); + assert_eq!(db_dns_nics[0].slot, i16::from(dns_nic.slot)); + assert_eq!(db_dns_nics[0].primary, dns_nic.primary); + + let db_ntp_nics = datastore + .service_list_network_interfaces(&opctx, ntp_id) + .await + .expect("failed to get NICs"); + assert_eq!(db_ntp_nics.len(), 1); + assert_eq!(db_ntp_nics[0].id(), ntp_nic.id); + assert_eq!(db_ntp_nics[0].service_id, ntp_id); + assert_eq!(db_ntp_nics[0].vpc_id, NTP_VPC_SUBNET.vpc_id); + assert_eq!(db_ntp_nics[0].subnet_id, NTP_VPC_SUBNET.id()); + assert_eq!(*db_ntp_nics[0].mac, ntp_nic.mac); + assert_eq!(db_ntp_nics[0].ip, ntp_nic.ip.into()); + assert_eq!(db_ntp_nics[0].slot, i16::from(ntp_nic.slot)); + assert_eq!(db_ntp_nics[0].primary, ntp_nic.primary); + + // We should be able to run the function again with the same inputs, and + // it should succeed without inserting any new records. 
+ ensure_zone_resources_allocated(&opctx, datastore, &zones) + .await + .with_context(|| format!("{zones:#?}")) + .unwrap(); + assert_eq!( + db_nexus_ips, + datastore + .service_lookup_external_ips(&opctx, nexus_id) + .await + .expect("failed to get external IPs") + ); + assert_eq!( + db_dns_ips, + datastore + .service_lookup_external_ips(&opctx, dns_id) + .await + .expect("failed to get external IPs") + ); + assert_eq!( + db_ntp_ips, + datastore + .service_lookup_external_ips(&opctx, ntp_id) + .await + .expect("failed to get external IPs") + ); + assert_eq!( + db_nexus_nics, + datastore + .service_list_network_interfaces(&opctx, nexus_id) + .await + .expect("failed to get NICs") + ); + assert_eq!( + db_dns_nics, + datastore + .service_list_network_interfaces(&opctx, dns_id) + .await + .expect("failed to get NICs") + ); + assert_eq!( + db_ntp_nics, + datastore + .service_list_network_interfaces(&opctx, ntp_id) + .await + .expect("failed to get NICs") + ); + + // Now that we've tested the happy path, try some requests that ought to + // fail because the request includes an external IP that doesn't match + // the already-allocated external IPs from above. + let bogus_ip = external_ips.next().expect("exhausted external_ips"); + for mutate_zones_fn in [ + // non-matching IP on Nexus + (&|config: &mut OmicronZonesConfig| { + for zone in &mut config.zones { + if let OmicronZoneType::Nexus { + ref mut external_ip, .. + } = &mut zone.zone_type + { + *external_ip = bogus_ip; + return format!( + "zone {} already has 1 non-matching IP", + zone.id + ); + } + } + panic!("didn't find expected zone"); + }) as &dyn Fn(&mut OmicronZonesConfig) -> String, + // non-matching IP on External DNS + &|config| { + for zone in &mut config.zones { + if let OmicronZoneType::ExternalDns { + ref mut dns_address, + .. + } = &mut zone.zone_type + { + *dns_address = SocketAddr::new(bogus_ip, 0).to_string(); + return format!( + "zone {} already has 1 non-matching IP", + zone.id + ); + } + } + panic!("didn't find expected zone"); + }, + // non-matching SNAT port range on Boundary NTP + &|config| { + for zone in &mut config.zones { + if let OmicronZoneType::BoundaryNtp { + ref mut snat_cfg, + .. + } = &mut zone.zone_type + { + snat_cfg.first_port += NUM_SOURCE_NAT_PORTS; + snat_cfg.last_port += NUM_SOURCE_NAT_PORTS; + return format!( + "zone {} already has 1 non-matching IP", + zone.id + ); + } + } + panic!("didn't find expected zone"); + }, + ] { + // Run `mutate_zones_fn` on our config... + let mut config = + zones.remove(&sled_id).expect("missing zone config"); + let orig_config = config.clone(); + let expected_error = mutate_zones_fn(&mut config); + zones.insert(sled_id, config); + + // ... check that we get the error we expect + let err = + ensure_zone_resources_allocated(&opctx, datastore, &zones) + .await + .expect_err("unexpected success"); + assert!( + err.to_string().contains(&expected_error), + "expected {expected_error:?}, got {err:#}" + ); + + // ... and restore the original, valid config before iterating. + zones.insert(sled_id, orig_config); + } + + // Also try some requests that ought to fail because the request + // includes a NIC that doesn't match the already-allocated NICs from + // above. + // + // All three zone types have a `nic` property, so here our mutating + // function only modifies that, and the body of our loop tries it on all + // three to ensure we get the errors we expect no matter the zone type. 
+ for mutate_nic_fn in [ + // switch kind from Service to Instance + (&|_: Uuid, nic: &mut NetworkInterface| { + match &nic.kind { + NetworkInterfaceKind::Instance { .. } => { + panic!( + "invalid NIC kind (expected service, got instance)" + ) + } + NetworkInterfaceKind::Service(id) => { + let id = *id; + nic.kind = NetworkInterfaceKind::Instance(id); + } + } + "invalid NIC kind".to_string() + }) as &dyn Fn(Uuid, &mut NetworkInterface) -> String, + // non-matching IP + &|zone_id, nic| { + nic.ip = bogus_ip; + format!("zone {zone_id} already has 1 non-matching NIC") + }, + ] { + // Try this NIC mutation on Nexus... + let mut mutated_zones = zones.clone(); + for zone in &mut mutated_zones + .get_mut(&sled_id) + .expect("missing sled") + .zones + { + if let OmicronZoneType::Nexus { ref mut nic, .. } = + &mut zone.zone_type + { + let expected_error = mutate_nic_fn(zone.id, nic); + + let err = ensure_zone_resources_allocated( + &opctx, + datastore, + &mutated_zones, + ) + .await + .expect_err("unexpected success"); + + assert!( + err.to_string().contains(&expected_error), + "expected {expected_error:?}, got {err:#}" + ); + + break; + } + } + + // ... and again on ExternalDns + let mut mutated_zones = zones.clone(); + for zone in &mut mutated_zones + .get_mut(&sled_id) + .expect("missing sled") + .zones + { + if let OmicronZoneType::ExternalDns { ref mut nic, .. } = + &mut zone.zone_type + { + let expected_error = mutate_nic_fn(zone.id, nic); + + let err = ensure_zone_resources_allocated( + &opctx, + datastore, + &mutated_zones, + ) + .await + .expect_err("unexpected success"); + + assert!( + err.to_string().contains(&expected_error), + "expected {expected_error:?}, got {err:#}" + ); + + break; + } + } + + // ... and again on BoundaryNtp + let mut mutated_zones = zones.clone(); + for zone in &mut mutated_zones + .get_mut(&sled_id) + .expect("missing sled") + .zones + { + if let OmicronZoneType::BoundaryNtp { ref mut nic, .. } = + &mut zone.zone_type + { + let expected_error = mutate_nic_fn(zone.id, nic); + + let err = ensure_zone_resources_allocated( + &opctx, + datastore, + &mutated_zones, + ) + .await + .expect_err("unexpected success"); + + assert!( + err.to_string().contains(&expected_error), + "expected {expected_error:?}, got {err:#}" + ); + + break; + } + } + } + } +} diff --git a/nexus/db-model/src/external_ip.rs b/nexus/db-model/src/external_ip.rs index 1e9def4182..b30f91c7c0 100644 --- a/nexus/db-model/src/external_ip.rs +++ b/nexus/db-model/src/external_ip.rs @@ -33,7 +33,7 @@ impl_enum_type!( #[diesel(postgres_type(name = "ip_kind", schema = "public"))] pub struct IpKindEnum; - #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq, Deserialize, Serialize)] + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq, Eq, Deserialize, Serialize)] #[diesel(sql_type = IpKindEnum)] pub enum IpKind; @@ -47,7 +47,7 @@ impl_enum_type!( #[diesel(postgres_type(name = "ip_attach_state"))] pub struct IpAttachStateEnum; - #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq, Deserialize, Serialize)] + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq, Eq, Deserialize, Serialize)] #[diesel(sql_type = IpAttachStateEnum)] pub enum IpAttachState; @@ -89,7 +89,15 @@ impl std::fmt::Display for IpKind { /// API at all, and only provide outbound connectivity to instances, not /// inbound. 
 #[derive(
-    Debug, Clone, Selectable, Queryable, Insertable, Deserialize, Serialize,
+    Debug,
+    Clone,
+    Selectable,
+    Queryable,
+    Insertable,
+    Deserialize,
+    Serialize,
+    PartialEq,
+    Eq,
 )]
 #[diesel(table_name = external_ip)]
 pub struct ExternalIp {
diff --git a/nexus/db-model/src/network_interface.rs b/nexus/db-model/src/network_interface.rs
index 01317fc160..72752ae3f8 100644
--- a/nexus/db-model/src/network_interface.rs
+++ b/nexus/db-model/src/network_interface.rs
@@ -91,7 +91,7 @@ pub struct InstanceNetworkInterface {
 /// The underlying "table" (`service_network_interface`) is actually a view
 /// over the `network_interface` table, that contains only rows with
 /// `kind = 'service'`.
-#[derive(Selectable, Queryable, Clone, Debug, Resource)]
+#[derive(Selectable, Queryable, Clone, Debug, PartialEq, Eq, Resource)]
 #[diesel(table_name = service_network_interface)]
 pub struct ServiceNetworkInterface {
     #[diesel(embed)]
diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs
index 9d4d947476..24439aa3a0 100644
--- a/nexus/db-queries/src/db/datastore/external_ip.rs
+++ b/nexus/db-queries/src/db/datastore/external_ip.rs
@@ -167,6 +167,23 @@ impl DataStore {
         }
     }
 
+    /// Fetch all external IP addresses of any kind for the provided service.
+    pub async fn service_lookup_external_ips(
+        &self,
+        opctx: &OpContext,
+        service_id: Uuid,
+    ) -> LookupResult<Vec<ExternalIp>> {
+        use db::schema::external_ip::dsl;
+        dsl::external_ip
+            .filter(dsl::is_service.eq(true))
+            .filter(dsl::parent_id.eq(service_id))
+            .filter(dsl::time_deleted.is_null())
+            .select(ExternalIp::as_select())
+            .get_results_async(&*self.pool_connection_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
+
     /// Allocates an IP address for internal service usage.
     pub async fn allocate_service_ip(
         &self,
@@ -337,7 +354,8 @@ impl DataStore {
         service_id: Uuid,
         ip: IpAddr,
     ) -> CreateResult<ExternalIp> {
-        let (.., pool) = self.ip_pools_service_lookup(opctx).await?;
+        let (authz_pool, pool) = self.ip_pools_service_lookup(opctx).await?;
+        opctx.authorize(authz::Action::CreateChild, &authz_pool).await?;
         let data = IncompleteExternalIp::for_service_explicit(
             ip_id,
             name,
@@ -361,7 +379,8 @@ impl DataStore {
         ip: IpAddr,
         port_range: (u16, u16),
     ) -> CreateResult<ExternalIp> {
-        let (.., pool) = self.ip_pools_service_lookup(opctx).await?;
+        let (authz_pool, pool) = self.ip_pools_service_lookup(opctx).await?;
+        opctx.authorize(authz::Action::CreateChild, &authz_pool).await?;
         let data = IncompleteExternalIp::for_service_explicit_snat(
             ip_id,
             service_id,
diff --git a/nexus/db-queries/src/db/datastore/network_interface.rs b/nexus/db-queries/src/db/datastore/network_interface.rs
index d715bf3889..f2782e8f67 100644
--- a/nexus/db-queries/src/db/datastore/network_interface.rs
+++ b/nexus/db-queries/src/db/datastore/network_interface.rs
@@ -29,6 +29,7 @@ use async_bb8_diesel::AsyncRunQueryDsl;
 use chrono::Utc;
 use diesel::prelude::*;
 use diesel::result::Error as DieselError;
+use nexus_db_model::ServiceNetworkInterface;
 use omicron_common::api::external;
 use omicron_common::api::external::http_pagination::PaginatedBy;
 use omicron_common::api::external::DeleteResult;
@@ -126,15 +127,69 @@ impl DataStore {
             .map(NetworkInterface::as_instance)
     }
 
-    #[cfg(test)]
+    /// List network interfaces associated with a given service.
+    pub async fn service_list_network_interfaces(
+        &self,
+        opctx: &OpContext,
+        service_id: Uuid,
+    ) -> ListResultVec<ServiceNetworkInterface> {
+        // See the comment in `service_create_network_interface`. There's no
+        // obvious parent for a service network interface (as opposed to
+        // instance network interfaces, which require ListChildren on the
+        // instance to list). As a logical proxy, we check for listing children
+        // of the service IP pool.
+        let (authz_service_ip_pool, _) =
+            self.ip_pools_service_lookup(opctx).await?;
+        opctx
+            .authorize(authz::Action::ListChildren, &authz_service_ip_pool)
+            .await?;
+
+        use db::schema::service_network_interface::dsl;
+        dsl::service_network_interface
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::service_id.eq(service_id))
+            .select(ServiceNetworkInterface::as_select())
+            .get_results_async::<ServiceNetworkInterface>(
+                &*self.pool_connection_authorized(opctx).await?,
+            )
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
+
+    /// Create a network interface attached to the provided service zone.
+    pub async fn service_create_network_interface(
+        &self,
+        opctx: &OpContext,
+        interface: IncompleteNetworkInterface,
+    ) -> Result<ServiceNetworkInterface, network_interface::InsertError> {
+        // In `instance_create_network_interface`, the authz checks are for
+        // creating children of the VpcSubnet and the instance. We don't have an
+        // instance. We do have a VpcSubnet, but for services these are all
+        // fixed data subnets.
+        //
+        // As a proxy auth check that isn't really guarding the right resource
+        // but should logically be equivalent, we can insert an authz check for
+        // creating children of the service IP pool. For any service zone with
+        // external networking, we create an external IP (in the service IP
+        // pool) and a network interface (in the relevant VpcSubnet). Putting
+        // this check here ensures that the caller can't proceed if they also
+        // couldn't proceed with creating the corresponding external IP.
+        let (authz_service_ip_pool, _) = self
+            .ip_pools_service_lookup(opctx)
+            .await
+            .map_err(network_interface::InsertError::External)?;
+        opctx
+            .authorize(authz::Action::CreateChild, &authz_service_ip_pool)
+            .await
+            .map_err(network_interface::InsertError::External)?;
+        self.service_create_network_interface_raw(opctx, interface).await
+    }
+
     pub(crate) async fn service_create_network_interface_raw(
         &self,
         opctx: &OpContext,
         interface: IncompleteNetworkInterface,
-    ) -> Result<
-        db::model::ServiceNetworkInterface,
-        network_interface::InsertError,
-    > {
+    ) -> Result<ServiceNetworkInterface, network_interface::InsertError> {
         if interface.kind != NetworkInterfaceKind::Service {
             return Err(network_interface::InsertError::External(
                 Error::invalid_request(
diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs
index 54595a8444..7456e6e6bb 100644
--- a/nexus/db-queries/src/db/queries/external_ip.rs
+++ b/nexus/db-queries/src/db/queries/external_ip.rs
@@ -1331,6 +1331,18 @@ mod tests {
     // Allocate an IP address as we would for an external, rack-associated
     // service.
     let service1_id = Uuid::new_v4();
+
+    // Check that `service_lookup_external_ips` returns an empty vector for
+    // a service with no external IPs.
+ assert_eq!( + context + .db_datastore + .service_lookup_external_ips(&context.opctx, service1_id) + .await + .expect("Failed to look up service external IPs"), + Vec::new(), + ); + let id1 = Uuid::new_v4(); let ip1 = context .db_datastore @@ -1349,6 +1361,14 @@ mod tests { assert_eq!(ip1.first_port.0, 0); assert_eq!(ip1.last_port.0, u16::MAX); assert_eq!(ip1.parent_id, Some(service1_id)); + assert_eq!( + context + .db_datastore + .service_lookup_external_ips(&context.opctx, service1_id) + .await + .expect("Failed to look up service external IPs"), + vec![ip1], + ); // Allocate an SNat IP let service2_id = Uuid::new_v4(); @@ -1364,6 +1384,14 @@ mod tests { assert_eq!(ip2.first_port.0, 0); assert_eq!(ip2.last_port.0, 16383); assert_eq!(ip2.parent_id, Some(service2_id)); + assert_eq!( + context + .db_datastore + .service_lookup_external_ips(&context.opctx, service2_id) + .await + .expect("Failed to look up service external IPs"), + vec![ip2], + ); // Allocate the next IP address let service3_id = Uuid::new_v4(); @@ -1385,6 +1413,14 @@ mod tests { assert_eq!(ip3.first_port.0, 0); assert_eq!(ip3.last_port.0, u16::MAX); assert_eq!(ip3.parent_id, Some(service3_id)); + assert_eq!( + context + .db_datastore + .service_lookup_external_ips(&context.opctx, service3_id) + .await + .expect("Failed to look up service external IPs"), + vec![ip3], + ); // Once we're out of IP addresses, test that we see the right error. let service3_id = Uuid::new_v4(); @@ -1422,6 +1458,14 @@ mod tests { assert_eq!(ip4.first_port.0, 16384); assert_eq!(ip4.last_port.0, 32767); assert_eq!(ip4.parent_id, Some(service4_id)); + assert_eq!( + context + .db_datastore + .service_lookup_external_ips(&context.opctx, service4_id) + .await + .expect("Failed to look up service external IPs"), + vec![ip4], + ); context.success().await; } From 4de45c8e8138ab50394b8fe0290791d8c707f48e Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 21 Feb 2024 17:26:10 -0800 Subject: [PATCH 029/157] "expensive operation" check was too aggressive (#5113) --- nexus/db-queries/src/context.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/nexus/db-queries/src/context.rs b/nexus/db-queries/src/context.rs index 72da01a5f1..aea4a60e66 100644 --- a/nexus/db-queries/src/context.rs +++ b/nexus/db-queries/src/context.rs @@ -303,10 +303,17 @@ impl OpContext { /// either. Clients and proxies often don't expect long requests and /// apply aggressive timeouts. Depending on the HTTP version, a /// long-running request can tie up the TCP connection. + /// + /// We shouldn't allow these in either internal or external API handlers, + /// but we currently have some internal APIs for exercising some expensive + /// blueprint operations and so we allow these cases here. 
pub fn check_complex_operations_allowed(&self) -> Result<(), Error> { let api_handler = match self.kind { - OpKind::ExternalApiRequest | OpKind::InternalApiRequest => true, - OpKind::Saga | OpKind::Background | OpKind::Test => false, + OpKind::ExternalApiRequest => true, + OpKind::InternalApiRequest + | OpKind::Saga + | OpKind::Background + | OpKind::Test => false, }; if api_handler { Err(Error::internal_error( From 828c79c10e75d7529d13836b6fcc08a155b6fe59 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 22 Feb 2024 05:11:33 +0000 Subject: [PATCH 030/157] chore(deps): update taiki-e/install-action digest to 875aa7b (#5114) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`12af778` -> `875aa7b`](https://togithub.com/taiki-e/install-action/compare/12af778...875aa7b) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. â™» **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 68bc323a07..dc907e93e7 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@12af778b97addf4c562c75a0564dc7e7dc5339a5 # v2 + uses: taiki-e/install-action@875aa7bb88c61a90732e8d159a915df57f27e475 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From 42844311eb688e63ea23361dfb4279b44e65f792 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 22 Feb 2024 07:28:45 +0000 Subject: [PATCH 031/157] chore(deps): update taiki-e/install-action digest to 19e9b54 (#5117) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`875aa7b` -> `19e9b54`](https://togithub.com/taiki-e/install-action/compare/875aa7b...19e9b54) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. â™» **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). 
Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index dc907e93e7..1f55f2f255 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@875aa7bb88c61a90732e8d159a915df57f27e475 # v2 + uses: taiki-e/install-action@19e9b549a48620cc50fcf6e6e866b8fb4eca1b01 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From dea0ea5a1e26464dc9780ae908f028d210f5f637 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 22 Feb 2024 09:25:23 -0500 Subject: [PATCH 032/157] RoT staging/dev and prod/rel 1.0.6 and SP 1.0.8 (#5057) --- .github/buildomat/jobs/tuf-repo.sh | 4 ++-- tools/dvt_dock_version | 2 +- tools/hubris_checksums | 16 ++++++++-------- tools/hubris_version | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/buildomat/jobs/tuf-repo.sh b/.github/buildomat/jobs/tuf-repo.sh index 59fc0fd7a7..14c2293f5b 100644 --- a/.github/buildomat/jobs/tuf-repo.sh +++ b/.github/buildomat/jobs/tuf-repo.sh @@ -278,8 +278,8 @@ EOF done } # usage: SERIES ROT_DIR ROT_VERSION BOARDS... -add_hubris_artifacts rot-staging-dev staging/dev cert-staging-dev-v1.0.5 "${ALL_BOARDS[@]}" -add_hubris_artifacts rot-prod-rel prod/rel cert-prod-rel-v1.0.5 "${ALL_BOARDS[@]}" +add_hubris_artifacts rot-staging-dev staging/dev cert-staging-dev-v1.0.6 "${ALL_BOARDS[@]}" +add_hubris_artifacts rot-prod-rel prod/rel cert-prod-rel-v1.0.6 "${ALL_BOARDS[@]}" for series in "${SERIES_LIST[@]}"; do /work/tufaceous assemble --no-generate-key /work/manifest-"$series".toml /work/repo-"$series".zip diff --git a/tools/dvt_dock_version b/tools/dvt_dock_version index 047065135b..fd988dc876 100644 --- a/tools/dvt_dock_version +++ b/tools/dvt_dock_version @@ -1 +1 @@ -COMMIT=e384836415e05ae0ba648810ab1c87e9093cdabb +COMMIT=9e8e16f508bb41f02455aeddcece0a7edae2f672 diff --git a/tools/hubris_checksums b/tools/hubris_checksums index d451f7a86c..dca8ea0ab6 100644 --- a/tools/hubris_checksums +++ b/tools/hubris_checksums @@ -1,8 +1,8 @@ -e1b3dc5c7da643b27c0dd5bf8e915d13661446e711bfdeb1d8274eed63fa5843 build-gimlet-c-image-default-v1.0.6.zip -3002444307047429531ef862435a034c64b89a698921bf19794ac97b777a2f95 build-gimlet-d-image-default-v1.0.6.zip -9e783bc92fb1c8a91f4b117241ed4c0ff2818f32f46c5193cdcdbbe02d56af9a build-gimlet-e-image-default-v1.0.6.zip -458c4f02310fe79f27841ce87b2a7c163494f0196890e6420fac17dc4803b51c build-gimlet-f-image-default-v1.0.6.zip -dece7d39f7fcd2f15dc62d91e94046b1f438a3e0fd2c804efd5f67e12ce0dd58 build-psc-b-image-default-v1.0.6.zip -7e94035b52f1dcb137b477750bf9e215d4fcd07fe95b2cfdbbc0d7fada79eb28 build-psc-c-image-default-v1.0.6.zip -ccf09dc7c9c2a946b89bcfafb391100504880fa395c9079dfb7a3b28635a4abb build-sidecar-b-image-default-v1.0.6.zip -b5d91c212f813dbdba06c1f5b098fd37fe6cb93fe33fd3c58325cb6504dc6d05 build-sidecar-c-image-default-v1.0.6.zip +28f432735a15f40101c133e4e9974d1681a33bb7b3386ccbe15b465b613f4826 build-gimlet-c-image-default-v1.0.8.zip +db927999398f0723d5d614db78a5abb4a1d515c711ffba944477bdac10c48907 build-gimlet-d-image-default-v1.0.8.zip +629a53b5d9d4bf3d410687d0ecedf4837a54233ce62b6223d209494777cc7ebc build-gimlet-e-image-default-v1.0.8.zip +e3c2a243257929a65de638f3be425370f084aeeefafbd1773d01ee71cf0b8ea7 build-gimlet-f-image-default-v1.0.8.zip 
+556595b42d05508ebfdac9dd71b38fe9b72e0cb30f6aa4be626c02141e375a71 build-psc-b-image-default-v1.0.8.zip +39fbf92cbc935b4eaecb81a9768357828cf3e79b5c74d36c9a655ae9023cc50c build-psc-c-image-default-v1.0.8.zip +4225dff721b034fe7cf1dc26277557e1f15f2710014dd46dfa7c92ff04c7e054 build-sidecar-b-image-default-v1.0.8.zip +ae8f12d7b66d0bcc372dd79abf515255f6ca97bb0339c570a058684e04e12cf8 build-sidecar-c-image-default-v1.0.8.zip diff --git a/tools/hubris_version b/tools/hubris_version index f2c1e74f2b..4ee8ac61fe 100644 --- a/tools/hubris_version +++ b/tools/hubris_version @@ -1 +1 @@ -TAGS=(gimlet-v1.0.6 psc-v1.0.6 sidecar-v1.0.6) +TAGS=(gimlet-v1.0.8 psc-v1.0.8 sidecar-v1.0.8) From a5be09fe0f20466ff66ae546df4895b785129fb0 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Thu, 22 Feb 2024 11:06:14 -0800 Subject: [PATCH 033/157] "add sled" needs a longer timeout (#5116) In #5111: - the "add sled" Nexus external API call invokes `PUT /sleds` to some sled agent - `PUT /sleds` itself blocks until the new sled's sled agent has started - sled agent startup blocks on setting the reservoir - on production hardware, setting the reservoir took 115s - the default Progenitor (reqwest) timeout is only 15s So as a result, the "add sled" request failed, even though the operation ultimately succeeded. In this PR, I bump the timeout to 5 minutes. I do wonder if we should remove it altogether, or if we should consider the other changes mentioned in #5111 (like not blocking sled agent startup on this, or not blocking these API calls in this way). But for now, this seems like a low-risk way to improve this situation. --- nexus/src/app/rack.rs | 41 +++++++++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 8 deletions(-) diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 7a1ad0e6a9..a137f19434 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -61,6 +61,7 @@ use sled_agent_client::types::{ BgpConfig, BgpPeerConfig as SledBgpPeerConfig, EarlyNetworkConfig, PortConfigV1, RackNetworkConfigV1, RouteConfig as SledRouteConfig, }; +use slog_error_chain::InlineErrorChain; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::collections::HashMap; @@ -647,7 +648,7 @@ impl super::Nexus { if rack.rack_subnet.is_some() { return Ok(()); } - let sa = self.get_any_sled_agent(opctx).await?; + let sa = self.get_any_sled_agent_client(opctx).await?; let result = sa .read_network_bootstore_config_cache() .await @@ -883,7 +884,27 @@ impl super::Nexus { }, }, }; - let sa = self.get_any_sled_agent(opctx).await?; + + // This timeout value is fairly arbitrary (as they usually are). As of + // this writing, this operation is known to take close to two minutes on + // production hardware. 
+
+        let dur = std::time::Duration::from_secs(300);
+        let sa_url = self.get_any_sled_agent_url(opctx).await?;
+        let reqwest_client = reqwest::ClientBuilder::new()
+            .connect_timeout(dur)
+            .timeout(dur)
+            .build()
+            .map_err(|e| {
+                Error::internal_error(&format!(
+                    "failed to create reqwest client for sled agent: {}",
+                    InlineErrorChain::new(&e)
+                ))
+            })?;
+        let sa = sled_agent_client::Client::new_with_client(
+            &sa_url,
+            reqwest_client,
+            self.log.new(o!("sled_agent_url" => sa_url.clone())),
+        );
         sa.sled_add(&req).await.map_err(|e| Error::InternalError {
             internal_message: format!(
                 "failed to add sled with baseboard {:?} to rack {}: {e}",
@@ -899,10 +920,10 @@ impl super::Nexus {
         Ok(())
     }
 
-    async fn get_any_sled_agent(
+    async fn get_any_sled_agent_url(
         &self,
         opctx: &OpContext,
-    ) -> Result<sled_agent_client::Client, Error> {
+    ) -> Result<String, Error> {
         let addr = self
             .sled_list(opctx, &DataPageParams::max_page())
             .await?
@@ -911,11 +932,15 @@
             .ok_or_else(|| Error::ServiceUnavailable {
                 internal_message: "no sled agents available".into(),
             })?
             .address();
+        Ok(format!("http://{}", addr))
+    }
 
-        Ok(sled_agent_client::Client::new(
-            &format!("http://{}", addr),
-            self.log.clone(),
-        ))
+    async fn get_any_sled_agent_client(
+        &self,
+        opctx: &OpContext,
+    ) -> Result<sled_agent_client::Client, Error> {
+        let url = self.get_any_sled_agent_url(opctx).await?;
+        Ok(sled_agent_client::Client::new(&url, self.log.clone()))
+    }
 }
 
From 119bcd1f3d376dabc90a398b4c8ce9f384bbd17d Mon Sep 17 00:00:00 2001
From: "oxide-reflector-bot[bot]" <130185838+oxide-reflector-bot[bot]@users.noreply.github.com>
Date: Thu, 22 Feb 2024 22:53:12 +0000
Subject: [PATCH 034/157] Update maghemite to 4b0e584 (#5123)

Updated maghemite to commit 4b0e584.

Co-authored-by: reflector[bot] <130185838+reflector[bot]@users.noreply.github.com>
---
 package-manifest.toml               | 12 ++++++------
 tools/maghemite_ddm_openapi_version |  2 +-
 tools/maghemite_mg_openapi_version  |  2 +-
 tools/maghemite_mgd_checksums       |  4 ++--
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/package-manifest.toml b/package-manifest.toml
index d7f42794ee..1a749c5b61 100644
--- a/package-manifest.toml
+++ b/package-manifest.toml
@@ -463,10 +463,10 @@ source.repo = "maghemite"
 # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when
 # building `ddm-admin-client` (which will instruct you to update
 # `tools/maghemite_openapi_version`).
-source.commit = "41a69a11db6cfa8fc0c8686dc2d725708e0586ce"
+source.commit = "4b0e584eec455a43c36af08ae207086965cef833"
 # The SHA256 digest is automatically posted to:
 # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image/<commit>/maghemite.sha256.txt
-source.sha256 = "19d5eaa744257c32ccdca52af79d718aeb88a0af188345d33a4564a69b259632"
+source.sha256 = "f1407cb9aac188d6493d2b0f948c75aad2c36668ddf4ae2a1ed80e9dd395b35d"
 output.type = "tarball"
 
 [package.mg-ddm]
@@ -479,10 +479,10 @@ source.repo = "maghemite"
 # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when
 # building `ddm-admin-client` (which will instruct you to update
 # `tools/maghemite_openapi_version`).
-source.commit = "41a69a11db6cfa8fc0c8686dc2d725708e0586ce" +source.commit = "4b0e584eec455a43c36af08ae207086965cef833" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt -source.sha256 = "ffb647b3297ec616d3d9ea93396ad9edd16ed146048a660b34e9b86e85d466b7" +source.sha256 = "fae53cb39536dc92d97cb9610de65b0acbce285e685d7167b719ea6311844fec" output.type = "zone" output.intermediate_only = true @@ -494,10 +494,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "41a69a11db6cfa8fc0c8686dc2d725708e0586ce" +source.commit = "4b0e584eec455a43c36af08ae207086965cef833" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt -source.sha256 = "26d34f61589f63be64eaa77a6e9e2db4c95d6675798386a1d61721c1ccc59d4d" +source.sha256 = "22996a6f3353296b848be729f14e78a42e7d3d6e62a4a918a5c2358ae011c8eb" output.type = "zone" output.intermediate_only = true diff --git a/tools/maghemite_ddm_openapi_version b/tools/maghemite_ddm_openapi_version index 6c58d83ea3..d161091fa8 100644 --- a/tools/maghemite_ddm_openapi_version +++ b/tools/maghemite_ddm_openapi_version @@ -1,2 +1,2 @@ -COMMIT="41a69a11db6cfa8fc0c8686dc2d725708e0586ce" +COMMIT="4b0e584eec455a43c36af08ae207086965cef833" SHA2="0b0dbc2f8bbc5d2d9be92d64c4865f8f9335355aae62f7de9f67f81dfb3f1803" diff --git a/tools/maghemite_mg_openapi_version b/tools/maghemite_mg_openapi_version index 896be8d38c..475d273f4a 100644 --- a/tools/maghemite_mg_openapi_version +++ b/tools/maghemite_mg_openapi_version @@ -1,2 +1,2 @@ -COMMIT="41a69a11db6cfa8fc0c8686dc2d725708e0586ce" +COMMIT="4b0e584eec455a43c36af08ae207086965cef833" SHA2="0ac038bbaa54d0ae0ac5ccaeff48f03070618372cca26c9d09b716b909bf9355" diff --git a/tools/maghemite_mgd_checksums b/tools/maghemite_mgd_checksums index 8fc4d083f8..ab84fafc01 100644 --- a/tools/maghemite_mgd_checksums +++ b/tools/maghemite_mgd_checksums @@ -1,2 +1,2 @@ -CIDL_SHA256="26d34f61589f63be64eaa77a6e9e2db4c95d6675798386a1d61721c1ccc59d4d" -MGD_LINUX_SHA256="b2c823dd714fad67546a0e0c0d4ae56f2fe2e7c43434469b38e13b78de9f6968" \ No newline at end of file +CIDL_SHA256="22996a6f3353296b848be729f14e78a42e7d3d6e62a4a918a5c2358ae011c8eb" +MGD_LINUX_SHA256="943b0a52d279bde55a419e2cdb24873acc32703bc97bd599376117ee0edc1511" \ No newline at end of file From 2088693046c7a666a50a5e6a8b0a14e99f7d01a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karen=20C=C3=A1rcamo?= Date: Fri, 23 Feb 2024 19:39:35 +1300 Subject: [PATCH 035/157] [sled-agent] Self assembling external DNS zone (#5059) ### Overview In addition to implementing the external DNS self assembling zone, this PR contains a new SMF service called `opte-interface-setup`. Closes: https://github.com/oxidecomputer/omicron/issues/2881 Related: https://github.com/oxidecomputer/omicron/issues/1898 ### Implementation This service makes use of the zone-network CLI tool to avoid having too many CLIs doing things. The CLI is now shipped independently so it can be called by two different services. The [`zone-networking opte-interface-set-up`]( https://github.com/oxidecomputer/omicron/pull/5059/files#diff-5fb7b70dc87176e02517181b0887ce250b6a4e4079e495990551deeca741dc8bR181-R202) command sets up what the `ensure_address_for_port()` method used to set up. 
### Justification The reasoning behind this new service is to avoid setting up too many things via the method_script.sh file, and to avoid code duplication. The Nexus zone will also be using this service to set up the OPTE interface. --- illumos-utils/src/ipadm.rs | 25 +++ illumos-utils/src/opte/port.rs | 8 +- illumos-utils/src/route.rs | 67 +++++++- illumos-utils/src/running_zone.rs | 7 +- package-manifest.toml | 56 +++++-- sled-agent/src/services.rs | 140 ++++++++++++----- smf/external-dns/manifest.xml | 12 +- smf/opte-interface-setup/manifest.xml | 46 ++++++ smf/zone-network-setup/manifest.xml | 2 +- zone-network-setup/src/bin/zone-networking.rs | 145 ++++++++++++++---- 10 files changed, 417 insertions(+), 91 deletions(-) create mode 100644 smf/opte-interface-setup/manifest.xml diff --git a/illumos-utils/src/ipadm.rs b/illumos-utils/src/ipadm.rs index f4d884d452..1c9e1e234e 100644 --- a/illumos-utils/src/ipadm.rs +++ b/illumos-utils/src/ipadm.rs @@ -107,4 +107,29 @@ impl Ipadm { }; Ok(()) } + + // Create gateway on the IP interface if it doesn't already exist + pub fn create_opte_gateway( + opte_iface: &String, + ) -> Result<(), ExecutionError> { + let addrobj = format!("{}/public", opte_iface); + let mut cmd = std::process::Command::new(PFEXEC); + let cmd = cmd.args(&[IPADM, "show-addr", &addrobj]); + match execute(cmd) { + Err(_) => { + let mut cmd = std::process::Command::new(PFEXEC); + let cmd = cmd.args(&[ + IPADM, + "create-addr", + "-t", + "-T", + "dhcp", + &addrobj, + ]); + execute(cmd)?; + } + Ok(_) => (), + }; + Ok(()) + } } diff --git a/illumos-utils/src/opte/port.rs b/illumos-utils/src/opte/port.rs index d06cdfe1ec..6fbb89c450 100644 --- a/illumos-utils/src/opte/port.rs +++ b/illumos-utils/src/opte/port.rs @@ -15,7 +15,7 @@ struct PortInner { // Name of the port as identified by OPTE name: String, // IP address within the VPC Subnet - _ip: IpAddr, + ip: IpAddr, // VPC-private MAC address mac: MacAddr6, // Emulated PCI slot for the guest NIC, passed to Propolis @@ -95,7 +95,7 @@ impl Port { Self { inner: Arc::new(PortInner { name, - _ip: ip, + ip, mac, slot, vni, @@ -105,6 +105,10 @@ impl Port { } } + pub fn ip(&self) -> &IpAddr { + &self.inner.ip + } + pub fn name(&self) -> &str { &self.inner.name } diff --git a/illumos-utils/src/route.rs b/illumos-utils/src/route.rs index 2b6af9a9fd..ceff2b3d9e 100644 --- a/illumos-utils/src/route.rs +++ b/illumos-utils/src/route.rs @@ -7,27 +7,76 @@ use crate::zone::ROUTE; use crate::{execute, inner, output_to_exec_error, ExecutionError, PFEXEC}; use libc::ESRCH; -use std::net::Ipv6Addr; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; /// Wraps commands for interacting with routing tables. 
 pub struct Route {}
 
+pub enum Gateway {
+    Ipv4(Ipv4Addr),
+    Ipv6(Ipv6Addr),
+}
+
 #[cfg_attr(any(test, feature = "testing"), mockall::automock)]
 impl Route {
     pub fn ensure_default_route_with_gateway(
-        gateway: &Ipv6Addr,
+        gateway: Gateway,
     ) -> Result<(), ExecutionError> {
+        let inet;
+        let gw;
+        match gateway {
+            Gateway::Ipv4(addr) => {
+                inet = "-inet";
+                gw = addr.to_string();
+            }
+            Gateway::Ipv6(addr) => {
+                inet = "-inet6";
+                gw = addr.to_string();
+            }
+        }
         // Add the desired route if it doesn't already exist
         let destination = "default";
         let mut cmd = std::process::Command::new(PFEXEC);
+        let cmd = cmd.args(&[ROUTE, "-n", "get", inet, destination, inet, &gw]);
+
+        let out =
+            cmd.output().map_err(|err| ExecutionError::ExecutionStart {
+                command: inner::to_string(cmd),
+                err,
+            })?;
+        match out.status.code() {
+            Some(0) => (),
+            // If the entry is not found in the table,
+            // the exit status of the command will be 3 (ESRCH).
+            // When that is the case, we'll add the route.
+            Some(ESRCH) => {
+                let mut cmd = std::process::Command::new(PFEXEC);
+                let cmd =
+                    cmd.args(&[ROUTE, "add", inet, destination, inet, &gw]);
+                execute(cmd)?;
+            }
+            Some(_) | None => return Err(output_to_exec_error(cmd, &out)),
+        };
+        Ok(())
+    }
+
+    pub fn ensure_opte_route(
+        gateway: &Ipv4Addr,
+        iface: &String,
+        opte_ip: &IpAddr,
+    ) -> Result<(), ExecutionError> {
+        // Add the desired route if it doesn't already exist
+        let mut cmd = std::process::Command::new(PFEXEC);
         let cmd = cmd.args(&[
             ROUTE,
             "-n",
             "get",
-            "-inet6",
-            destination,
-            "-inet6",
+            "-host",
             &gateway.to_string(),
+            &opte_ip.to_string(),
+            "-interface",
+            "-ifp",
+            &iface.to_string(),
         ]);
 
         let out =
@@ -45,10 +94,12 @@
                 let cmd = cmd.args(&[
                     ROUTE,
                     "add",
-                    "-inet6",
-                    destination,
-                    "-inet6",
+                    "-host",
                     &gateway.to_string(),
+                    &opte_ip.to_string(),
+                    "-interface",
+                    "-ifp",
+                    &iface.to_string(),
                 ]);
                 execute(cmd)?;
             }
diff --git a/illumos-utils/src/running_zone.rs b/illumos-utils/src/running_zone.rs
index 4b4107f529..1c1df01980 100644
--- a/illumos-utils/src/running_zone.rs
+++ b/illumos-utils/src/running_zone.rs
@@ -888,7 +888,7 @@ impl RunningZone {
 
     /// Return references to the OPTE ports for this zone.
     pub fn opte_ports(&self) -> impl Iterator<Item = &Port> {
-        self.inner.opte_ports.iter().map(|(port, _)| port)
+        self.inner.opte_ports()
     }
 
     /// Remove the OPTE ports on this zone from the port manager.
@@ -1130,6 +1130,11 @@
         path.push("root/var/svc/profile/site.xml");
         path
     }
+
+    /// Returns references to the OPTE ports for this zone.
+    pub fn opte_ports(&self) -> impl Iterator<Item = &Port> {
+        self.opte_ports.iter().map(|(port, _)| port)
+    }
 }
 
 #[derive(Clone)]
diff --git a/package-manifest.toml b/package-manifest.toml
index 1a749c5b61..c474a52736 100644
--- a/package-manifest.toml
+++ b/package-manifest.toml
@@ -120,7 +120,7 @@ setup_hint = """
 service_name = "oximeter"
 only_for_targets.image = "standard"
 source.type = "composite"
-source.packages = [ "oximeter-collector.tar.gz", "zone-network-setup.tar.gz" ]
+source.packages = [ "oximeter-collector.tar.gz", "zone-network-setup.tar.gz", "zone-network-install.tar.gz" ]
 output.type = "zone"
 
 [package.oximeter-collector]
@@ -140,7 +140,12 @@ output.intermediate_only = true
 service_name = "clickhouse"
 only_for_targets.image = "standard"
 source.type = "composite"
-source.packages = [ "clickhouse_svc.tar.gz", "internal-dns-cli.tar.gz", "zone-network-setup.tar.gz" ]
+source.packages = [
+  "clickhouse_svc.tar.gz",
+  "internal-dns-cli.tar.gz",
+  "zone-network-setup.tar.gz",
+  "zone-network-install.tar.gz"
+]
 output.type = "zone"
 
 [package.clickhouse_svc]
@@ -161,7 +166,12 @@ setup_hint = "Run `./tools/ci_download_clickhouse` to download the necessary bin
 service_name = "clickhouse_keeper"
 only_for_targets.image = "standard"
 source.type = "composite"
-source.packages = [ "clickhouse_keeper_svc.tar.gz", "internal-dns-cli.tar.gz", "zone-network-setup.tar.gz" ]
+source.packages = [
+  "clickhouse_keeper_svc.tar.gz",
+  "internal-dns-cli.tar.gz",
+  "zone-network-setup.tar.gz",
+  "zone-network-install.tar.gz"
+]
 output.type = "zone"
 
 [package.clickhouse_keeper_svc]
@@ -182,7 +192,12 @@ setup_hint = "Run `./tools/ci_download_clickhouse` to download the necessary bin
 service_name = "cockroachdb"
 only_for_targets.image = "standard"
 source.type = "composite"
-source.packages = [ "cockroachdb-service.tar.gz", "internal-dns-cli.tar.gz", "zone-network-setup.tar.gz" ]
+source.packages = [
+  "cockroachdb-service.tar.gz",
+  "internal-dns-cli.tar.gz",
+  "zone-network-setup.tar.gz",
+  "zone-network-install.tar.gz"
+]
 output.type = "zone"
 
 [package.cockroachdb-service]
@@ -220,7 +235,13 @@ output.type = "zone"
 service_name = "external_dns"
 only_for_targets.image = "standard"
 source.type = "composite"
-source.packages = [ "dns-server.tar.gz", "external-dns-customizations.tar.gz" ]
+source.packages = [
+  "dns-server.tar.gz",
+  "external-dns-customizations.tar.gz",
+  "zone-network-setup.tar.gz",
+  "zone-network-install.tar.gz",
+  "opte-interface-setup.tar.gz"
+]
 output.type = "zone"
 
 [package.dns-server]
@@ -393,7 +414,7 @@ output.intermediate_only = true
 service_name = "crucible"
 only_for_targets.image = "standard"
 source.type = "composite"
-source.packages = [ "crucible.tar.gz", "zone-network-setup.tar.gz" ]
+source.packages = [ "crucible.tar.gz", "zone-network-setup.tar.gz", "zone-network-install.tar.gz" ]
 output.type = "zone"
 
 
@@ -401,7 +422,7 @@ output.type = "zone"
 service_name = "crucible_pantry"
 only_for_targets.image = "standard"
 source.type = "composite"
-source.packages = [ "crucible-pantry.tar.gz", "zone-network-setup.tar.gz" ]
+source.packages = [ "crucible-pantry.tar.gz", "zone-network-setup.tar.gz", "zone-network-install.tar.gz" ]
 output.type = "zone"
 
 # Packages not built within Omicron, but which must be imported.
@@ -643,14 +664,31 @@ source.packages = [
 ]
 output.type = "zone"
 
-[package.zone-network-setup]
+[package.zone-network-install]
 service_name = "zone-network-setup"
 only_for_targets.image = "standard"
 source.type = "local"
+source.paths = [
+  { from = "smf/zone-network-setup/manifest.xml", to = "/var/svc/manifest/site/zone-network-setup/manifest.xml" },
+]
+output.type = "zone"
+output.intermediate_only = true
+
+[package.zone-network-setup]
+service_name = "zone-network-cli"
+only_for_targets.image = "standard"
+source.type = "local"
 source.rust.binary_names = ["zone-networking"]
 source.rust.release = true
+output.type = "zone"
+output.intermediate_only = true
+
+[package.opte-interface-setup]
+service_name = "opte-interface-setup"
+only_for_targets.image = "standard"
+source.type = "local"
 source.paths = [
-  { from = "smf/zone-network-setup/manifest.xml", to = "/var/svc/manifest/site/zone-network-setup/manifest.xml" },
+  { from = "smf/opte-interface-setup/manifest.xml", to = "/var/svc/manifest/site/opte-interface-setup/manifest.xml" },
 ]
 output.type = "zone"
 output.intermediate_only = true
diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs
index bcd648cd2d..f3ddfdbf89 100644
--- a/sled-agent/src/services.rs
+++ b/sled-agent/src/services.rs
@@ -53,7 +53,8 @@ use illumos_utils::dladm::{
 use illumos_utils::link::{Link, VnicAllocator};
 use illumos_utils::opte::{DhcpCfg, Port, PortManager, PortTicket};
 use illumos_utils::running_zone::{
-    InstalledZone, RunCommandError, RunningZone, ZoneBuilderFactory,
+    EnsureAddressError, InstalledZone, RunCommandError, RunningZone,
+    ZoneBuilderFactory,
 };
 use illumos_utils::zfs::ZONE_ZFS_RAMDISK_DATASET_MOUNTPOINT;
 use illumos_utils::zone::AddressRequest;
@@ -68,6 +69,8 @@ use omicron_common::address::COCKROACH_PORT;
 use omicron_common::address::CRUCIBLE_PANTRY_PORT;
 use omicron_common::address::CRUCIBLE_PORT;
 use omicron_common::address::DENDRITE_PORT;
+use omicron_common::address::DNS_HTTP_PORT;
+use omicron_common::address::DNS_PORT;
 use omicron_common::address::MGS_PORT;
 use omicron_common::address::RACK_PREFIX;
 use omicron_common::address::SLED_PREFIX;
@@ -1444,6 +1447,32 @@ impl ServiceManager {
         .add_instance(ServiceInstanceBuilder::new("default")))
     }
 
+    fn opte_interface_set_up_install(
+        zone: &InstalledZone,
+    ) -> Result<ServiceBuilder, Error> {
+        let port_idx = 0;
+        let port = zone.opte_ports().nth(port_idx).ok_or_else(|| {
+            Error::ZoneEnsureAddress(EnsureAddressError::MissingOptePort {
+                zone: String::from(zone.name()),
+                port_idx,
+            })
+        })?;
+
+        let opte_interface = port.vnic_name();
+        let opte_gateway = port.gateway().ip().to_string();
+        let opte_ip = port.ip().to_string();
+
+        let mut config_builder = PropertyGroupBuilder::new("config");
+        config_builder = config_builder
+            .add_property("interface", "astring", opte_interface)
+            .add_property("gateway", "astring", &opte_gateway)
+            .add_property("ip", "astring", &opte_ip);
+
+        Ok(ServiceBuilder::new("oxide/opte-interface-setup")
+            .add_property_group(config_builder)
+            .add_instance(ServiceInstanceBuilder::new("default")))
+    }
+
     async fn initialize_zone(
         &self,
         request: ZoneArgs<'_>,
@@ -1841,6 +1870,73 @@
                 })?;
                 return Ok(RunningZone::boot(installed_zone).await?);
             }
+            ZoneArgs::Omicron(OmicronZoneConfigLocal {
+                zone:
+                    OmicronZoneConfig {
+                        zone_type: OmicronZoneType::ExternalDns { .. },
+                        underlay_address,
+                        ..
+                    },
+                ..
+ }) => { + let Some(info) = self.inner.sled_info.get() else { + return Err(Error::SledAgentNotReady); + }; + + let static_addr = underlay_address.to_string(); + + let nw_setup_service = Self::zone_network_setup_install( + info, + &installed_zone, + &static_addr.clone(), + )?; + + // Like Nexus, we need to be reachable externally via + // `dns_address` but we don't listen on that address + // directly but instead on a VPC private IP. OPTE will + // en/decapsulate as appropriate. + let opte_interface_setup = + Self::opte_interface_set_up_install(&installed_zone)?; + + let port_idx = 0; + let port = installed_zone + .opte_ports() + .nth(port_idx) + .ok_or_else(|| { + Error::ZoneEnsureAddress( + EnsureAddressError::MissingOptePort { + zone: String::from(installed_zone.name()), + port_idx, + }, + ) + })?; + let opte_ip = port.ip(); + + let http_addr = format!("[{}]:{}", static_addr, DNS_HTTP_PORT); + let dns_addr = format!("{}:{}", opte_ip, DNS_PORT); + + let external_dns_config = PropertyGroupBuilder::new("config") + .add_property("http_address", "astring", &http_addr) + .add_property("dns_address", "astring", &dns_addr); + let external_dns_service = + ServiceBuilder::new("oxide/external_dns").add_instance( + ServiceInstanceBuilder::new("default") + .add_property_group(external_dns_config), + ); + + let profile = ProfileBuilder::new("omicron") + .add_service(nw_setup_service) + .add_service(opte_interface_setup) + .add_service(disabled_ssh_service) + .add_service(external_dns_service); + profile + .add_to_zone(&self.inner.log, &installed_zone) + .await + .map_err(|err| { + Error::io("Failed to setup External DNS profile", err) + })?; + return Ok(RunningZone::boot(installed_zone).await?); + } _ => {} } @@ -2081,47 +2177,6 @@ impl ServiceManager { .await .map_err(|err| Error::io_path(&config_path, err))?; } - - OmicronZoneType::ExternalDns { - http_address, - dns_address, - .. - } => { - info!( - self.inner.log, - "Setting up external-dns service" - ); - - // Like Nexus, we need to be reachable externally via - // `dns_address` but we don't listen on that address - // directly but instead on a VPC private IP. OPTE will - // en/decapsulate as appropriate. - let port_ip = running_zone - .ensure_address_for_port("public", 0) - .await? - .ip(); - let dns_address = - SocketAddr::new(port_ip, dns_address.port()); - - smfh.setprop( - "config/http_address", - format!( - "[{}]:{}", - http_address.ip(), - http_address.port(), - ), - )?; - smfh.setprop( - "config/dns_address", - dns_address.to_string(), - )?; - - // Refresh the manifest with the new properties we set, - // so they become "effective" properties when the - // service is enabled. - smfh.refresh()?; - } - OmicronZoneType::InternalDns { http_address, dns_address, @@ -2262,6 +2317,7 @@ impl ServiceManager { | OmicronZoneType::CockroachDb { .. } | OmicronZoneType::Crucible { .. } | OmicronZoneType::CruciblePantry { .. } + | OmicronZoneType::ExternalDns { .. } | OmicronZoneType::Oximeter { .. 
} => {
                panic!(
                    "{} is a service which exists as part of a \
diff --git a/smf/external-dns/manifest.xml b/smf/external-dns/manifest.xml
index 05c3c02b4e..1b6ee2cba0 100644
--- a/smf/external-dns/manifest.xml
+++ b/smf/external-dns/manifest.xml
@@ -4,13 +4,23 @@
[hunk body unrecoverable: the XML markup of this manifest change (1 line removed, 9 lines added) was stripped during extraction]
diff --git a/smf/opte-interface-setup/manifest.xml b/smf/opte-interface-setup/manifest.xml
new file mode 100644
index 0000000000..5b886c8a71
--- /dev/null
+++ b/smf/opte-interface-setup/manifest.xml
@@ -0,0 +1,46 @@
[hunk body unrecoverable: the 46 lines of XML markup for this new SMF manifest were stripped during extraction]
diff --git a/smf/zone-network-setup/manifest.xml b/smf/zone-network-setup/manifest.xml
index 0776329749..a20ff949a4 100644
--- a/smf/zone-network-setup/manifest.xml
+++ b/smf/zone-network-setup/manifest.xml
@@ -18,7 +18,7 @@
[hunk body unrecoverable: the XML markup of this one-line manifest change was stripped during extraction]
diff --git a/zone-network-setup/src/bin/zone-networking.rs b/zone-network-setup/src/bin/zone-networking.rs
index f3d18832c5..e36afd6b03 100644
--- a/zone-network-setup/src/bin/zone-networking.rs
+++ b/zone-network-setup/src/bin/zone-networking.rs
@@ -5,17 +5,31 @@
 //! CLI to set up zone networking
 
 use anyhow::anyhow;
-use clap::{arg, command};
+use clap::{arg, command, ArgMatches, Command};
 use illumos_utils::ipadm::Ipadm;
-use illumos_utils::route::Route;
+use illumos_utils::route::{Gateway, Route};
 use omicron_common::cmd::fatal;
 use omicron_common::cmd::CmdError;
-use slog::info;
+use slog::{info, Logger};
 use std::fs;
-use std::net::Ipv6Addr;
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
 
 pub const HOSTS_FILE: &str = "/etc/inet/hosts";
 
+fn parse_ip(s: &str) -> anyhow::Result<IpAddr> {
+    if s == "unknown" {
+        return Err(anyhow!("ERROR: Missing input value"));
+    };
+    s.parse().map_err(|_| anyhow!("ERROR: Invalid IP address"))
+}
+
+fn parse_ipv4(s: &str) -> anyhow::Result<Ipv4Addr> {
+    if s == "unknown" {
+        return Err(anyhow!("ERROR: Missing input value"));
+    };
+    s.parse().map_err(|_| anyhow!("ERROR: Invalid IPv4 address"))
+}
+
 fn parse_ipv6(s: &str) -> anyhow::Result<Ipv6Addr> {
     if s == "unknown" {
         return Err(anyhow!("ERROR: Missing input value"));
@@ -30,6 +44,13 @@ fn parse_datalink(s: &str) -> anyhow::Result<String> {
     s.parse().map_err(|_| anyhow!("ERROR: Invalid data link"))
 }
 
+fn parse_opte_iface(s: &str) -> anyhow::Result<String> {
+    if s == "unknown" {
+        return Err(anyhow!("ERROR: Missing OPTE interface"));
+    };
+    s.parse().map_err(|_| anyhow!("ERROR: Invalid OPTE interface"))
+}
+
 #[tokio::main]
 async fn main() {
     if let Err(message) = do_run().await {
@@ -47,34 +68,81 @@ async fn do_run() -> Result<(), CmdError> {
         .map_err(|err| CmdError::Failure(anyhow!(err)))?;
 
     let matches = command!()
-        .arg(
-            arg!(
-                -d --datalink <STRING> "datalink"
-            )
-            .required(true)
-            .value_parser(parse_datalink),
-        )
-        .arg(
-            arg!(
-                -g --gateway <Ipv6Addr> "gateway"
-            )
-            .required(true)
-            .value_parser(parse_ipv6),
+        .subcommand(
+            Command::new("set-up")
+                .about(
+                    "Sets up common networking configuration across all zones",
+                )
+                .arg(
+                    arg!(
+                        -d --datalink <STRING> "datalink"
+                    )
+                    .required(true)
+                    .value_parser(parse_datalink),
+                )
+                .arg(
+                    arg!(
+                        -g --gateway <Ipv6Addr> "gateway"
+                    )
+                    .required(true)
+                    .value_parser(parse_ipv6),
+                )
+                .arg(
+                    arg!(
+                        -s --static_addr <Ipv6Addr> "static_addr"
+                    )
+                    .required(true)
+                    .value_parser(parse_ipv6),
+                ),
         )
-        .arg(
-            arg!(
-                -s --static_addr <Ipv6Addr> "static_addr"
-            )
-            .required(true)
-            .value_parser(parse_ipv6),
+        .subcommand(
+            Command::new("opte-interface-set-up")
+                .about("Sets up OPTE interface")
+                .arg(
+                    arg!(
+                        -i --opte_interface <STRING> "opte_interface"
+                    )
+                    .required(true)
+                    .value_parser(parse_opte_iface),
+                )
+                .arg(
+                    arg!(
+                        -g --opte_gateway <Ipv4Addr> "opte_gateway"
+                    )
+ .required(true) + .value_parser(parse_ipv4), + ) + .arg( + arg!( + -p --opte_ip "opte_ip" + ) + .required(true) + .value_parser(parse_ip), + ), ) .get_matches(); - let zonename = - zone::current().await.expect("Could not determine local zone name"); + if let Some(matches) = matches.subcommand_matches("set-up") { + set_up(matches, log.clone()).await?; + } + + if let Some(matches) = matches.subcommand_matches("opte-interface-set-up") { + opte_interface_set_up(matches, log.clone()).await?; + } + + Ok(()) +} + +async fn set_up(matches: &ArgMatches, log: Logger) -> Result<(), CmdError> { let datalink: &String = matches.get_one("datalink").unwrap(); let static_addr: &Ipv6Addr = matches.get_one("static_addr").unwrap(); - let gateway: &Ipv6Addr = matches.get_one("gateway").unwrap(); + let gateway: Ipv6Addr = *matches.get_one("gateway").unwrap(); + let zonename = zone::current().await.map_err(|err| { + CmdError::Failure(anyhow!( + "Could not determine local zone name: {}", + err + )) + })?; // TODO: remove when https://github.com/oxidecomputer/stlouis/issues/435 is // addressed @@ -91,7 +159,7 @@ async fn do_run() -> Result<(), CmdError> { .map_err(|err| CmdError::Failure(anyhow!(err)))?; info!(&log, "Ensuring there is a default route"; "gateway" => ?gateway); - Route::ensure_default_route_with_gateway(gateway) + Route::ensure_default_route_with_gateway(Gateway::Ipv6(gateway)) .map_err(|err| CmdError::Failure(anyhow!(err)))?; info!(&log, "Populating hosts file for zone"; "zonename" => ?zonename); @@ -109,3 +177,26 @@ async fn do_run() -> Result<(), CmdError> { Ok(()) } + +async fn opte_interface_set_up( + matches: &ArgMatches, + log: Logger, +) -> Result<(), CmdError> { + let interface: &String = matches.get_one("opte_interface").unwrap(); + let gateway: Ipv4Addr = *matches.get_one("opte_gateway").unwrap(); + let opte_ip: &IpAddr = matches.get_one("opte_ip").unwrap(); + + info!(&log, "Creating gateway on the OPTE IP interface if it doesn't already exist"; "OPTE interface" => ?interface); + Ipadm::create_opte_gateway(interface) + .map_err(|err| CmdError::Failure(anyhow!(err)))?; + + info!(&log, "Ensuring there is a gateway route"; "OPTE gateway" => ?gateway, "OPTE interface" => ?interface, "OPTE IP" => ?opte_ip); + Route::ensure_opte_route(&gateway, interface, &opte_ip) + .map_err(|err| CmdError::Failure(anyhow!(err)))?; + + info!(&log, "Ensuring there is a default route"; "gateway" => ?gateway); + Route::ensure_default_route_with_gateway(Gateway::Ipv4(gateway)) + .map_err(|err| CmdError::Failure(anyhow!(err)))?; + + Ok(()) +} From 65ebf72288b16467dc6457bb4c3bfa90830000db Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 23 Feb 2024 01:59:41 -0800 Subject: [PATCH 036/157] [test-utils] Give a little hint about running CockroachDB on test failures (#5125) This is a pattern I use a lot, but I want to help make it more visible to folks during test failures. Digging into the state of Cockroach after a test failure is a useful debugging tool, so I want to make it obvious "how to do that". --- test-utils/src/dev/db.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/test-utils/src/dev/db.rs b/test-utils/src/dev/db.rs index da6cc90b03..3c1b46b4c2 100644 --- a/test-utils/src/dev/db.rs +++ b/test-utils/src/dev/db.rs @@ -640,8 +640,13 @@ impl Drop for CockroachInstance { // Do NOT clean up the temporary directory in this case. 
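            // `into_path()` below defuses `TempDir`'s delete-on-drop
            // behavior, so the CockroachDB data files survive for
            // post-mortem debugging.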
let path = temp_dir.into_path(); eprintln!( - "WARN: temporary directory leaked: {}", - path.display() + "WARN: temporary directory leaked: {path:?}\n\ + \tIf you would like to access the database for debugging, run the following:\n\n\ + \t# Run the database\n\ + \tcargo run --bin omicron-dev db-run --no-populate --store-dir {data_path:?}\n\ + \t# Access the database. Note the port may change if you run multiple databases.\n\ + \tcockroach sql --host=localhost:32221 --insecure", + data_path = path.join("data"), ); } } From a07cae6485c3f7babbd4824b021c47601b99f92a Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Fri, 23 Feb 2024 14:55:47 -0500 Subject: [PATCH 037/157] Fix backwards counts in bad partition layout log message (#5129) --- sled-hardware/src/illumos/partitions.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sled-hardware/src/illumos/partitions.rs b/sled-hardware/src/illumos/partitions.rs index de62e25cfe..29b2466ad9 100644 --- a/sled-hardware/src/illumos/partitions.rs +++ b/sled-hardware/src/illumos/partitions.rs @@ -46,9 +46,8 @@ fn parse_partition_types( return Err(PooledDiskError::BadPartitionLayout { path: path.to_path_buf(), why: format!( - "Expected {} partitions, only saw {}", + "Expected {N} partitions, only saw {}", partitions.len(), - N ), }); } From a6ef7f9ed791893d1333f080b15ae4109e3b858d Mon Sep 17 00:00:00 2001 From: Rain Date: Fri, 23 Feb 2024 15:52:27 -0800 Subject: [PATCH 038/157] [nexus] add basic support for expunged sled policy and decommissioned sled state (#5032) This PR does a few things: * Migrates our current `sled_provision_state` to a new `sled_policy` enum, which has more information (per [RFD 457](https://rfd.shared.oxide.computer/rfd/0457)). This PR implements the expunged state, not the graceful removal state. * Adds a `sled_state` enum, which describes Nexus's view of the sled. This PR adds the `active` and `decommissioned` states. * Adds **internal** code to move around between valid states. * Makes the blueprint execution code aware of sleds eligible for discretionary services. * Adds tests for all of this new stuff, as well as valid and invalid state transitions -- and also makes sure that if we _do_ end up in an invalid state, things don't break down. Not done here, but in future PRs (to try and keep this PR a manageable size): * We'll add the endpoint to mark the sled as expunged (this is an irreversible operation and will need the appropriate warnings): https://github.com/oxidecomputer/omicron/issues/5134 * We'll add blueprint code to start removing sleds. 
* We'll also remove the sled `time_deleted` because it has a lifecycle too complicated to be described that way -- instead, we'll add a `time_decommissioned` field: https://github.com/oxidecomputer/omicron/issues/5131 --- Cargo.lock | 1 + nexus/blueprint-execution/src/dns.rs | 6 +- nexus/db-model/src/lib.rs | 6 +- nexus/db-model/src/schema.rs | 5 +- nexus/db-model/src/sled.rs | 31 +- nexus/db-model/src/sled_policy.rs | 68 ++ nexus/db-model/src/sled_provision_state.rs | 53 -- nexus/db-model/src/sled_state.rs | 59 ++ nexus/db-queries/Cargo.toml | 3 +- nexus/db-queries/src/authz/context.rs | 3 +- nexus/db-queries/src/authz/policy_test/mod.rs | 6 +- nexus/db-queries/src/context.rs | 9 +- .../db-queries/src/db/datastore/deployment.rs | 8 +- nexus/db-queries/src/db/datastore/disk.rs | 2 +- nexus/db-queries/src/db/datastore/dns.rs | 2 +- .../db-queries/src/db/datastore/inventory.rs | 2 +- nexus/db-queries/src/db/datastore/ip_pool.rs | 2 +- .../src/db/datastore/ipv4_nat_entry.rs | 2 +- nexus/db-queries/src/db/datastore/mod.rs | 434 +++++----- .../src/db/datastore/physical_disk.rs | 3 +- nexus/db-queries/src/db/datastore/rack.rs | 3 +- nexus/db-queries/src/db/datastore/sled.rs | 799 ++++++++++++++++-- .../src/db/datastore/switch_port.rs | 3 +- .../db-queries/src/db/datastore/test_utils.rs | 343 ++++++++ nexus/db-queries/src/db/datastore/volume.rs | 2 +- nexus/db-queries/src/db/datastore/vpc.rs | 2 +- nexus/db-queries/src/db/lookup.rs | 3 +- nexus/db-queries/src/db/pool_connection.rs | 3 +- .../db-queries/src/db/queries/external_ip.rs | 3 +- .../src/db/queries/network_interface.rs | 3 +- .../src/db/queries/region_allocation.rs | 9 +- nexus/db-queries/src/transaction_retry.rs | 2 +- nexus/deployment/src/blueprint_builder.rs | 38 +- nexus/deployment/src/planner.rs | 80 +- .../src/app/background/blueprint_execution.rs | 3 +- .../app/background/inventory_collection.rs | 2 +- nexus/src/app/deployment.rs | 3 +- nexus/src/app/sled.rs | 26 +- nexus/src/external_api/http_entrypoints.rs | 24 +- nexus/tests/integration_tests/endpoints.rs | 17 +- nexus/tests/integration_tests/schema.rs | 48 +- nexus/tests/output/nexus_tags.txt | 2 +- nexus/types/src/deployment.rs | 22 +- nexus/types/src/external_api/params.rs | 14 +- nexus/types/src/external_api/views.rs | 200 ++++- openapi/nexus.json | 110 ++- schema/crdb/37.0.0/up01.sql | 13 + schema/crdb/37.0.0/up02.sql | 22 + schema/crdb/37.0.0/up03.sql | 7 + schema/crdb/37.0.0/up04.sql | 14 + schema/crdb/37.0.1/up01.sql | 8 + schema/crdb/37.0.1/up02.sql | 1 + schema/crdb/dbinit.sql | 49 +- 53 files changed, 2115 insertions(+), 468 deletions(-) create mode 100644 nexus/db-model/src/sled_policy.rs delete mode 100644 nexus/db-model/src/sled_provision_state.rs create mode 100644 nexus/db-model/src/sled_state.rs create mode 100644 nexus/db-queries/src/db/datastore/test_utils.rs create mode 100644 schema/crdb/37.0.0/up01.sql create mode 100644 schema/crdb/37.0.0/up02.sql create mode 100644 schema/crdb/37.0.0/up03.sql create mode 100644 schema/crdb/37.0.0/up04.sql create mode 100644 schema/crdb/37.0.1/up01.sql create mode 100644 schema/crdb/37.0.1/up02.sql diff --git a/Cargo.lock b/Cargo.lock index e15afdfbab..85b7e5a186 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4405,6 +4405,7 @@ dependencies = [ "pem", "petgraph", "pq-sys", + "predicates", "pretty_assertions", "rand 0.8.5", "rcgen", diff --git a/nexus/blueprint-execution/src/dns.rs b/nexus/blueprint-execution/src/dns.rs index 6dd9266f32..54611e9f66 100644 --- a/nexus/blueprint-execution/src/dns.rs +++ 
b/nexus/blueprint-execution/src/dns.rs @@ -328,7 +328,8 @@ mod test { use nexus_types::deployment::Policy; use nexus_types::deployment::SledResources; use nexus_types::deployment::ZpoolName; - use nexus_types::external_api::views::SledProvisionState; + use nexus_types::external_api::views::SledPolicy; + use nexus_types::external_api::views::SledState; use nexus_types::internal_api::params::DnsConfigParams; use nexus_types::internal_api::params::DnsConfigZone; use nexus_types::internal_api::params::DnsRecord; @@ -409,7 +410,8 @@ mod test { .zip(possible_sled_subnets) .map(|(sled_id, subnet)| { let sled_resources = SledResources { - provision_state: SledProvisionState::Provisionable, + policy: SledPolicy::provisionable(), + state: SledState::Active, zpools: BTreeSet::from([ZpoolName::from_str(&format!( "oxp_{}", Uuid::new_v4() diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index ecbb8365fe..07c9f5eec6 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -73,9 +73,10 @@ mod silo_user; mod silo_user_password_hash; mod sled; mod sled_instance; -mod sled_provision_state; +mod sled_policy; mod sled_resource; mod sled_resource_kind; +mod sled_state; mod sled_underlay_subnet_allocation; mod snapshot; mod ssh_key; @@ -161,9 +162,10 @@ pub use silo_user::*; pub use silo_user_password_hash::*; pub use sled::*; pub use sled_instance::*; -pub use sled_provision_state::*; +pub use sled_policy::to_db_sled_policy; // Do not expose DbSledPolicy pub use sled_resource::*; pub use sled_resource_kind::*; +pub use sled_state::*; pub use sled_underlay_subnet_allocation::*; pub use snapshot::*; pub use ssh_key::*; diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 54755486e5..d8344c2258 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -13,7 +13,7 @@ use omicron_common::api::external::SemverVersion; /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(36, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(37, 0, 1); table! { disk (id) { @@ -824,7 +824,8 @@ table! { ip -> Inet, port -> Int4, last_used_address -> Inet, - provision_state -> crate::SledProvisionStateEnum, + sled_policy -> crate::sled_policy::SledPolicyEnum, + sled_state -> crate::SledStateEnum, } } diff --git a/nexus/db-model/src/sled.rs b/nexus/db-model/src/sled.rs index 52968c27d5..47912f89cc 100644 --- a/nexus/db-model/src/sled.rs +++ b/nexus/db-model/src/sled.rs @@ -2,10 +2,11 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-use super::{ByteCount, Generation, SqlU16, SqlU32}; +use super::{ByteCount, Generation, SledState, SqlU16, SqlU32}; use crate::collection::DatastoreCollectionConfig; +use crate::ipv6; use crate::schema::{physical_disk, service, sled, zpool}; -use crate::{ipv6, SledProvisionState}; +use crate::sled_policy::DbSledPolicy; use chrono::{DateTime, Utc}; use db_macros::Asset; use nexus_types::{external_api::shared, external_api::views, identity::Asset}; @@ -60,7 +61,11 @@ pub struct Sled { /// The last IP address provided to a propolis instance on this sled pub last_used_address: ipv6::Ipv6Addr, - provision_state: SledProvisionState, + #[diesel(column_name = sled_policy)] + policy: DbSledPolicy, + + #[diesel(column_name = sled_state)] + state: SledState, } impl Sled { @@ -84,8 +89,15 @@ impl Sled { &self.serial_number } - pub fn provision_state(&self) -> SledProvisionState { - self.provision_state + /// The policy here is the `views::SledPolicy` because we expect external + /// users to always use that. + pub fn policy(&self) -> views::SledPolicy { + self.policy.into() + } + + /// Returns the sled's state. + pub fn state(&self) -> SledState { + self.state } } @@ -99,7 +111,8 @@ impl From for views::Sled { part: sled.part_number, revision: sled.revision, }, - provision_state: sled.provision_state.into(), + policy: sled.policy.into(), + state: sled.state.into(), usable_hardware_threads: sled.usable_hardware_threads.0, usable_physical_ram: *sled.usable_physical_ram, } @@ -197,8 +210,10 @@ impl SledUpdate { serial_number: self.serial_number, part_number: self.part_number, revision: self.revision, - // By default, sleds start as provisionable. - provision_state: SledProvisionState::Provisionable, + // By default, sleds start in-service. + policy: DbSledPolicy::InService, + // Currently, new sleds start in the "active" state. + state: SledState::Active, usable_hardware_threads: self.usable_hardware_threads, usable_physical_ram: self.usable_physical_ram, reservoir_size: self.reservoir_size, diff --git a/nexus/db-model/src/sled_policy.rs b/nexus/db-model/src/sled_policy.rs new file mode 100644 index 0000000000..13ecf10296 --- /dev/null +++ b/nexus/db-model/src/sled_policy.rs @@ -0,0 +1,68 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Database representation of a sled's operator-defined policy. +//! +//! This is related to, but different from `SledState`: a sled's **policy** is +//! its disposition as specified by the operator, while its **state** refers to +//! what's currently on it, as determined by Nexus. +//! +//! For example, a sled might be in the `Active` state, but have a policy of +//! `Expunged` -- this would mean that Nexus knows about resources currently +//! provisioned on the sled, but the operator has said that it should be marked +//! as gone. + +use super::impl_enum_type; +use nexus_types::external_api::views::{SledPolicy, SledProvisionPolicy}; +use serde::{Deserialize, Serialize}; + +impl_enum_type!( + #[derive(Clone, SqlType, Debug, QueryId)] + #[diesel(postgres_type(name = "sled_policy", schema = "public"))] + pub struct SledPolicyEnum; + + /// This type is not actually public, because [`SledPolicy`] has a somewhat + /// different, friendlier shape while being equivalent -- external code + /// should always use [`SledPolicy`]. 
+ /// + /// However, it must be marked `pub` to avoid errors like `crate-private + /// type `DbSledPolicy` in public interface`. Marking this type `pub`, + /// without actually making it public, tricks rustc in a desirable way. + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)] + #[diesel(sql_type = SledPolicyEnum)] + pub enum DbSledPolicy; + + // Enum values + InService => b"in_service" + NoProvision => b"no_provision" + Expunged => b"expunged" +); + +/// Converts a [`SledPolicy`] to a version that can be inserted into a +/// database. +pub fn to_db_sled_policy(policy: SledPolicy) -> DbSledPolicy { + match policy { + SledPolicy::InService { + provision_policy: SledProvisionPolicy::Provisionable, + } => DbSledPolicy::InService, + SledPolicy::InService { + provision_policy: SledProvisionPolicy::NonProvisionable, + } => DbSledPolicy::NoProvision, + SledPolicy::Expunged => DbSledPolicy::Expunged, + } +} + +impl From for SledPolicy { + fn from(policy: DbSledPolicy) -> Self { + match policy { + DbSledPolicy::InService => SledPolicy::InService { + provision_policy: SledProvisionPolicy::Provisionable, + }, + DbSledPolicy::NoProvision => SledPolicy::InService { + provision_policy: SledProvisionPolicy::NonProvisionable, + }, + DbSledPolicy::Expunged => SledPolicy::Expunged, + } + } +} diff --git a/nexus/db-model/src/sled_provision_state.rs b/nexus/db-model/src/sled_provision_state.rs deleted file mode 100644 index ada842a32f..0000000000 --- a/nexus/db-model/src/sled_provision_state.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -use super::impl_enum_type; -use nexus_types::external_api::views; -use serde::{Deserialize, Serialize}; -use thiserror::Error; - -impl_enum_type!( - #[derive(Clone, SqlType, Debug, QueryId)] - #[diesel(postgres_type(name = "sled_provision_state", schema = "public"))] - pub struct SledProvisionStateEnum; - - #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)] - #[diesel(sql_type = SledProvisionStateEnum)] - pub enum SledProvisionState; - - // Enum values - Provisionable => b"provisionable" - NonProvisionable => b"non_provisionable" -); - -impl From for views::SledProvisionState { - fn from(state: SledProvisionState) -> Self { - match state { - SledProvisionState::Provisionable => { - views::SledProvisionState::Provisionable - } - SledProvisionState::NonProvisionable => { - views::SledProvisionState::NonProvisionable - } - } - } -} - -impl From for SledProvisionState { - fn from(state: views::SledProvisionState) -> Self { - match state { - views::SledProvisionState::Provisionable => { - SledProvisionState::Provisionable - } - views::SledProvisionState::NonProvisionable => { - SledProvisionState::NonProvisionable - } - } - } -} - -/// An unknown [`views::SledProvisionState`] was encountered. -#[derive(Clone, Debug, Error)] -#[error("Unknown SledProvisionState")] -pub struct UnknownSledProvisionState; diff --git a/nexus/db-model/src/sled_state.rs b/nexus/db-model/src/sled_state.rs new file mode 100644 index 0000000000..31029f3f4f --- /dev/null +++ b/nexus/db-model/src/sled_state.rs @@ -0,0 +1,59 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
Database representation of a sled's state as understood by Nexus. +//! +//! This is related to, but different from `SledState`: a sled's **policy** is +//! its disposition as specified by the operator, while its **state** refers to +//! what's currently on it, as determined by Nexus. +//! +//! For example, a sled might be in the `Active` state, but have a policy of +//! `Expunged` -- this would mean that Nexus knows about resources currently +//! provisioned on the sled, but the operator has said that it should be marked +//! as gone. + +use super::impl_enum_type; +use nexus_types::external_api::views; +use serde::{Deserialize, Serialize}; +use std::fmt; +use strum::EnumIter; + +impl_enum_type!( + #[derive(Clone, SqlType, Debug, QueryId)] + #[diesel(postgres_type(name = "sled_state", schema = "public"))] + pub struct SledStateEnum; + + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq, Eq, EnumIter)] + #[diesel(sql_type = SledStateEnum)] + pub enum SledState; + + // Enum values + Active => b"active" + Decommissioned => b"decommissioned" +); + +impl fmt::Display for SledState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Forward to the canonical implementation in nexus-types. + views::SledState::from(*self).fmt(f) + } +} + +impl From for views::SledState { + fn from(state: SledState) -> Self { + match state { + SledState::Active => views::SledState::Active, + SledState::Decommissioned => views::SledState::Decommissioned, + } + } +} + +impl From for SledState { + fn from(state: views::SledState) -> Self { + match state { + views::SledState::Active => SledState::Active, + views::SledState::Decommissioned => SledState::Decommissioned, + } + } +} diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml index 9c9a30799e..539a913476 100644 --- a/nexus/db-queries/Cargo.toml +++ b/nexus/db-queries/Cargo.toml @@ -43,6 +43,7 @@ sled-agent-client.workspace = true slog.workspace = true static_assertions.workspace = true steno.workspace = true +strum.workspace = true swrite.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["full"] } @@ -76,10 +77,10 @@ omicron-test-utils.workspace = true openapiv3.workspace = true pem.workspace = true petgraph.workspace = true +predicates.workspace = true pretty_assertions.workspace = true rcgen.workspace = true regex.workspace = true rustls.workspace = true -strum.workspace = true subprocess.workspace = true term.workspace = true diff --git a/nexus/db-queries/src/authz/context.rs b/nexus/db-queries/src/authz/context.rs index 3510da2735..0d6f2a73ac 100644 --- a/nexus/db-queries/src/authz/context.rs +++ b/nexus/db-queries/src/authz/context.rs @@ -217,7 +217,8 @@ mod test { let logctx = dev::test_setup_log("test_unregistered_resource"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = - crate::db::datastore::datastore_test(&logctx, &db).await; + crate::db::datastore::test_utils::datastore_test(&logctx, &db) + .await; // Define a resource that we "forget" to register with Oso. 
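        // Authorization checks against the unregistered resource should
        // fail with an error rather than being silently allowed.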
use super::AuthorizedResource; diff --git a/nexus/db-queries/src/authz/policy_test/mod.rs b/nexus/db-queries/src/authz/policy_test/mod.rs index 00a9904499..b6961bcc30 100644 --- a/nexus/db-queries/src/authz/policy_test/mod.rs +++ b/nexus/db-queries/src/authz/policy_test/mod.rs @@ -62,7 +62,8 @@ use uuid::Uuid; async fn test_iam_roles_behavior() { let logctx = dev::test_setup_log("test_iam_roles"); let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = db::datastore::datastore_test(&logctx, &db).await; + let (opctx, datastore) = + db::datastore::test_utils::datastore_test(&logctx, &db).await; // Before we can create the resources, users, and role assignments that we // need, we must grant the "test-privileged" user privileges to fetch and @@ -328,7 +329,8 @@ async fn test_conferred_roles() { // To start, this test looks a lot like the test above. let logctx = dev::test_setup_log("test_conferred_roles"); let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = db::datastore::datastore_test(&logctx, &db).await; + let (opctx, datastore) = + db::datastore::test_utils::datastore_test(&logctx, &db).await; // Before we can create the resources, users, and role assignments that we // need, we must grant the "test-privileged" user privileges to fetch and diff --git a/nexus/db-queries/src/context.rs b/nexus/db-queries/src/context.rs index aea4a60e66..dfd1fe4322 100644 --- a/nexus/db-queries/src/context.rs +++ b/nexus/db-queries/src/context.rs @@ -357,7 +357,8 @@ mod test { let logctx = dev::test_setup_log("test_background_context"); let mut db = test_setup_database(&logctx.log).await; let (_, datastore) = - crate::db::datastore::datastore_test(&logctx, &db).await; + crate::db::datastore::test_utils::datastore_test(&logctx, &db) + .await; let opctx = OpContext::for_background( logctx.log.new(o!()), Arc::new(authz::Authz::new(&logctx.log)), @@ -389,7 +390,8 @@ mod test { let logctx = dev::test_setup_log("test_background_context"); let mut db = test_setup_database(&logctx.log).await; let (_, datastore) = - crate::db::datastore::datastore_test(&logctx, &db).await; + crate::db::datastore::test_utils::datastore_test(&logctx, &db) + .await; let opctx = OpContext::for_tests(logctx.log.new(o!()), datastore); // Like in test_background_context(), this is essentially a test of the @@ -410,7 +412,8 @@ mod test { let logctx = dev::test_setup_log("test_child_context"); let mut db = test_setup_database(&logctx.log).await; let (_, datastore) = - crate::db::datastore::datastore_test(&logctx, &db).await; + crate::db::datastore::test_utils::datastore_test(&logctx, &db) + .await; let opctx = OpContext::for_background( logctx.log.new(o!()), Arc::new(authz::Authz::new(&logctx.log)), diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index d9df143022..00a75a21da 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -1054,14 +1054,15 @@ impl RunQueryDsl for InsertTargetQuery {} #[cfg(test)] mod tests { use super::*; - use crate::db::datastore::datastore_test; + use crate::db::datastore::test_utils::datastore_test; use nexus_deployment::blueprint_builder::BlueprintBuilder; use nexus_deployment::blueprint_builder::Ensure; use nexus_inventory::now_db_precision; use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::Policy; use nexus_types::deployment::SledResources; - use nexus_types::external_api::views::SledProvisionState; + use 
nexus_types::external_api::views::SledPolicy; + use nexus_types::external_api::views::SledState; use nexus_types::inventory::Collection; use omicron_common::address::Ipv6Subnet; use omicron_common::api::external::Generation; @@ -1126,7 +1127,8 @@ mod tests { .collect(); let ip = ip.unwrap_or_else(|| thread_rng().gen::().into()); SledResources { - provision_state: SledProvisionState::Provisionable, + policy: SledPolicy::provisionable(), + state: SledState::Active, zpools, subnet: Ipv6Subnet::new(ip), } diff --git a/nexus/db-queries/src/db/datastore/disk.rs b/nexus/db-queries/src/db/datastore/disk.rs index 390376e627..2916573322 100644 --- a/nexus/db-queries/src/db/datastore/disk.rs +++ b/nexus/db-queries/src/db/datastore/disk.rs @@ -817,7 +817,7 @@ impl DataStore { mod tests { use super::*; - use crate::db::datastore::datastore_test; + use crate::db::datastore::test_utils::datastore_test; use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; use omicron_common::api::external; diff --git a/nexus/db-queries/src/db/datastore/dns.rs b/nexus/db-queries/src/db/datastore/dns.rs index 180764d38c..b12df1875f 100644 --- a/nexus/db-queries/src/db/datastore/dns.rs +++ b/nexus/db-queries/src/db/datastore/dns.rs @@ -688,7 +688,7 @@ impl DnsVersionUpdateBuilder { #[cfg(test)] mod test { - use crate::db::datastore::datastore_test; + use crate::db::datastore::test_utils::datastore_test; use crate::db::datastore::DnsVersionUpdateBuilder; use crate::db::DataStore; use crate::db::TransactionError; diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs index 6b737f21ac..3f2f4bd127 100644 --- a/nexus/db-queries/src/db/datastore/inventory.rs +++ b/nexus/db-queries/src/db/datastore/inventory.rs @@ -1914,8 +1914,8 @@ impl DataStoreInventoryTest for DataStore { #[cfg(test)] mod test { - use crate::db::datastore::datastore_test; use crate::db::datastore::inventory::DataStoreInventoryTest; + use crate::db::datastore::test_utils::datastore_test; use crate::db::datastore::DataStoreConnection; use crate::db::schema; use anyhow::Context; diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 4634fda9ee..4a37efd612 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -816,7 +816,7 @@ mod test { use std::num::NonZeroU32; use crate::authz; - use crate::db::datastore::datastore_test; + use crate::db::datastore::test_utils::datastore_test; use crate::db::model::{IpPool, IpPoolResource, IpPoolResourceType}; use assert_matches::assert_matches; use nexus_test_utils::db::test_setup_database; diff --git a/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs b/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs index 27a6bad32f..670ca08960 100644 --- a/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs +++ b/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs @@ -379,7 +379,7 @@ fn ipv4_nat_next_version() -> diesel::expression::SqlLiteral { mod test { use std::{net::Ipv4Addr, str::FromStr}; - use crate::db::datastore::datastore_test; + use crate::db::datastore::test_utils::datastore_test; use chrono::Utc; use nexus_db_model::{Ipv4NatEntry, Ipv4NatValues, MacAddr, Vni}; use nexus_test_utils::db::test_setup_database; diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 5f05aa1760..b5eff6cb85 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ 
b/nexus/db-queries/src/db/datastore/mod.rs @@ -44,6 +44,7 @@ use omicron_common::backoff::{ use omicron_common::nexus_config::SchemaConfig; use slog::Logger; use std::net::Ipv6Addr; +use std::num::NonZeroU32; use std::sync::Arc; use uuid::Uuid; @@ -87,6 +88,8 @@ mod ssh_key; mod switch; mod switch_interface; mod switch_port; +#[cfg(test)] +pub(crate) mod test_utils; mod update; mod utilization; mod virtual_provisioning_collection; @@ -105,7 +108,7 @@ pub use instance::InstanceAndActiveVmm; pub use inventory::DataStoreInventoryTest; pub use rack::RackInit; pub use silo::Discoverability; -use std::num::NonZeroU32; +pub use sled::SledUpsertOutput; pub use switch_port::SwitchPortSettingsCombinedResult; pub use virtual_provisioning_collection::StorageType; pub use volume::read_only_resources_associated_with_volume; @@ -352,52 +355,16 @@ pub enum UpdatePrecondition { Value(T), } -/// Constructs a DataStore for use in test suites that has preloaded the -/// built-in users, roles, and role assignments that are needed for basic -/// operation -#[cfg(test)] -pub async fn datastore_test( - logctx: &dropshot::test_util::LogContext, - db: &omicron_test_utils::dev::db::CockroachInstance, -) -> (OpContext, Arc) { - use crate::authn; - - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = Arc::new(db::Pool::new(&logctx.log, &cfg)); - let datastore = - Arc::new(DataStore::new(&logctx.log, pool, None).await.unwrap()); - - // Create an OpContext with the credentials of "db-init" just for the - // purpose of loading the built-in users, roles, and assignments. - let opctx = OpContext::for_background( - logctx.log.new(o!()), - Arc::new(authz::Authz::new(&logctx.log)), - authn::Context::internal_db_init(), - Arc::clone(&datastore), - ); - - // TODO: Can we just call "Populate" instead of doing this? - let rack_id = Uuid::parse_str(nexus_test_utils::RACK_UUID).unwrap(); - datastore.load_builtin_users(&opctx).await.unwrap(); - datastore.load_builtin_roles(&opctx).await.unwrap(); - datastore.load_builtin_role_asgns(&opctx).await.unwrap(); - datastore.load_builtin_silos(&opctx).await.unwrap(); - datastore.load_builtin_projects(&opctx).await.unwrap(); - datastore.load_builtin_vpcs(&opctx).await.unwrap(); - datastore.load_silo_users(&opctx).await.unwrap(); - datastore.load_silo_user_role_assignments(&opctx).await.unwrap(); - datastore - .load_builtin_fleet_virtual_provisioning_collection(&opctx) - .await - .unwrap(); - datastore.load_builtin_rack_data(&opctx, rack_id).await.unwrap(); - - // Create an OpContext with the credentials of "test-privileged" for general - // testing. - let opctx = - OpContext::for_tests(logctx.log.new(o!()), Arc::clone(&datastore)); - - (opctx, datastore) +/// Whether state transitions should be validated. "No" is only accessible in +/// test-only code. +/// +/// Intended only for testing around illegal states. 
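+/// In non-test builds only `Yes` can be constructed, so production code
+/// paths always validate; `No` exists so tests can force a sled into an
+/// arbitrary combination of policy and state.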
+#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[must_use] +enum ValidateTransition { + Yes, + #[cfg(test)] + No, } #[cfg(test)] @@ -406,6 +373,10 @@ mod test { use crate::authn; use crate::authn::SiloAuthnPolicy; use crate::authz; + use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::test_utils::{ + IneligibleSledKind, IneligibleSleds, + }; use crate::db::explain::ExplainableAsync; use crate::db::fixed_data::silo::DEFAULT_SILO; use crate::db::fixed_data::silo::SILO_ID; @@ -414,8 +385,8 @@ mod test { use crate::db::model::{ BlockSize, ConsoleSession, Dataset, DatasetKind, ExternalIp, PhysicalDisk, PhysicalDiskKind, Project, Rack, Region, Service, - ServiceKind, SiloUser, SledBaseboard, SledProvisionState, - SledSystemHardware, SledUpdate, SshKey, VpcSubnet, Zpool, + ServiceKind, SiloUser, SledBaseboard, SledSystemHardware, SledUpdate, + SshKey, VpcSubnet, Zpool, }; use crate::db::queries::vpc_subnet::FilterConflictingVpcSubnetRangesQuery; use chrono::{Duration, Utc}; @@ -435,6 +406,7 @@ mod test { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV6}; use std::num::NonZeroU32; use std::sync::Arc; + use strum::EnumCount; use uuid::Uuid; // Creates a "fake" Sled Baseboard. @@ -648,39 +620,10 @@ mod test { sled_system_hardware_for_test(), rack_id, ); - datastore.sled_upsert(sled_update).await.unwrap(); + datastore.sled_upsert(sled_update).await.unwrap().unwrap(); sled_id } - // Marks a sled as non-provisionable. - async fn mark_sled_non_provisionable( - datastore: &DataStore, - opctx: &OpContext, - sled_id: Uuid, - ) { - let (authz_sled, sled) = LookupPath::new(opctx, datastore) - .sled_id(sled_id) - .fetch_for(authz::Action::Modify) - .await - .unwrap(); - println!("sled: {:?}", sled); - let old_state = datastore - .sled_set_provision_state( - &opctx, - &authz_sled, - SledProvisionState::NonProvisionable, - ) - .await - .unwrap_or_else(|error| { - panic!( - "error marking sled {sled_id} as non-provisionable: {error}" - ) - }); - // The old state should always be provisionable since that's where we - // start. - assert_eq!(old_state, SledProvisionState::Provisionable); - } - fn test_zpool_size() -> ByteCount { ByteCount::from_gibibytes_u32(100) } @@ -742,95 +685,184 @@ mod test { } } - struct TestDataset { - sled_id: Uuid, - dataset_id: Uuid, + #[derive(Debug)] + struct TestDatasets { + // eligible and ineligible aren't currently used, but are probably handy + // for the future. + #[allow(dead_code)] + eligible: SledToDatasetMap, + #[allow(dead_code)] + ineligible: SledToDatasetMap, + + // A map from eligible dataset IDs to their corresponding sled IDs. + eligible_dataset_ids: HashMap, + ineligible_dataset_ids: HashMap, } - async fn create_test_datasets_for_region_allocation( - opctx: &OpContext, - datastore: Arc, - number_of_sleds: usize, - ) -> Vec { - // Create sleds... - let sled_ids: Vec = stream::iter(0..number_of_sleds) - .then(|_| create_test_sled(&datastore)) - .collect() + // Map of sled IDs to dataset IDs. 
+ type SledToDatasetMap = HashMap>; + + impl TestDatasets { + async fn create( + opctx: &OpContext, + datastore: Arc, + num_eligible_sleds: usize, + ) -> Self { + let eligible = + Self::create_impl(opctx, datastore.clone(), num_eligible_sleds) + .await; + + let eligible_dataset_ids = eligible + .iter() + .flat_map(|(sled_id, dataset_ids)| { + dataset_ids + .iter() + .map(move |dataset_id| (*dataset_id, *sled_id)) + }) + .collect(); + + let ineligible = Self::create_impl( + opctx, + datastore.clone(), + IneligibleSledKind::COUNT, + ) .await; - struct PhysicalDisk { - sled_id: Uuid, - disk_id: Uuid, - } + let mut ineligible_sled_ids = ineligible.keys(); - // create 9 disks on each sled - let physical_disks: Vec = stream::iter(sled_ids) - .map(|sled_id| { - let sled_id_iter: Vec = (0..9).map(|_| sled_id).collect(); - stream::iter(sled_id_iter).then(|sled_id| { - let disk_id_future = create_test_physical_disk( - &datastore, - opctx, - sled_id, - PhysicalDiskKind::U2, - ); - async move { - let disk_id = disk_id_future.await; - PhysicalDisk { sled_id, disk_id } - } - }) - }) - .flatten() - .collect() - .await; + // Set up the ineligible sleds. (We're guaranteed that + // IneligibleSledKind::COUNT is the same as the number of next() + // calls below.) + let ineligible_sleds = IneligibleSleds { + non_provisionable: *ineligible_sled_ids.next().unwrap(), + expunged: *ineligible_sled_ids.next().unwrap(), + decommissioned: *ineligible_sled_ids.next().unwrap(), + illegal_decommissioned: *ineligible_sled_ids.next().unwrap(), + }; - #[derive(Copy, Clone)] - struct Zpool { - sled_id: Uuid, - pool_id: Uuid, - } + eprintln!("Setting up ineligible sleds: {:?}", ineligible_sleds); - // 1 pool per disk - let zpools: Vec = stream::iter(physical_disks) - .then(|disk| { - let pool_id_future = - create_test_zpool(&datastore, disk.sled_id, disk.disk_id); - async move { - let pool_id = pool_id_future.await; - Zpool { sled_id: disk.sled_id, pool_id } + ineligible_sleds + .setup(opctx, &datastore) + .await + .expect("error setting up ineligible sleds"); + + // Build a map of dataset IDs to their ineligible kind. + let mut ineligible_dataset_ids = HashMap::new(); + for (kind, sled_id) in ineligible_sleds.iter() { + for dataset_id in ineligible.get(&sled_id).unwrap() { + ineligible_dataset_ids.insert(*dataset_id, kind); } - }) - .collect() - .await; + } - let bogus_addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0); + Self { + eligible, + eligible_dataset_ids, + ineligible, + ineligible_dataset_ids, + } + } - let datasets: Vec = stream::iter(zpools) - .map(|zpool| { - // 3 datasets per zpool, to test that pools are distinct - let zpool_iter: Vec = (0..3).map(|_| zpool).collect(); - stream::iter(zpool_iter).then(|zpool| { - let id = Uuid::new_v4(); - let dataset = Dataset::new( - id, - zpool.pool_id, - bogus_addr, - DatasetKind::Crucible, - ); + // Returns a map of sled ID to dataset IDs. + async fn create_impl( + opctx: &OpContext, + datastore: Arc, + number_of_sleds: usize, + ) -> SledToDatasetMap { + // Create sleds... 
+ let sled_ids: Vec = stream::iter(0..number_of_sleds) + .then(|_| create_test_sled(&datastore)) + .collect() + .await; - let datastore = datastore.clone(); - async move { - datastore.dataset_upsert(dataset).await.unwrap(); + struct PhysicalDisk { + sled_id: Uuid, + disk_id: Uuid, + } - TestDataset { sled_id: zpool.sled_id, dataset_id: id } + // create 9 disks on each sled + let physical_disks: Vec = stream::iter(sled_ids) + .map(|sled_id| { + let sled_id_iter: Vec = + (0..9).map(|_| sled_id).collect(); + stream::iter(sled_id_iter).then(|sled_id| { + let disk_id_future = create_test_physical_disk( + &datastore, + opctx, + sled_id, + PhysicalDiskKind::U2, + ); + async move { + let disk_id = disk_id_future.await; + PhysicalDisk { sled_id, disk_id } + } + }) + }) + .flatten() + .collect() + .await; + + #[derive(Copy, Clone)] + struct Zpool { + sled_id: Uuid, + pool_id: Uuid, + } + + // 1 pool per disk + let zpools: Vec = stream::iter(physical_disks) + .then(|disk| { + let pool_id_future = create_test_zpool( + &datastore, + disk.sled_id, + disk.disk_id, + ); + async move { + let pool_id = pool_id_future.await; + Zpool { sled_id: disk.sled_id, pool_id } } }) - }) - .flatten() - .collect() - .await; + .collect() + .await; + + let bogus_addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0); + + let datasets = stream::iter(zpools) + .map(|zpool| { + // 3 datasets per zpool, to test that pools are distinct + let zpool_iter: Vec = + (0..3).map(|_| zpool).collect(); + stream::iter(zpool_iter).then(|zpool| { + let dataset_id = Uuid::new_v4(); + let dataset = Dataset::new( + dataset_id, + zpool.pool_id, + bogus_addr, + DatasetKind::Crucible, + ); + + let datastore = datastore.clone(); + async move { + datastore.dataset_upsert(dataset).await.unwrap(); + + (zpool.sled_id, dataset_id) + } + }) + }) + .flatten() + .fold( + SledToDatasetMap::new(), + |mut map, (sled_id, dataset_id)| { + // Build a map of sled ID to dataset IDs. + map.entry(sled_id) + .or_insert_with(Vec::new) + .push(dataset_id); + async move { map } + }, + ) + .await; - datasets + datasets + } } #[tokio::test] @@ -841,21 +873,12 @@ mod test { let logctx = dev::test_setup_log("test_region_allocation_strat_random"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let test_datasets = create_test_datasets_for_region_allocation( + let test_datasets = TestDatasets::create( &opctx, datastore.clone(), - // Even though we're going to mark one sled as non-provisionable to - // test that logic, we aren't forcing the datasets to be on - // distinct sleds, so REGION_REDUNDANCY_THRESHOLD is enough. - REGION_REDUNDANCY_THRESHOLD, - ) - .await; - - let non_provisionable_dataset_id = test_datasets[0].dataset_id; - mark_sled_non_provisionable( - &datastore, - &opctx, - test_datasets[0].sled_id, + // We aren't forcing the datasets to be on distinct sleds, so we + // just need one eligible sled. + 1, ) .await; @@ -891,8 +914,16 @@ mod test { // Must be 3 unique datasets assert!(disk_datasets.insert(dataset.id())); - // Dataset must not be non-provisionable. - assert_ne!(dataset.id(), non_provisionable_dataset_id); + // Dataset must not be eligible for provisioning. 
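+                // (`ineligible_dataset_ids` maps each dataset on an
+                // ineligible sled to the reason that sled is ineligible.)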
+ if let Some(kind) = + test_datasets.ineligible_dataset_ids.get(&dataset.id()) + { + panic!( + "Dataset {} was ineligible for provisioning: {:?}", + dataset.id(), + kind + ); + } // Must be 3 unique zpools assert!(disk_zpools.insert(dataset.pool_id)); @@ -923,31 +954,16 @@ mod test { let (opctx, datastore) = datastore_test(&logctx, &db).await; // Create a rack with enough sleds for a successful allocation when we - // require 3 distinct provisionable sleds. - let test_datasets = create_test_datasets_for_region_allocation( + // require 3 distinct eligible sleds. + let test_datasets = TestDatasets::create( &opctx, datastore.clone(), - // We're going to mark one sled as non-provisionable to test that - // logic, and we *are* forcing the datasets to be on distinct - // sleds: hence threshold + 1. - REGION_REDUNDANCY_THRESHOLD + 1, - ) - .await; - - let non_provisionable_dataset_id = test_datasets[0].dataset_id; - mark_sled_non_provisionable( - &datastore, - &opctx, - test_datasets[0].sled_id, + // We're forcing the datasets to be on distinct sleds, hence the + // full REGION_REDUNDANCY_THRESHOLD. + REGION_REDUNDANCY_THRESHOLD, ) .await; - // We need to check that our datasets end up on 3 distinct sleds, but the query doesn't return the sled ID, so we need to reverse map from dataset ID to sled ID - let sled_id_map: HashMap = test_datasets - .into_iter() - .map(|test_dataset| (test_dataset.dataset_id, test_dataset.sled_id)) - .collect(); - // Allocate regions from the datasets for this disk. Do it a few times // for good measure. for alloc_seed in 0..10 { @@ -980,14 +996,25 @@ mod test { // Must be 3 unique datasets assert!(disk_datasets.insert(dataset.id())); - // Dataset must not be non-provisionable. - assert_ne!(dataset.id(), non_provisionable_dataset_id); + // Dataset must not be eligible for provisioning. + if let Some(kind) = + test_datasets.ineligible_dataset_ids.get(&dataset.id()) + { + panic!( + "Dataset {} was ineligible for provisioning: {:?}", + dataset.id(), + kind + ); + } // Must be 3 unique zpools assert!(disk_zpools.insert(dataset.pool_id)); // Must be 3 unique sleds - let sled_id = sled_id_map.get(&dataset.id()).unwrap(); + let sled_id = test_datasets + .eligible_dataset_ids + .get(&dataset.id()) + .unwrap(); assert!(disk_sleds.insert(*sled_id)); assert_eq!(volume_id, region.volume_id()); @@ -1016,21 +1043,12 @@ mod test { // Create a rack without enough sleds for a successful allocation when // we require 3 distinct provisionable sleds. - let test_datasets = create_test_datasets_for_region_allocation( + TestDatasets::create( &opctx, datastore.clone(), - // Here, we need to have REGION_REDUNDANCY_THRESHOLD - 1 - // provisionable sleds to test this failure condition. We're going - // to mark one sled as non-provisionable to test that logic, so we - // need to add 1 to that number. - REGION_REDUNDANCY_THRESHOLD, - ) - .await; - - mark_sled_non_provisionable( - &datastore, - &opctx, - test_datasets[0].sled_id, + // Here, we need to have REGION_REDUNDANCY_THRESHOLD - 1 eligible + // sleds to test this failure condition. 
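+            // (`TestDatasets::create` also sets up one sled per
+            // `IneligibleSledKind`; none of those may be used here.)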
+ REGION_REDUNDANCY_THRESHOLD - 1, ) .await; @@ -1075,7 +1093,7 @@ mod test { dev::test_setup_log("test_region_allocation_is_idempotent"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - create_test_datasets_for_region_allocation( + TestDatasets::create( &opctx, datastore.clone(), REGION_REDUNDANCY_THRESHOLD, @@ -1220,7 +1238,7 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - create_test_datasets_for_region_allocation( + TestDatasets::create( &opctx, datastore.clone(), REGION_REDUNDANCY_THRESHOLD, @@ -1321,7 +1339,7 @@ mod test { sled_system_hardware_for_test(), rack_id, ); - datastore.sled_upsert(sled1).await.unwrap(); + datastore.sled_upsert(sled1).await.unwrap().unwrap(); let addr2 = "[fd00:1df::1]:12345".parse().unwrap(); let sled2_id = "66285c18-0c79-43e0-e54f-95271f271314".parse().unwrap(); @@ -1332,7 +1350,7 @@ mod test { sled_system_hardware_for_test(), rack_id, ); - datastore.sled_upsert(sled2).await.unwrap(); + datastore.sled_upsert(sled2).await.unwrap().unwrap(); let ip = datastore.next_ipv6_address(&opctx, sled1_id).await.unwrap(); let expected_ip = Ipv6Addr::new(0xfd00, 0x1de, 0, 0, 0, 0, 1, 0); diff --git a/nexus/db-queries/src/db/datastore/physical_disk.rs b/nexus/db-queries/src/db/datastore/physical_disk.rs index ecb583ee29..d4e94745aa 100644 --- a/nexus/db-queries/src/db/datastore/physical_disk.rs +++ b/nexus/db-queries/src/db/datastore/physical_disk.rs @@ -137,10 +137,10 @@ impl DataStore { #[cfg(test)] mod test { use super::*; - use crate::db::datastore::datastore_test; use crate::db::datastore::test::{ sled_baseboard_for_test, sled_system_hardware_for_test, }; + use crate::db::datastore::test_utils::datastore_test; use crate::db::model::{PhysicalDiskKind, Sled, SledUpdate}; use dropshot::PaginationOrder; use nexus_test_utils::db::test_setup_database; @@ -163,6 +163,7 @@ mod test { db.sled_upsert(sled_update) .await .expect("Could not upsert sled during test prep") + .unwrap() } fn list_disk_params() -> DataPageParams<'static, Uuid> { diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 99ed71a073..46a3e0af2d 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -853,10 +853,10 @@ impl DataStore { #[cfg(test)] mod test { use super::*; - use crate::db::datastore::datastore_test; use crate::db::datastore::test::{ sled_baseboard_for_test, sled_system_hardware_for_test, }; + use crate::db::datastore::test_utils::datastore_test; use crate::db::datastore::Discoverability; use crate::db::lookup::LookupPath; use crate::db::model::ExternalIp; @@ -1069,6 +1069,7 @@ mod test { db.sled_upsert(sled_update) .await .expect("Could not upsert sled during test prep") + .unwrap() } // Hacky macro helper to: diff --git a/nexus/db-queries/src/db/datastore/sled.rs b/nexus/db-queries/src/db/datastore/sled.rs index eb50061272..8809f4d60d 100644 --- a/nexus/db-queries/src/db/datastore/sled.rs +++ b/nexus/db-queries/src/db/datastore/sled.rs @@ -9,18 +9,23 @@ use super::SQL_BATCH_SIZE; use crate::authz; use crate::context::OpContext; use crate::db; +use crate::db::datastore::ValidateTransition; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; +use crate::db::model::to_db_sled_policy; use crate::db::model::Sled; use crate::db::model::SledResource; +use crate::db::model::SledState; use crate::db::model::SledUpdate; 
use crate::db::pagination::paginated; use crate::db::pagination::Paginator; -use crate::db::update_and_check::UpdateAndCheck; +use crate::db::update_and_check::{UpdateAndCheck, UpdateStatus}; use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; +use nexus_types::external_api::views::SledPolicy; +use nexus_types::external_api::views::SledProvisionPolicy; use nexus_types::identity::Asset; use omicron_common::api::external; use omicron_common::api::external::CreateResult; @@ -28,16 +33,29 @@ use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::ResourceType; +use std::fmt; +use strum::IntoEnumIterator; +use thiserror::Error; use uuid::Uuid; impl DataStore { /// Stores a new sled in the database. + /// + /// Produces `SledUpsertOutput::Decommissioned` if the sled is + /// decommissioned. This is not an error, because `sled_upsert`'s only + /// caller (sled-agent) is not expected to receive this error. pub async fn sled_upsert( &self, sled_update: SledUpdate, - ) -> CreateResult { + ) -> CreateResult { use db::schema::sled::dsl; - diesel::insert_into(dsl::sled) + // required for conditional upsert + use diesel::query_dsl::methods::FilterDsl; + + // TODO: figure out what to do with time_deleted. We want to replace it + // with a time_decommissioned, most probably. + + let query = diesel::insert_into(dsl::sled) .values(sled_update.clone().into_insertable()) .on_conflict(dsl::id) .do_update() @@ -52,9 +70,13 @@ impl DataStore { dsl::usable_physical_ram.eq(sled_update.usable_physical_ram), dsl::reservoir_size.eq(sled_update.reservoir_size), )) - .returning(Sled::as_returning()) + .filter(dsl::sled_state.ne(SledState::Decommissioned)) + .returning(Sled::as_returning()); + + let sled: Option = query .get_result_async(&*self.pool_connection_unauthorized().await?) .await + .optional() .map_err(|e| { public_error_from_diesel( e, @@ -63,7 +85,19 @@ impl DataStore { &sled_update.id().to_string(), ), ) - }) + })?; + + // The only situation in which a sled is not returned is if the + // `.filter(dsl::sled_state.ne(SledState::Decommissioned))` is not + // satisfied. + // + // If we want to return a sled even if it's decommissioned here, we may + // have to do something more complex. See + // https://stackoverflow.com/q/34708509. + match sled { + Some(sled) => Ok(SledUpsertOutput::Updated(sled)), + None => Ok(SledUpsertOutput::Decommissioned), + } } pub async fn sled_list( @@ -194,10 +228,11 @@ impl DataStore { .and(sled_has_space_in_reservoir), ) .filter(sled_dsl::time_deleted.is_null()) - // Filter out sleds that are not provisionable. - .filter(sled_dsl::provision_state.eq( - db::model::SledProvisionState::Provisionable, + // Ensure that the sled is in-service and active. + .filter(sled_dsl::sled_policy.eq( + to_db_sled_policy(SledPolicy::provisionable()), )) + .filter(sled_dsl::sled_state.eq(SledState::Active)) .select(sled_dsl::id) .into_boxed(); @@ -269,15 +304,61 @@ impl DataStore { Ok(()) } - /// Sets the provision state for this sled. + /// Sets the provision policy for this sled. + /// + /// Errors if the sled is not in service. + /// + /// Returns the previous policy. 
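+    ///
+    /// Expunging a sled is handled separately by
+    /// `sled_set_policy_to_expunged`; both entry points share
+    /// `sled_set_policy_impl`. A sketch of expected usage (assuming an
+    /// `authz_sled` has already been looked up):
+    ///
+    /// ```ignore
+    /// let previous = datastore
+    ///     .sled_set_provision_policy(
+    ///         &opctx,
+    ///         &authz_sled,
+    ///         SledProvisionPolicy::NonProvisionable,
+    ///     )
+    ///     .await?;
+    /// ```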
+ pub async fn sled_set_provision_policy( + &self, + opctx: &OpContext, + authz_sled: &authz::Sled, + policy: SledProvisionPolicy, + ) -> Result { + match self + .sled_set_policy_impl( + opctx, + authz_sled, + SledPolicy::InService { provision_policy: policy }, + ValidateTransition::Yes, + ) + .await + { + Ok(old_policy) => Ok(old_policy + .provision_policy() + .expect("only valid policy was in-service")), + Err(error) => Err(error.into_external_error()), + } + } + + /// Marks a sled as expunged, as directed by the operator. /// - /// Returns the previous state. - pub async fn sled_set_provision_state( + /// This is an irreversible process! It should only be called after + /// sufficient warning to the operator. + /// + /// This is idempotent, and it returns the old policy of the sled. + pub async fn sled_set_policy_to_expunged( &self, opctx: &OpContext, authz_sled: &authz::Sled, - state: db::model::SledProvisionState, - ) -> Result { + ) -> Result { + self.sled_set_policy_impl( + opctx, + authz_sled, + SledPolicy::Expunged, + ValidateTransition::Yes, + ) + .await + .map_err(|error| error.into_external_error()) + } + + pub(super) async fn sled_set_policy_impl( + &self, + opctx: &OpContext, + authz_sled: &authz::Sled, + new_policy: SledPolicy, + check: ValidateTransition, + ) -> Result { use db::schema::sled::dsl; opctx.authorize(authz::Action::Modify, authz_sled).await?; @@ -285,38 +366,350 @@ impl DataStore { let sled_id = authz_sled.id(); let query = diesel::update(dsl::sled) .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(sled_id)) - .filter(dsl::provision_state.ne(state)) + .filter(dsl::id.eq(sled_id)); + + let t = SledTransition::Policy(new_policy); + let valid_old_policies = t.valid_old_policies(); + let valid_old_states = t.valid_old_states(); + + let query = match check { + ValidateTransition::Yes => query + .filter(dsl::sled_policy.eq_any( + valid_old_policies.into_iter().map(to_db_sled_policy), + )) + .filter( + dsl::sled_state.eq_any(valid_old_states.iter().copied()), + ) + .into_boxed(), + #[cfg(test)] + ValidateTransition::No => query.into_boxed(), + }; + + let query = query .set(( - dsl::provision_state.eq(state), + dsl::sled_policy.eq(to_db_sled_policy(new_policy)), dsl::time_modified.eq(Utc::now()), )) .check_if_exists::(sled_id); + let result = query .execute_and_check(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; - Ok(result.found.provision_state()) + match (check, result.status) { + (ValidateTransition::Yes, UpdateStatus::Updated) => { + Ok(result.found.policy()) + } + (ValidateTransition::Yes, UpdateStatus::NotUpdatedButExists) => { + // Two reasons this can happen: + // 1. An idempotent update: this is treated as a success. + // 2. Invalid state transition: a failure. + // + // To differentiate between the two, check that the new policy + // is the same as the old policy, and that the old state is + // valid. + if result.found.policy() == new_policy + && valid_old_states.contains(&result.found.state()) + { + Ok(result.found.policy()) + } else { + Err(TransitionError::InvalidTransition { + current: result.found, + transition: SledTransition::Policy(new_policy), + }) + } + } + #[cfg(test)] + (ValidateTransition::No, _) => Ok(result.found.policy()), + } + } + + /// Marks the state of the sled as decommissioned, as believed by Nexus. + /// + /// This is an irreversible process! It should only be called after all + /// resources previously on the sled have been migrated over. 
+    ///
+    /// This is idempotent, and it returns the old state of the sled.
+    ///
+    /// # Errors
+    ///
+    /// This method returns an error if the sled policy is not a state that is
+    /// valid to decommission from (i.e. if, for the current sled policy,
+    /// [`SledPolicy::is_decommissionable`] returns `false`).
+    pub async fn sled_set_state_to_decommissioned(
+        &self,
+        opctx: &OpContext,
+        authz_sled: &authz::Sled,
+    ) -> Result<SledState, external::Error> {
+        self.sled_set_state_impl(
+            opctx,
+            authz_sled,
+            SledState::Decommissioned,
+            ValidateTransition::Yes,
+        )
+        .await
+        .map_err(|error| error.into_external_error())
+    }
+
+    pub(super) async fn sled_set_state_impl(
+        &self,
+        opctx: &OpContext,
+        authz_sled: &authz::Sled,
+        new_state: SledState,
+        check: ValidateTransition,
+    ) -> Result<SledState, TransitionError> {
+        use db::schema::sled::dsl;
+
+        opctx.authorize(authz::Action::Modify, authz_sled).await?;
+
+        let sled_id = authz_sled.id();
+        let query = diesel::update(dsl::sled)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(sled_id));
+
+        let t = SledTransition::State(new_state);
+        let valid_old_policies = t.valid_old_policies();
+        let valid_old_states = t.valid_old_states();
+
+        let query = match check {
+            ValidateTransition::Yes => query
+                .filter(dsl::sled_policy.eq_any(
+                    valid_old_policies.iter().copied().map(to_db_sled_policy),
+                ))
+                .filter(dsl::sled_state.eq_any(valid_old_states))
+                .into_boxed(),
+            #[cfg(test)]
+            ValidateTransition::No => query.into_boxed(),
+        };
+
+        let query = query
+            .set((
+                dsl::sled_state.eq(new_state),
+                dsl::time_modified.eq(Utc::now()),
+            ))
+            .check_if_exists::<Sled>(sled_id);
+
+        let result = query
+            .execute_and_check(&*self.pool_connection_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+
+        match (check, result.status) {
+            (ValidateTransition::Yes, UpdateStatus::Updated) => {
+                Ok(result.found.state())
+            }
+            (ValidateTransition::Yes, UpdateStatus::NotUpdatedButExists) => {
+                // Two reasons this can happen:
+                // 1. An idempotent update: this is treated as a success.
+                // 2. Invalid state transition: a failure.
+                //
+                // To differentiate between the two, check that the new state
+                // is the same as the old state, and the found policy is valid.
+                if result.found.state() == new_state
+                    && valid_old_policies.contains(&result.found.policy())
+                {
+                    Ok(result.found.state())
+                } else {
+                    Err(TransitionError::InvalidTransition {
+                        current: result.found,
+                        transition: SledTransition::State(new_state),
+                    })
+                }
+            }
+            #[cfg(test)]
+            (ValidateTransition::No, _) => Ok(result.found.state()),
+        }
+    }
+}
+
+/// The result of [`DataStore::sled_upsert`].
+#[derive(Clone, Debug)]
+#[must_use]
+pub enum SledUpsertOutput {
+    /// The sled was updated.
+    Updated(Sled),
+    /// The sled was not updated because it is decommissioned.
+    Decommissioned,
+}
+
+impl SledUpsertOutput {
+    /// Returns the sled if it was updated, or panics if it was not.
+    pub fn unwrap(self) -> Sled {
+        match self {
+            SledUpsertOutput::Updated(sled) => sled,
+            SledUpsertOutput::Decommissioned => {
+                panic!("sled was decommissioned, not updated")
+            }
+        }
+    }
+}
+
+// ---
+// State transition validators
+// ---
+
+// The functions in this section return the old policies or states that are
+// valid for a new policy or state, except idempotent transitions.
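+//
+// As an illustrative sketch (not exercised by this module), the intended
+// reading of these validators:
+//
+//     let t = SledTransition::Policy(SledPolicy::Expunged);
+//     // Expunging is allowed from any in-service policy...
+//     assert!(t.valid_old_policies().contains(&SledPolicy::provisionable()));
+//     // ...but only while the sled is still active.
+//     assert_eq!(t.valid_old_states(), vec![SledState::Active]);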
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub(super) enum SledTransition {
+    Policy(SledPolicy),
+    State(SledState),
+}
+
+impl SledTransition {
+    /// Returns the list of valid old policies, other than the provided one
+    /// (which is always considered valid).
+    ///
+    /// For a more descriptive listing of valid transitions, see
+    /// [`test_sled_transitions`].
+    fn valid_old_policies(&self) -> Vec<SledPolicy> {
+        use SledPolicy::*;
+        use SledProvisionPolicy::*;
+        use SledState::*;
+
+        match self {
+            SledTransition::Policy(new_policy) => match new_policy {
+                InService { provision_policy: Provisionable } => {
+                    vec![InService { provision_policy: NonProvisionable }]
+                }
+                InService { provision_policy: NonProvisionable } => {
+                    vec![InService { provision_policy: Provisionable }]
+                }
+                Expunged => SledProvisionPolicy::iter()
+                    .map(|provision_policy| InService { provision_policy })
+                    .collect(),
+            },
+            SledTransition::State(state) => {
+                match state {
+                    Active => {
+                        // Any policy is valid for the active state.
+                        SledPolicy::iter().collect()
+                    }
+                    Decommissioned => {
+                        SledPolicy::all_decommissionable().to_vec()
+                    }
+                }
+            }
+        }
+    }
+
+    /// Returns the list of valid old states, other than the provided one
+    /// (which is always considered valid).
+    ///
+    /// For a more descriptive listing of valid transitions, see
+    /// [`test_sled_transitions`].
+    fn valid_old_states(&self) -> Vec<SledState> {
+        use SledState::*;
+
+        match self {
+            SledTransition::Policy(_) => {
+                // Policies can only be transitioned in the active state. (In
+                // the future, this will include other non-decommissioned
+                // states.)
+                vec![Active]
+            }
+            SledTransition::State(state) => match state {
+                Active => vec![],
+                Decommissioned => vec![Active],
+            },
+        }
+    }
+}
+
+impl fmt::Display for SledTransition {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            SledTransition::Policy(policy) => {
+                write!(f, "policy \"{}\"", policy)
+            }
+            SledTransition::State(state) => write!(f, "state \"{}\"", state),
+        }
+    }
+}
+
+impl IntoEnumIterator for SledTransition {
+    type Iterator = std::vec::IntoIter<SledTransition>;
+
+    fn iter() -> Self::Iterator {
+        let v: Vec<_> = SledPolicy::iter()
+            .map(SledTransition::Policy)
+            .chain(SledState::iter().map(SledTransition::State))
+            .collect();
+        v.into_iter()
+    }
+}
+
+/// An error that occurred while setting a policy or state.
+#[derive(Debug, Error)]
+#[must_use]
+pub(super) enum TransitionError {
+    /// The state transition check failed.
+    ///
+    /// The sled is returned.
+    #[error(
+        "sled id {} has current policy \"{}\" and state \"{}\" \
+        and the transition to {} is not permitted",
+        .current.id(),
+        .current.policy(),
+        .current.state(),
+        .transition,
+    )]
+    InvalidTransition {
+        /// The current sled as fetched from the database.
+        current: Sled,
+
+        /// The new policy or state that was attempted.
+        transition: SledTransition,
+    },
+
+    /// Some other kind of error occurred.
+    #[error("database error")]
+    External(#[from] external::Error),
+}
+
+impl TransitionError {
+    fn into_external_error(self) -> external::Error {
+        match self {
+            TransitionError::InvalidTransition { .. } => {
+                external::Error::conflict(self.to_string())
+            }
+            TransitionError::External(e) => e.clone(),
+        }
+    }
+
+    #[cfg(test)]
+    pub(super) fn ensure_invalid_transition(self) -> anyhow::Result<()> {
+        match self {
+            TransitionError::InvalidTransition { ..
} => Ok(()), + TransitionError::External(e) => Err(anyhow::anyhow!(e) + .context("expected invalid transition, got other error")), + } } } #[cfg(test)] mod test { use super::*; - use crate::db::datastore::datastore_test; use crate::db::datastore::test::{ sled_baseboard_for_test, sled_system_hardware_for_test, }; + use crate::db::datastore::test_utils::{ + datastore_test, sled_set_policy, sled_set_state, Expected, + IneligibleSleds, + }; use crate::db::lookup::LookupPath; use crate::db::model::ByteCount; use crate::db::model::SqlU32; + use anyhow::{Context, Result}; + use itertools::Itertools; use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Asset; use omicron_common::api::external; use omicron_test_utils::dev; + use predicates::{prelude::*, BoxPredicate}; use std::net::{Ipv6Addr, SocketAddrV6}; - use std::num::NonZeroU32; fn rack_id() -> Uuid { Uuid::parse_str(nexus_test_utils::RACK_UUID).unwrap() @@ -324,13 +717,13 @@ mod test { #[tokio::test] async fn upsert_sled_updates_hardware() { - let logctx = dev::test_setup_log("upsert_sled"); + let logctx = dev::test_setup_log("upsert_sled_updates_hardware"); let mut db = test_setup_database(&logctx.log).await; let (_opctx, datastore) = datastore_test(&logctx, &db).await; let mut sled_update = test_new_sled_update(); let observed_sled = - datastore.sled_upsert(sled_update.clone()).await.unwrap(); + datastore.sled_upsert(sled_update.clone()).await.unwrap().unwrap(); assert_eq!( observed_sled.usable_hardware_threads, sled_update.usable_hardware_threads @@ -362,7 +755,8 @@ mod test { let observed_sled = datastore .sled_upsert(sled_update.clone()) .await - .expect("Could not upsert sled during test prep"); + .expect("Could not upsert sled during test prep") + .unwrap(); assert_eq!( observed_sled.usable_hardware_threads, sled_update.usable_hardware_threads @@ -377,37 +771,128 @@ mod test { logctx.cleanup_successful(); } - /// Test that new reservations aren't created on non-provisionable sleds. #[tokio::test] - async fn sled_reservation_create_non_provisionable() { + async fn upsert_sled_doesnt_update_decommissioned() { let logctx = - dev::test_setup_log("sled_reservation_create_non_provisionable"); + dev::test_setup_log("upsert_sled_doesnt_update_decommissioned"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let sled_update = test_new_sled_update(); - let non_provisionable_sled = - datastore.sled_upsert(sled_update.clone()).await.unwrap(); + let mut sled_update = test_new_sled_update(); + let observed_sled = + datastore.sled_upsert(sled_update.clone()).await.unwrap().unwrap(); + assert_eq!( + observed_sled.usable_hardware_threads, + sled_update.usable_hardware_threads + ); + assert_eq!( + observed_sled.usable_physical_ram, + sled_update.usable_physical_ram + ); + assert_eq!(observed_sled.reservoir_size, sled_update.reservoir_size); - let (authz_sled, _) = LookupPath::new(&opctx, &datastore) - .sled_id(non_provisionable_sled.id()) - .fetch_for(authz::Action::Modify) - .await - .unwrap(); + // Set the sled to decommissioned (this is not a legal transition, but + // we don't care about sled policy in sled_upsert, just the state.) 
+        sled_set_state(
+            &opctx,
+            &datastore,
+            observed_sled.id(),
+            SledState::Decommissioned,
+            ValidateTransition::No,
+            Expected::Ok(SledState::Active),
+        )
+        .await
+        .unwrap();

-        let old_state = datastore
-            .sled_set_provision_state(
-                &opctx,
-                &authz_sled,
-                db::model::SledProvisionState::NonProvisionable,
+        // Modify the sizes of hardware
+        sled_update.usable_hardware_threads =
+            SqlU32::new(sled_update.usable_hardware_threads.0 + 1);
+        const MIB: u64 = 1024 * 1024;
+        sled_update.usable_physical_ram = ByteCount::from(
+            external::ByteCount::try_from(
+                sled_update.usable_physical_ram.0.to_bytes() + MIB,
             )
+            .unwrap(),
+        );
+        sled_update.reservoir_size = ByteCount::from(
+            external::ByteCount::try_from(
+                sled_update.reservoir_size.0.to_bytes() + MIB,
+            )
+            .unwrap(),
+        );
+
+        // Upserting the sled should produce the `Decommissioned` variant.
+        let sled = datastore
+            .sled_upsert(sled_update.clone())
+            .await
+            .expect("updating a decommissioned sled should succeed");
+        assert!(
+            matches!(sled, SledUpsertOutput::Decommissioned),
+            "sled should be decommissioned"
+        );
+
+        // The sled should not have been updated.
+        let (_, observed_sled_2) = LookupPath::new(&opctx, &datastore)
+            .sled_id(observed_sled.id())
+            .fetch_for(authz::Action::Modify)
             .await
             .unwrap();
         assert_eq!(
-            old_state,
-            db::model::SledProvisionState::Provisionable,
-            "a newly created sled starts as provisionable"
+            observed_sled_2.usable_hardware_threads,
+            observed_sled.usable_hardware_threads,
+            "usable_hardware_threads should not have changed"
         );
+        assert_eq!(
+            observed_sled_2.usable_physical_ram,
+            observed_sled.usable_physical_ram,
+            "usable_physical_ram should not have changed"
+        );
+        assert_eq!(
+            observed_sled_2.reservoir_size, observed_sled.reservoir_size,
+            "reservoir_size should not have changed"
+        );
+
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
+    }
+
+    /// Test that new reservations aren't created on non-provisionable sleds.
+    #[tokio::test]
+    async fn sled_reservation_create_non_provisionable() {
+        let logctx =
+            dev::test_setup_log("sled_reservation_create_non_provisionable");
+        let mut db = test_setup_database(&logctx.log).await;
+        let (opctx, datastore) = datastore_test(&logctx, &db).await;
+
+        // Define some sleds that resources cannot be provisioned on.
+        let non_provisionable_sled = datastore
+            .sled_upsert(test_new_sled_update())
+            .await
+            .unwrap()
+            .unwrap();
+        let expunged_sled = datastore
+            .sled_upsert(test_new_sled_update())
+            .await
+            .unwrap()
+            .unwrap();
+        let decommissioned_sled = datastore
+            .sled_upsert(test_new_sled_update())
+            .await
+            .unwrap()
+            .unwrap();
+        let illegal_decommissioned_sled = datastore
+            .sled_upsert(test_new_sled_update())
+            .await
+            .unwrap()
+            .unwrap();
+
+        let ineligible_sleds = IneligibleSleds {
+            non_provisionable: non_provisionable_sled.id(),
+            expunged: expunged_sled.id(),
+            decommissioned: decommissioned_sled.id(),
+            illegal_decommissioned: illegal_decommissioned_sled.id(),
+        };
+        ineligible_sleds.setup(&opctx, &datastore).await.unwrap();

         // This should be an error since there are no provisionable sleds.
         let resources = db::model::Resources::new(
@@ -432,13 +917,7 @@ mod test {
         // Now add a provisionable sled and try again.
let sled_update = test_new_sled_update(); let provisionable_sled = - datastore.sled_upsert(sled_update.clone()).await.unwrap(); - - let sleds = datastore - .sled_list(&opctx, &first_page(NonZeroU32::new(10).unwrap())) - .await - .unwrap(); - println!("sleds: {:?}", sleds); + datastore.sled_upsert(sled_update.clone()).await.unwrap().unwrap(); // Try a few times to ensure that resources never get allocated to the // non-provisionable sled. @@ -470,6 +949,212 @@ mod test { logctx.cleanup_successful(); } + #[tokio::test] + async fn test_sled_transitions() { + // Test valid and invalid state and policy transitions. + let logctx = dev::test_setup_log("test_sled_transitions"); + let mut db = test_setup_database(&logctx.log).await; + + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // This test generates all possible sets of transitions. Below, we list + // the before and after predicates for valid transitions. + // + // While it's possible to derive the list of valid transitions from the + // [`Transition::valid_old_policies`] and + // [`Transition::valid_old_states`] methods, we list them here + // explicitly since tests are really about writing things down twice. + let valid_transitions = [ + ( + // In-service and active sleds can be marked as expunged. + Before::new( + predicate::in_iter(SledPolicy::all_in_service()), + predicate::eq(SledState::Active), + ), + SledTransition::Policy(SledPolicy::Expunged), + ), + ( + // The provision policy of in-service sleds can be changed, or + // kept the same (1 of 2). + Before::new( + predicate::in_iter(SledPolicy::all_in_service()), + predicate::eq(SledState::Active), + ), + SledTransition::Policy(SledPolicy::InService { + provision_policy: SledProvisionPolicy::Provisionable, + }), + ), + ( + // (2 of 2) + Before::new( + predicate::in_iter(SledPolicy::all_in_service()), + predicate::eq(SledState::Active), + ), + SledTransition::Policy(SledPolicy::InService { + provision_policy: SledProvisionPolicy::NonProvisionable, + }), + ), + ( + // Active sleds can be marked as active, regardless of their + // policy. + Before::new( + predicate::always(), + predicate::eq(SledState::Active), + ), + SledTransition::State(SledState::Active), + ), + ( + // Expunged sleds can be marked as decommissioned. + Before::new( + predicate::eq(SledPolicy::Expunged), + predicate::eq(SledState::Active), + ), + SledTransition::State(SledState::Decommissioned), + ), + ( + // Expunged sleds can always be marked as expunged again, as + // long as they aren't already decommissioned (we do not allow + // any transitions once a sled is decommissioned). + Before::new( + predicate::eq(SledPolicy::Expunged), + predicate::ne(SledState::Decommissioned), + ), + SledTransition::Policy(SledPolicy::Expunged), + ), + ( + // Decommissioned sleds can always be marked as decommissioned + // again, as long as their policy is decommissionable. + Before::new( + predicate::in_iter(SledPolicy::all_decommissionable()), + predicate::eq(SledState::Decommissioned), + ), + SledTransition::State(SledState::Decommissioned), + ), + ]; + + // Generate all possible transitions. + let all_transitions = SledPolicy::iter() + .cartesian_product(SledState::iter()) + .cartesian_product(SledTransition::iter()) + .enumerate(); + + // Set up a sled to test against. 
+ let sled = datastore + .sled_upsert(test_new_sled_update()) + .await + .unwrap() + .unwrap(); + let sled_id = sled.id(); + + for (i, ((policy, state), after)) in all_transitions { + test_sled_state_transitions_once( + &opctx, + &datastore, + sled_id, + policy, + state, + after, + &valid_transitions, + ) + .await + .with_context(|| { + format!( + "failed on transition {i} (policy: {policy}, \ + state: {state:?}, after: {after:?})", + ) + }) + .unwrap(); + } + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + async fn test_sled_state_transitions_once( + opctx: &OpContext, + datastore: &DataStore, + sled_id: Uuid, + before_policy: SledPolicy, + before_state: SledState, + after: SledTransition, + valid_transitions: &[(Before, SledTransition)], + ) -> Result<()> { + // Is this a valid transition? + let is_valid = valid_transitions.iter().any( + |(Before(valid_policy, valid_state), valid_after)| { + valid_policy.eval(&before_policy) + && valid_state.eval(&before_state) + && valid_after == &after + }, + ); + + // Set the sled to the initial policy and state, ignoring state + // transition errors (this is just to set up the initial state). + sled_set_policy( + opctx, + datastore, + sled_id, + before_policy, + ValidateTransition::No, + Expected::Ignore, + ) + .await?; + + sled_set_state( + opctx, + datastore, + sled_id, + before_state, + ValidateTransition::No, + Expected::Ignore, + ) + .await?; + + // Now perform the transition to the new policy or state. + match after { + SledTransition::Policy(new_policy) => { + let expected = if is_valid { + Expected::Ok(before_policy) + } else { + Expected::Invalid + }; + + sled_set_policy( + opctx, + datastore, + sled_id, + new_policy, + ValidateTransition::Yes, + expected, + ) + .await?; + } + SledTransition::State(new_state) => { + let expected = if is_valid { + Expected::Ok(before_state) + } else { + Expected::Invalid + }; + + sled_set_state( + opctx, + datastore, + sled_id, + new_state, + ValidateTransition::Yes, + expected, + ) + .await?; + } + } + + Ok(()) + } + + // --- + // Helper methods + // --- + fn test_new_sled_update() -> SledUpdate { let sled_id = Uuid::new_v4(); let addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0); @@ -482,13 +1167,17 @@ mod test { ) } - /// Returns pagination parameters to fetch the first page of results for a - /// paginated endpoint - fn first_page<'a, T>(limit: NonZeroU32) -> DataPageParams<'a, T> { - DataPageParams { - marker: None, - direction: dropshot::PaginationOrder::Ascending, - limit, + /// Initial state for state transitions. 
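+    ///
+    /// For example, an entry matching any in-service, active sled (a sketch
+    /// mirroring the entries in the `valid_transitions` table above):
+    ///
+    /// ```ignore
+    /// let before = Before::new(
+    ///     predicate::in_iter(SledPolicy::all_in_service()),
+    ///     predicate::eq(SledState::Active),
+    /// );
+    /// ```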
+    #[derive(Debug)]
+    struct Before(BoxPredicate<SledPolicy>, BoxPredicate<SledState>);
+
+    impl Before {
+        fn new<P, S>(policy: P, state: S) -> Self
+        where
+            P: Predicate<SledPolicy> + Send + Sync + 'static,
+            S: Predicate<SledState> + Send + Sync + 'static,
+        {
+            Before(policy.boxed(), state.boxed())
         }
     }

diff --git a/nexus/db-queries/src/db/datastore/switch_port.rs b/nexus/db-queries/src/db/datastore/switch_port.rs
index 4771768e43..842cd4bf11 100644
--- a/nexus/db-queries/src/db/datastore/switch_port.rs
+++ b/nexus/db-queries/src/db/datastore/switch_port.rs
@@ -1192,7 +1192,8 @@ impl DataStore {

 #[cfg(test)]
 mod test {
-    use crate::db::datastore::{datastore_test, UpdatePrecondition};
+    use crate::db::datastore::test_utils::datastore_test;
+    use crate::db::datastore::UpdatePrecondition;
     use nexus_test_utils::db::test_setup_database;
     use nexus_types::external_api::params::{
         BgpAnnounceSetCreate, BgpConfigCreate, BgpPeer, BgpPeerConfig,
diff --git a/nexus/db-queries/src/db/datastore/test_utils.rs b/nexus/db-queries/src/db/datastore/test_utils.rs
new file mode 100644
index 0000000000..6d26ad044b
--- /dev/null
+++ b/nexus/db-queries/src/db/datastore/test_utils.rs
@@ -0,0 +1,343 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Shared test-only code for the `datastore` module.
+
+use crate::authz;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::datastore::ValidateTransition;
+use crate::db::lookup::LookupPath;
+use crate::db::DataStore;
+use anyhow::bail;
+use anyhow::ensure;
+use anyhow::Context;
+use anyhow::Result;
+use dropshot::test_util::LogContext;
+use nexus_db_model::SledState;
+use nexus_types::external_api::views::SledPolicy;
+use nexus_types::external_api::views::SledProvisionPolicy;
+use omicron_test_utils::dev::db::CockroachInstance;
+use std::sync::Arc;
+use strum::EnumCount;
+use uuid::Uuid;
+
+/// Constructs a DataStore for use in test suites that has preloaded the
+/// built-in users, roles, and role assignments that are needed for basic
+/// operation
+#[cfg(test)]
+pub async fn datastore_test(
+    logctx: &LogContext,
+    db: &CockroachInstance,
+) -> (OpContext, Arc<DataStore>) {
+    use crate::authn;
+
+    let cfg = db::Config { url: db.pg_config().clone() };
+    let pool = Arc::new(db::Pool::new(&logctx.log, &cfg));
+    let datastore =
+        Arc::new(DataStore::new(&logctx.log, pool, None).await.unwrap());
+
+    // Create an OpContext with the credentials of "db-init" just for the
+    // purpose of loading the built-in users, roles, and assignments.
+    let opctx = OpContext::for_background(
+        logctx.log.new(o!()),
+        Arc::new(authz::Authz::new(&logctx.log)),
+        authn::Context::internal_db_init(),
+        Arc::clone(&datastore),
+    );
+
+    // TODO: Can we just call "Populate" instead of doing this?
+    let rack_id = Uuid::parse_str(nexus_test_utils::RACK_UUID).unwrap();
+    datastore.load_builtin_users(&opctx).await.unwrap();
+    datastore.load_builtin_roles(&opctx).await.unwrap();
+    datastore.load_builtin_role_asgns(&opctx).await.unwrap();
+    datastore.load_builtin_silos(&opctx).await.unwrap();
+    datastore.load_builtin_projects(&opctx).await.unwrap();
+    datastore.load_builtin_vpcs(&opctx).await.unwrap();
+    datastore.load_silo_users(&opctx).await.unwrap();
+    datastore.load_silo_user_role_assignments(&opctx).await.unwrap();
+    datastore
+        .load_builtin_fleet_virtual_provisioning_collection(&opctx)
+        .await
+        .unwrap();
+    datastore.load_builtin_rack_data(&opctx, rack_id).await.unwrap();
+
+    // Create an OpContext with the credentials of "test-privileged" for
+    // general testing.
+    let opctx =
+        OpContext::for_tests(logctx.log.new(o!()), Arc::clone(&datastore));
+
+    (opctx, datastore)
+}
+
+/// Denotes a specific way in which a sled is ineligible.
+///
+/// This should match the list of sleds in `IneligibleSleds`.
+#[derive(
+    Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, EnumCount,
+)]
+pub(super) enum IneligibleSledKind {
+    NonProvisionable,
+    Expunged,
+    Decommissioned,
+    IllegalDecommissioned,
+}
+
+/// Specifies sleds to be marked as ineligible for provisioning.
+///
+/// This is less error-prone than several places duplicating this logic.
+#[derive(Debug)]
+pub(super) struct IneligibleSleds {
+    pub(super) non_provisionable: Uuid,
+    pub(super) expunged: Uuid,
+    pub(super) decommissioned: Uuid,
+    pub(super) illegal_decommissioned: Uuid,
+}
+
+impl IneligibleSleds {
+    pub(super) fn iter(
+        &self,
+    ) -> impl Iterator<Item = (IneligibleSledKind, Uuid)> {
+        [
+            (IneligibleSledKind::NonProvisionable, self.non_provisionable),
+            (IneligibleSledKind::Expunged, self.expunged),
+            (IneligibleSledKind::Decommissioned, self.decommissioned),
+            (
+                IneligibleSledKind::IllegalDecommissioned,
+                self.illegal_decommissioned,
+            ),
+        ]
+        .into_iter()
+    }
+
+    /// Marks the provided sleds as ineligible for provisioning.
+    ///
+    /// Assumes that:
+    ///
+    /// * the sleds have just been set up and are in the default state.
+    /// * the UUIDs are all distinct.
+    pub(super) async fn setup(
+        &self,
+        opctx: &OpContext,
+        datastore: &DataStore,
+    ) -> Result<()> {
+        let non_provisionable_fut = async {
+            sled_set_policy(
+                &opctx,
+                &datastore,
+                self.non_provisionable,
+                SledPolicy::InService {
+                    provision_policy: SledProvisionPolicy::NonProvisionable,
+                },
+                ValidateTransition::Yes,
+                Expected::Ok(SledPolicy::provisionable()),
+            )
+            .await
+            .with_context(|| {
+                format!(
+                    "failed to set non-provisionable policy for sled {}",
+                    self.non_provisionable
+                )
+            })
+        };
+
+        let expunged_fut = async {
+            sled_set_policy(
+                &opctx,
+                &datastore,
+                self.expunged,
+                SledPolicy::Expunged,
+                ValidateTransition::Yes,
+                Expected::Ok(SledPolicy::provisionable()),
+            )
+            .await
+            .with_context(|| {
+                format!(
+                    "failed to set expunged policy for sled {}",
+                    self.expunged
+                )
+            })
+        };
+
+        // Legally, we must set the policy to expunged before setting the state
+        // to decommissioned. (In the future, we'll want to test graceful
+        // removal as well.)
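+        // As an illustrative sketch, outside of tests the same two-step
+        // order applies via the datastore's public methods:
+        //
+        //     datastore.sled_set_policy_to_expunged(&opctx, &authz_sled).await?;
+        //     datastore.sled_set_state_to_decommissioned(&opctx, &authz_sled).await?;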
+        let decommissioned_fut = async {
+            sled_set_policy(
+                &opctx,
+                &datastore,
+                self.decommissioned,
+                SledPolicy::Expunged,
+                ValidateTransition::Yes,
+                Expected::Ok(SledPolicy::provisionable()),
+            )
+            .await
+            .with_context(|| {
+                format!(
+                    "failed to set expunged policy for sled {}, \
+                    as prerequisite for decommissioning",
+                    self.decommissioned
+                )
+            })?;
+
+            sled_set_state(
+                &opctx,
+                &datastore,
+                self.decommissioned,
+                SledState::Decommissioned,
+                ValidateTransition::Yes,
+                Expected::Ok(SledState::Active),
+            )
+            .await
+            .with_context(|| {
+                format!(
+                    "failed to set decommissioned state for sled {}",
+                    self.decommissioned
+                )
+            })
+        };
+
+        // This is _not_ a legal state, BUT we test it out to ensure that if
+        // the system somehow enters this state anyway, we don't try and
+        // provision resources on it.
+        let illegal_decommissioned_fut = async {
+            sled_set_state(
+                &opctx,
+                &datastore,
+                self.illegal_decommissioned,
+                SledState::Decommissioned,
+                ValidateTransition::No,
+                Expected::Ok(SledState::Active),
+            )
+            .await
+            .with_context(|| {
+                format!(
+                    "failed to illegally set decommissioned state for sled {}",
+                    self.illegal_decommissioned
+                )
+            })
+        };
+
+        // We're okay cancelling the rest of the futures if one of them fails,
+        // since the overall test is going to fail anyway. Hence try_join
+        // rather than join_then_try.
+        futures::try_join!(
+            non_provisionable_fut,
+            expunged_fut,
+            decommissioned_fut,
+            illegal_decommissioned_fut
+        )?;
+
+        Ok(())
+    }
+}
+
+pub(super) async fn sled_set_policy(
+    opctx: &OpContext,
+    datastore: &DataStore,
+    sled_id: Uuid,
+    new_policy: SledPolicy,
+    check: ValidateTransition,
+    expected_old_policy: Expected<SledPolicy>,
+) -> Result<()> {
+    let (authz_sled, _) = LookupPath::new(&opctx, &datastore)
+        .sled_id(sled_id)
+        .fetch_for(authz::Action::Modify)
+        .await
+        .unwrap();
+
+    let res = datastore
+        .sled_set_policy_impl(opctx, &authz_sled, new_policy, check)
+        .await;
+    match expected_old_policy {
+        Expected::Ok(expected) => {
+            let actual = res.context(
+                "failed transition that was expected to be successful",
+            )?;
+            ensure!(
+                actual == expected,
+                "actual old policy ({actual}) is not \
+                the same as expected ({expected})"
+            );
+        }
+        Expected::Invalid => match res {
+            Ok(old_policy) => {
+                bail!(
+                    "expected an invalid state transition error, \
+                    but transition was accepted with old policy: \
+                    {old_policy}"
+                )
+            }
+            Err(error) => {
+                error.ensure_invalid_transition()?;
+            }
+        },
+        Expected::Ignore => {
+            // The return value is ignored.
+ } + } + + Ok(()) +} + +pub(super) async fn sled_set_state( + opctx: &OpContext, + datastore: &DataStore, + sled_id: Uuid, + new_state: SledState, + check: ValidateTransition, + expected_old_state: Expected, +) -> Result<()> { + let (authz_sled, _) = LookupPath::new(&opctx, &datastore) + .sled_id(sled_id) + .fetch_for(authz::Action::Modify) + .await + .unwrap(); + + let res = datastore + .sled_set_state_impl(&opctx, &authz_sled, new_state, check) + .await; + match expected_old_state { + Expected::Ok(expected) => { + let actual = res.context( + "failed transition that was expected to be successful", + )?; + ensure!( + actual == expected, + "actual old state ({actual:?}) \ + is not the same as expected ({expected:?})" + ); + } + Expected::Invalid => match res { + Ok(old_state) => { + bail!( + "expected an invalid state transition error, \ + but transition was accepted with old state: \ + {old_state:?}" + ) + } + Err(error) => { + error.ensure_invalid_transition()?; + } + }, + Expected::Ignore => { + // The return value is ignored. + } + } + + Ok(()) +} + +/// For a transition, describes the expected value of the old state. +pub(super) enum Expected { + /// The transition is expected to successful, with the provided old + /// value. + Ok(T), + + /// The transition is expected to be invalid. + Invalid, + + /// The return value is ignored. + Ignore, +} diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs index d0b093ff45..374ef2cf73 100644 --- a/nexus/db-queries/src/db/datastore/volume.rs +++ b/nexus/db-queries/src/db/datastore/volume.rs @@ -1064,7 +1064,7 @@ pub fn read_only_resources_associated_with_volume( mod tests { use super::*; - use crate::db::datastore::datastore_test; + use crate::db::datastore::test_utils::datastore_test; use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs index 4f0245e283..4626301f76 100644 --- a/nexus/db-queries/src/db/datastore/vpc.rs +++ b/nexus/db-queries/src/db/datastore/vpc.rs @@ -1171,7 +1171,7 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use crate::db::datastore::datastore_test; + use crate::db::datastore::test_utils::datastore_test; use crate::db::model::Project; use crate::db::queries::vpc::MAX_VNI_SEARCH_RANGE_SIZE; use nexus_test_utils::db::test_setup_database; diff --git a/nexus/db-queries/src/db/lookup.rs b/nexus/db-queries/src/db/lookup.rs index 18ea369685..050c1dcfe9 100644 --- a/nexus/db-queries/src/db/lookup.rs +++ b/nexus/db-queries/src/db/lookup.rs @@ -950,7 +950,8 @@ mod test { let logctx = dev::test_setup_log("test_lookup"); let mut db = test_setup_database(&logctx.log).await; let (_, datastore) = - crate::db::datastore::datastore_test(&logctx, &db).await; + crate::db::datastore::test_utils::datastore_test(&logctx, &db) + .await; let opctx = OpContext::for_tests(logctx.log.new(o!()), Arc::clone(&datastore)); let project_name: Name = Name("my-project".parse().unwrap()); diff --git a/nexus/db-queries/src/db/pool_connection.rs b/nexus/db-queries/src/db/pool_connection.rs index 66fb125a7c..f419ba6852 100644 --- a/nexus/db-queries/src/db/pool_connection.rs +++ b/nexus/db-queries/src/db/pool_connection.rs @@ -59,9 +59,10 @@ static CUSTOM_TYPE_KEYS: &'static [&'static str] = &[ "router_route_kind", "saga_state", "service_kind", - "sled_provision_state", + "sled_policy", "sled_resource_kind", "sled_role", + "sled_state", "snapshot_state", "sp_type", 
"switch_interface_kind", diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index 7456e6e6bb..739c0b0809 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -906,7 +906,8 @@ mod tests { let logctx = dev::test_setup_log(test_name); let log = logctx.log.new(o!()); let db = test_setup_database(&log).await; - crate::db::datastore::datastore_test(&logctx, &db).await; + crate::db::datastore::test_utils::datastore_test(&logctx, &db) + .await; let cfg = crate::db::Config { url: db.pg_config().clone() }; let pool = Arc::new(crate::db::Pool::new(&logctx.log, &cfg)); let db_datastore = Arc::new( diff --git a/nexus/db-queries/src/db/queries/network_interface.rs b/nexus/db-queries/src/db/queries/network_interface.rs index a22c80b232..d96b26c9b3 100644 --- a/nexus/db-queries/src/db/queries/network_interface.rs +++ b/nexus/db-queries/src/db/queries/network_interface.rs @@ -1953,7 +1953,8 @@ mod tests { let log = logctx.log.new(o!()); let db = test_setup_database(&log).await; let (opctx, db_datastore) = - crate::db::datastore::datastore_test(&logctx, &db).await; + crate::db::datastore::test_utils::datastore_test(&logctx, &db) + .await; let authz_silo = opctx.authn.silo_required().unwrap(); diff --git a/nexus/db-queries/src/db/queries/region_allocation.rs b/nexus/db-queries/src/db/queries/region_allocation.rs index 3c37bf6b2e..43e1750812 100644 --- a/nexus/db-queries/src/db/queries/region_allocation.rs +++ b/nexus/db-queries/src/db/queries/region_allocation.rs @@ -27,6 +27,9 @@ use nexus_db_model::queries::region_allocation::{ proposed_dataset_changes, shuffled_candidate_datasets, updated_datasets, }; use nexus_db_model::schema; +use nexus_db_model::to_db_sled_policy; +use nexus_db_model::SledState; +use nexus_types::external_api::views::SledPolicy; use omicron_common::api::external; use omicron_common::nexus_config::RegionAllocationStrategy; @@ -321,14 +324,16 @@ impl CandidateZpools { .on(zpool_dsl::id.eq(old_zpool_usage::dsl::pool_id)) .inner_join(with_sled); - let sled_is_provisionable = sled_dsl::provision_state - .eq(crate::db::model::SledProvisionState::Provisionable); + let sled_is_provisionable = sled_dsl::sled_policy + .eq(to_db_sled_policy(SledPolicy::provisionable())); + let sled_is_active = sled_dsl::sled_state.eq(SledState::Active); let base_query = old_zpool_usage .query_source() .inner_join(with_zpool) .filter(it_will_fit) .filter(sled_is_provisionable) + .filter(sled_is_active) .select((old_zpool_usage::dsl::pool_id,)); let query = if distinct_sleds { diff --git a/nexus/db-queries/src/transaction_retry.rs b/nexus/db-queries/src/transaction_retry.rs index 6b5098158b..74a94a0b8f 100644 --- a/nexus/db-queries/src/transaction_retry.rs +++ b/nexus/db-queries/src/transaction_retry.rs @@ -270,7 +270,7 @@ impl OptionalError { mod test { use super::*; - use crate::db::datastore::datastore_test; + use crate::db::datastore::test_utils::datastore_test; use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; use oximeter::types::FieldValue; diff --git a/nexus/deployment/src/blueprint_builder.rs b/nexus/deployment/src/blueprint_builder.rs index 86a9b8da6e..2263a99ce0 100644 --- a/nexus/deployment/src/blueprint_builder.rs +++ b/nexus/deployment/src/blueprint_builder.rs @@ -724,7 +724,8 @@ impl<'a> BlueprintZones<'a> { #[cfg(test)] pub mod test { use super::*; - use nexus_types::external_api::views::SledProvisionState; + use 
nexus_types::external_api::views::SledPolicy; + use nexus_types::external_api::views::SledState; use omicron_common::address::IpRange; use omicron_common::address::Ipv4Range; use omicron_common::address::Ipv6Subnet; @@ -736,14 +737,26 @@ pub mod test { }; use std::str::FromStr; - /// Returns a collection and policy describing a pretty simple system - pub fn example() -> (Collection, Policy) { + pub const DEFAULT_N_SLEDS: usize = 3; + + /// Returns a collection and policy describing a pretty simple system. + /// + /// `n_sleds` is the number of sleds supported. Currently, this value can + /// be anywhere between 0 and 5. (More can be added in the future if + /// necessary.) + pub fn example(n_sleds: usize) -> (Collection, Policy) { let mut builder = nexus_inventory::CollectionBuilder::new("test-suite"); + if n_sleds > 5 { + panic!("example() only supports up to 5 sleds, but got {n_sleds}"); + } + let sled_ids = [ "72443b6c-b8bb-4ffa-ab3a-aeaa428ed79b", "a5f3db3a-61aa-4f90-ad3e-02833c253bf5", "0d168386-2551-44e8-98dd-ae7a7570f8a0", + "aaaaa1a1-0c3f-4928-aba7-6ec5c1db05f7", + "85e88acb-7b86-45ff-9c88-734e1da71c3d", ]; let mut policy = Policy { sleds: BTreeMap::new(), @@ -771,7 +784,7 @@ pub mod test { }) }; - for sled_id_str in sled_ids.iter() { + for sled_id_str in sled_ids.iter().take(n_sleds) { let sled_id: Uuid = sled_id_str.parse().unwrap(); let sled_ip = policy_add_sled(&mut policy, sled_id); let serial_number = format!("s{}", policy.sleds.len()); @@ -900,7 +913,8 @@ pub mod test { policy.sleds.insert( sled_id, SledResources { - provision_state: SledProvisionState::Provisionable, + policy: SledPolicy::provisionable(), + state: SledState::Active, zpools, subnet, }, @@ -931,7 +945,7 @@ pub mod test { fn test_initial() { // Test creating a blueprint from a collection and verifying that it // describes no changes. - let (collection, policy) = example(); + let (collection, policy) = example(DEFAULT_N_SLEDS); let blueprint_initial = BlueprintBuilder::build_initial_from_collection( &collection, @@ -977,7 +991,7 @@ pub mod test { #[test] fn test_basic() { - let (collection, mut policy) = example(); + let (collection, mut policy) = example(DEFAULT_N_SLEDS); let blueprint1 = BlueprintBuilder::build_initial_from_collection( &collection, Generation::new(), @@ -1096,7 +1110,7 @@ pub mod test { #[test] fn test_add_nexus_with_no_existing_nexus_zones() { - let (mut collection, policy) = example(); + let (mut collection, policy) = example(DEFAULT_N_SLEDS); // We don't care about the internal DNS version here. let internal_dns_version = Generation::new(); @@ -1147,7 +1161,7 @@ pub mod test { #[test] fn test_add_nexus_error_cases() { - let (mut collection, policy) = example(); + let (mut collection, policy) = example(DEFAULT_N_SLEDS); // We don't care about the internal DNS version here. let internal_dns_version = Generation::new(); @@ -1259,7 +1273,7 @@ pub mod test { #[test] fn test_invalid_parent_blueprint_two_zones_with_same_external_ip() { - let (mut collection, policy) = example(); + let (mut collection, policy) = example(DEFAULT_N_SLEDS); // We should fail if the parent blueprint claims to contain two // zones with the same external IP. Skim through the zones, copy the @@ -1310,7 +1324,7 @@ pub mod test { #[test] fn test_invalid_parent_blueprint_two_nexus_zones_with_same_nic_ip() { - let (mut collection, policy) = example(); + let (mut collection, policy) = example(DEFAULT_N_SLEDS); // We should fail if the parent blueprint claims to contain two // Nexus zones with the same NIC IP. 
Skim through the zones, copy @@ -1359,7 +1373,7 @@ pub mod test { #[test] fn test_invalid_parent_blueprint_two_zones_with_same_vnic_mac() { - let (mut collection, policy) = example(); + let (mut collection, policy) = example(DEFAULT_N_SLEDS); // We should fail if the parent blueprint claims to contain two // zones with the same service vNIC MAC address. Skim through the diff --git a/nexus/deployment/src/planner.rs b/nexus/deployment/src/planner.rs index 7973157068..0773fec2bf 100644 --- a/nexus/deployment/src/planner.rs +++ b/nexus/deployment/src/planner.rs @@ -12,7 +12,7 @@ use crate::blueprint_builder::EnsureMultiple; use crate::blueprint_builder::Error; use nexus_types::deployment::Blueprint; use nexus_types::deployment::Policy; -use nexus_types::external_api::views::SledProvisionState; +use nexus_types::external_api::views::SledState; use nexus_types::inventory::Collection; use omicron_common::api::external::Generation; use slog::{info, warn, Logger}; @@ -84,6 +84,17 @@ impl<'a> Planner<'a> { let mut sleds_ineligible_for_services = BTreeSet::new(); for (sled_id, sled_info) in &self.policy.sleds { + // Decommissioned sleds don't get any services. (This is an + // explicit match so that when more states are added, this fails to + // compile.) + match sled_info.state { + SledState::Decommissioned => { + sleds_ineligible_for_services.insert(*sled_id); + continue; + } + SledState::Active => {} + } + // Check for an NTP zone. Every sled should have one. If it's not // there, all we can do is provision that one zone. We have to wait // for that to succeed and synchronize the clock before we can @@ -175,10 +186,8 @@ impl<'a> Planner<'a> { // assumption that there is something amiss with them. sleds_ineligible_for_services.extend( self.policy.sleds.iter().filter_map(|(sled_id, sled_info)| { - match sled_info.provision_state { - SledProvisionState::Provisionable => None, - SledProvisionState::NonProvisionable => Some(*sled_id), - } + (!sled_info.is_eligible_for_discretionary_services()) + .then_some(*sled_id) }), ); @@ -311,9 +320,12 @@ mod test { use crate::blueprint_builder::test::example; use crate::blueprint_builder::test::policy_add_sled; use crate::blueprint_builder::test::verify_blueprint; + use crate::blueprint_builder::test::DEFAULT_N_SLEDS; use crate::blueprint_builder::BlueprintBuilder; use nexus_inventory::now_db_precision; - use nexus_types::external_api::views::SledProvisionState; + use nexus_types::external_api::views::SledPolicy; + use nexus_types::external_api::views::SledProvisionPolicy; + use nexus_types::external_api::views::SledState; use nexus_types::inventory::OmicronZoneType; use nexus_types::inventory::OmicronZonesFound; use omicron_common::api::external::Generation; @@ -328,7 +340,7 @@ mod test { let internal_dns_version = Generation::new(); // Use our example inventory collection. - let (mut collection, mut policy) = example(); + let (mut collection, mut policy) = example(DEFAULT_N_SLEDS); // Build the initial blueprint. We don't bother verifying it here // because there's a separate test for that. @@ -509,7 +521,7 @@ mod test { // Use our example inventory collection as a starting point, but strip // it down to just one sled. let (sled_id, collection, mut policy) = { - let (mut collection, mut policy) = example(); + let (mut collection, mut policy) = example(DEFAULT_N_SLEDS); // Pick one sled ID to keep and remove the rest. let keep_sled_id = @@ -593,7 +605,7 @@ mod test { ); // Use our example inventory collection as a starting point. 
-        let (collection, mut policy) = example();
+        let (collection, mut policy) = example(DEFAULT_N_SLEDS);

         // Build the initial blueprint.
         let blueprint1 = BlueprintBuilder::build_initial_from_collection(
@@ -674,7 +686,12 @@
         );

         // Use our example inventory collection as a starting point.
-        let (collection, mut policy) = example();
+        //
+        // Request two extra sleds here so we test non-provisionable, expunged,
+        // and decommissioned sleds. (When we add more kinds of
+        // non-provisionable states in the future, we'll have to add more
+        // sleds.)
+        let (collection, mut policy) = example(5);

         // Build the initial blueprint.
         let blueprint1 = BlueprintBuilder::build_initial_from_collection(
@@ -685,8 +702,8 @@
         )
         .expect("failed to create initial blueprint");

-        // This blueprint should only have 3 Nexus zones: one on each sled.
-        assert_eq!(blueprint1.omicron_zones.len(), 3);
+        // This blueprint should only have 5 Nexus zones: one on each sled.
+        assert_eq!(blueprint1.omicron_zones.len(), 5);
         for sled_config in blueprint1.omicron_zones.values() {
             assert_eq!(
                 sled_config
                     .zones
                     .iter()
                     .filter(|z| z.zone_type.is_nexus())
                     .count(),
                 1
             );
         }

-        // Arbitrarily choose one of the sleds and mark it non-provisionable.
+        // Arbitrarily choose some of the sleds and mark them non-provisionable
+        // in various ways.
+        let mut sleds_iter = policy.sleds.iter_mut();
+
         let nonprovisionable_sled_id = {
-            let (sled_id, resources) =
-                policy.sleds.iter_mut().next().expect("no sleds");
-            resources.provision_state = SledProvisionState::NonProvisionable;
+            let (sled_id, resources) = sleds_iter.next().expect("no sleds");
+            resources.policy = SledPolicy::InService {
+                provision_policy: SledProvisionPolicy::NonProvisionable,
+            };
             *sled_id
         };
+        let expunged_sled_id = {
+            let (sled_id, resources) = sleds_iter.next().expect("no sleds");
+            resources.policy = SledPolicy::Expunged;
+            *sled_id
+        };
+        let decommissioned_sled_id = {
+            let (sled_id, resources) = sleds_iter.next().expect("no sleds");
+            resources.state = SledState::Decommissioned;
+            *sled_id
+        };

-        // Now run the planner with a high number of target Nexus zones.
-        policy.target_nexus_zone_count = 14;
+        // Now run the planner with a high number of target Nexus zones. The
+        // number (16) is chosen such that:
+        //
+        // * we start with 5 sleds
+        // * we need to add 11 Nexus zones
+        // * there are two sleds eligible for provisioning
+        // * => 5 or 6 new Nexus zones per sled
+        //
+        // When the planner gets smarter about removing zones from expunged
+        // and/or removed sleds, we'll have to adjust this number.
+        policy.target_nexus_zone_count = 16;
         let blueprint2 = Planner::new_based_on(
             logctx.log.clone(),
             &blueprint1,
@@ -727,13 +767,15 @@
         let sleds = diff.sleds_changed().collect::<Vec<_>>();

-        // Only 2 of the 3 sleds should get additional Nexus zones. We expect a
+        // Only 2 of the 5 sleds should get additional Nexus zones. We expect a
         // total of 11 new Nexus zones, which should be spread evenly across the
         // two sleds (one gets 6 and the other gets 5), while the
         // non-provisionable sled should be unchanged.
         assert_eq!(sleds.len(), 2);
         let mut total_new_nexus_zones = 0;
         for (sled_id, sled_changes) in sleds {
             assert!(sled_id != nonprovisionable_sled_id);
+            assert!(sled_id != expunged_sled_id);
+            assert!(sled_id != decommissioned_sled_id);
             assert_eq!(sled_changes.zones_removed().count(), 0);
             assert_eq!(sled_changes.zones_changed().count(), 0);
             let zones = sled_changes.zones_added().collect::<Vec<_>>();
diff --git a/nexus/src/app/background/blueprint_execution.rs b/nexus/src/app/background/blueprint_execution.rs
index 32797facbf..373f023288 100644
--- a/nexus/src/app/background/blueprint_execution.rs
+++ b/nexus/src/app/background/blueprint_execution.rs
@@ -186,7 +186,8 @@ mod test {
             datastore
                 .sled_upsert(update)
                 .await
-                .expect("Failed to insert sled to db");
+                .expect("Failed to insert sled to db")
+                .unwrap();
         }

         let (blueprint_tx, blueprint_rx) = watch::channel(None);
diff --git a/nexus/src/app/background/inventory_collection.rs b/nexus/src/app/background/inventory_collection.rs
index 044e5a2234..c0d64d554a 100644
--- a/nexus/src/app/background/inventory_collection.rs
+++ b/nexus/src/app/background/inventory_collection.rs
@@ -338,7 +338,7 @@ mod test {
                     },
                     rack_id,
                 );
-                sleds.push(datastore.sled_upsert(sled).await.unwrap());
+                sleds.push(datastore.sled_upsert(sled).await.unwrap().unwrap());
             }

             // The same enumerator should immediately find all the new sleds.
diff --git a/nexus/src/app/deployment.rs b/nexus/src/app/deployment.rs
index 61ce803d13..adf6119c5c 100644
--- a/nexus/src/app/deployment.rs
+++ b/nexus/src/app/deployment.rs
@@ -160,7 +160,8 @@ impl super::Nexus {
                 .remove(&sled_id)
                 .unwrap_or_else(BTreeSet::new);
             let sled_info = SledResources {
-                provision_state: sled_row.provision_state().into(),
+                policy: sled_row.policy(),
+                state: sled_row.state().into(),
                 subnet,
                 zpools,
             };
diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs
index ec3f11dc6f..88955d78e9 100644
--- a/nexus/src/app/sled.rs
+++ b/nexus/src/app/sled.rs
@@ -11,9 +11,11 @@ use crate::internal_api::params::{
 use nexus_db_queries::authz;
 use nexus_db_queries::context::OpContext;
 use nexus_db_queries::db;
+use nexus_db_queries::db::datastore::SledUpsertOutput;
 use nexus_db_queries::db::lookup;
 use nexus_db_queries::db::lookup::LookupPath;
 use nexus_db_queries::db::model::DatasetKind;
+use nexus_types::external_api::views::SledProvisionPolicy;
 use omicron_common::api::external::DataPageParams;
 use omicron_common::api::external::Error;
 use omicron_common::api::external::ListResultVec;
@@ -68,7 +70,19 @@ impl super::Nexus {
             },
             self.rack_id,
         );
-        self.db_datastore.sled_upsert(sled).await?;
+        match self.db_datastore.sled_upsert(sled).await? {
+            SledUpsertOutput::Updated(_) => {}
+            SledUpsertOutput::Decommissioned => {
+                // We currently don't bubble up errors for decommissioned
+                // sleds; if this ever happens, it means the sled-agent
+                // doesn't know it has been decommissioned.
+                warn!(
+                    self.log,
+                    "decommissioned sled-agent reached out for upserts";
+                    "sled_uuid" => id.to_string()
+                );
+            }
+        }
         Ok(())
     }

@@ -146,17 +160,17 @@ impl super::Nexus {
             .await
     }

-    /// Returns the old state.
+    /// Returns the old provision policy.
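+    ///
+    /// Illustrative usage (a sketch; `sled_id` is a placeholder):
+    ///
+    /// ```ignore
+    /// let sled_lookup = nexus.sled_lookup(&opctx, &sled_id)?;
+    /// let old = nexus
+    ///     .sled_set_provision_policy(
+    ///         &opctx,
+    ///         &sled_lookup,
+    ///         SledProvisionPolicy::NonProvisionable,
+    ///     )
+    ///     .await?;
+    /// ```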
+    pub(crate) async fn sled_set_provision_policy(
         &self,
         opctx: &OpContext,
         sled_lookup: &lookup::Sled<'_>,
-        state: db::model::SledProvisionState,
-    ) -> Result<db::model::SledProvisionState, Error> {
+        new_policy: SledProvisionPolicy,
+    ) -> Result<SledProvisionPolicy, Error> {
         let (authz_sled,) =
             sled_lookup.lookup_for(authz::Action::Modify).await?;
         self.db_datastore
-            .sled_set_provision_state(opctx, &authz_sled, state)
+            .sled_set_provision_policy(opctx, &authz_sled, new_policy)
             .await
     }

diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index fd18cb2dab..b007cc6217 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -225,7 +225,7 @@ pub(crate) fn external_api() -> NexusApiDescription {
         api.register(rack_view)?;
         api.register(sled_list)?;
         api.register(sled_view)?;
-        api.register(sled_set_provision_state)?;
+        api.register(sled_set_provision_policy)?;
         api.register(sled_instance_list)?;
         api.register(sled_physical_disk_list)?;
         api.register(physical_disk_list)?;
@@ -5166,38 +5166,34 @@ async fn sled_view(
     apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
 }

-/// Set sled provision state
+/// Set sled provision policy
 #[endpoint {
     method = PUT,
-    path = "/v1/system/hardware/sleds/{sled_id}/provision-state",
+    path = "/v1/system/hardware/sleds/{sled_id}/provision-policy",
     tags = ["system/hardware"],
 }]
-async fn sled_set_provision_state(
+async fn sled_set_provision_policy(
     rqctx: RequestContext<Arc<ServerContext>>,
     path_params: Path<params::SledPath>,
-    new_provision_state: TypedBody<params::SledProvisionStateParams>,
-) -> Result<HttpResponseOk<params::SledProvisionStateResponse>, HttpError> {
+    new_provision_state: TypedBody<params::SledProvisionPolicyParams>,
+) -> Result<HttpResponseOk<params::SledProvisionPolicyResponse>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let nexus = &apictx.nexus;
         let path = path_params.into_inner();
-        let provision_state = new_provision_state.into_inner().state;
+        let new_state = new_provision_state.into_inner().state;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        // Convert the external `SledProvisionState` into our internal data model.
-        let new_state = db::model::SledProvisionState::from(provision_state);
         let sled_lookup = nexus.sled_lookup(&opctx, &path.sled_id)?;
         let old_state = nexus
-            .sled_set_provision_state(&opctx, &sled_lookup, new_state)
+            .sled_set_provision_policy(&opctx, &sled_lookup, new_state)
             .await?;
-        let response = params::SledProvisionStateResponse {
-            old_state: old_state.into(),
-            new_state: new_state.into(),
-        };
+        let response =
+            params::SledProvisionPolicyResponse { old_state, new_state };
         Ok(HttpResponseOk(response))
     };
diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs
index cd04bb6018..81f2a02b31 100644
--- a/nexus/tests/integration_tests/endpoints.rs
+++ b/nexus/tests/integration_tests/endpoints.rs
@@ -22,6 +22,7 @@ use nexus_types::external_api::params;
 use nexus_types::external_api::shared;
 use nexus_types::external_api::shared::IpRange;
 use nexus_types::external_api::shared::Ipv4Range;
+use nexus_types::external_api::views::SledProvisionPolicy;
 use omicron_common::api::external::AddressLotKind;
 use omicron_common::api::external::ByteCount;
 use omicron_common::api::external::IdentityMetadataCreateParams;
@@ -45,14 +46,12 @@ pub const HARDWARE_UNINITIALIZED_SLEDS: &'static str =
     "/v1/system/hardware/sleds-uninitialized";
 pub static HARDWARE_SLED_URL: Lazy<String> =
     Lazy::new(|| format!("/v1/system/hardware/sleds/{}", SLED_AGENT_UUID));
-pub static HARDWARE_SLED_PROVISION_STATE_URL: Lazy<String> = Lazy::new(|| {
-    format!("/v1/system/hardware/sleds/{}/provision-state", SLED_AGENT_UUID)
+pub static HARDWARE_SLED_PROVISION_POLICY_URL: Lazy<String> = Lazy::new(|| {
+    format!("/v1/system/hardware/sleds/{}/provision-policy", SLED_AGENT_UUID)
 });
-pub static DEMO_SLED_PROVISION_STATE: Lazy<params::SledProvisionStateParams> =
-    Lazy::new(|| {
-        params::SledProvisionStateParams {
-            state: nexus_types::external_api::views::SledProvisionState::NonProvisionable,
-        }
+pub static DEMO_SLED_PROVISION_POLICY: Lazy<params::SledProvisionPolicyParams> =
+    Lazy::new(|| params::SledProvisionPolicyParams {
+        state: SledProvisionPolicy::NonProvisionable,
     });

 pub static HARDWARE_SWITCH_URL: Lazy<String> =
@@ -1911,11 +1910,11 @@ pub static VERIFY_ENDPOINTS: Lazy<Vec<VerifyEndpoint>> = Lazy::new(|| {
         },

         VerifyEndpoint {
-            url: &HARDWARE_SLED_PROVISION_STATE_URL,
+            url: &HARDWARE_SLED_PROVISION_POLICY_URL,
             visibility: Visibility::Protected,
             unprivileged_access: UnprivilegedAccess::None,
             allowed_methods: vec![AllowedMethod::Put(
-                serde_json::to_value(&*DEMO_SLED_PROVISION_STATE).unwrap()
+                serde_json::to_value(&*DEMO_SLED_PROVISION_POLICY).unwrap()
             )],
         },

diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs
index 1c23f1b842..380ec1c975 100644
--- a/nexus/tests/integration_tests/schema.rs
+++ b/nexus/tests/integration_tests/schema.rs
@@ -1048,7 +1048,8 @@ fn after_23_0_0(client: &Client) -> BoxFuture<'_, ()> {
 fn before_24_0_0(client: &Client) -> BoxFuture<'_, ()> {
     // IP addresses were pulled off dogfood sled 16
     Box::pin(async move {
-        // Create two sleds
+        // Create two sleds. (SLED2 is marked non_provisionable for
+        // after_37_0_1.)
         client
             .batch_execute(&format!(
                 "INSERT INTO sled
@@ -1062,7 +1063,7 @@ fn before_24_0_0(client: &Client) -> BoxFuture<'_, ()> {
                     'fd00:1122:3344:104::1ac', 'provisionable'),
                   ('{SLED2}', now(), now(), NULL, 1, '{RACK1}', false, 'zzzz',
                    'xxxx', '2', 64, 12345678, 77,'fd00:1122:3344:107::1', 12345,
-                   'fd00:1122:3344:107::d4', 'provisionable');
+                   'fd00:1122:3344:107::d4', 'non_provisionable');
                 "
             ))
             .await
@@ -1095,6 +1096,45 @@ fn after_24_0_0(client: &Client) -> BoxFuture<'_, ()> {
     })
 }

+// This reuses the sleds created in before_24_0_0.
+fn after_37_0_1(client: &Client) -> BoxFuture<'_, ()> {
+    Box::pin(async {
+        // Confirm that the sled policy and state columns were populated
+        // correctly from the old provision state.
+        let rows = client
+            .query("SELECT sled_policy, sled_state FROM sled ORDER BY id", &[])
+            .await
+            .expect("Failed to select sled policy and state");
+        let policy_and_state = process_rows(&rows);
+
+        assert_eq!(
+            policy_and_state[0].values,
+            vec![
+                ColumnValue::new(
+                    "sled_policy",
+                    SqlEnum::from(("sled_policy", "in_service"))
+                ),
+                ColumnValue::new(
+                    "sled_state",
+                    SqlEnum::from(("sled_state", "active"))
+                ),
+            ]
+        );
+        assert_eq!(
+            policy_and_state[1].values,
+            vec![
+                ColumnValue::new(
+                    "sled_policy",
+                    SqlEnum::from(("sled_policy", "no_provision"))
+                ),
+                ColumnValue::new(
+                    "sled_state",
+                    SqlEnum::from(("sled_state", "active"))
+                ),
+            ]
+        );
+    })
+}
+
 // Lazily initializes all migration checks. The combination of Rust function
 // pointers and async makes defining a static table fairly painful, so we're
 // using lazy initialization instead.
@@ -1112,6 +1152,10 @@ fn get_migration_checks() -> BTreeMap<SemverVersion, DataMigrationFns> {
         SemverVersion(semver::Version::parse("24.0.0").unwrap()),
         DataMigrationFns { before: Some(before_24_0_0), after: after_24_0_0 },
     );
+    map.insert(
+        SemverVersion(semver::Version::parse("37.0.1").unwrap()),
+        DataMigrationFns { before: None, after: after_37_0_1 },
+    );
     map
 }

diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt
index 7ed73fd30a..adb36a24af 100644
--- a/nexus/tests/output/nexus_tags.txt
+++ b/nexus/tests/output/nexus_tags.txt
@@ -133,7 +133,7 @@ sled_instance_list GET /v1/system/hardware/sleds/{sle
 sled_list GET /v1/system/hardware/sleds
 sled_list_uninitialized GET /v1/system/hardware/sleds-uninitialized
 sled_physical_disk_list GET /v1/system/hardware/sleds/{sled_id}/disks
-sled_set_provision_state PUT /v1/system/hardware/sleds/{sled_id}/provision-state
+sled_set_provision_policy PUT /v1/system/hardware/sleds/{sled_id}/provision-policy
 sled_view GET /v1/system/hardware/sleds/{sled_id}
 switch_list GET /v1/system/hardware/switches
 switch_view GET /v1/system/hardware/switches/{switch_id}
diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs
index 2e683878be..0f601f3db5 100644
--- a/nexus/types/src/deployment.rs
+++ b/nexus/types/src/deployment.rs
@@ -11,7 +11,8 @@
 //! nexus/deployment does not currently know about nexus/db-model and it's
 //! convenient to separate these concerns.)
-use crate::external_api::views::SledProvisionState;
+use crate::external_api::views::SledPolicy;
+use crate::external_api::views::SledState;
 use crate::inventory::Collection;
 pub use crate::inventory::NetworkInterface;
 pub use crate::inventory::NetworkInterfaceKind;
@@ -64,8 +65,11 @@ pub struct Policy {

 /// Describes the resources available on each sled for the planner
 #[derive(Debug, Clone)]
 pub struct SledResources {
-    /// provision state of this sled
-    pub provision_state: SledProvisionState,
+    /// current sled policy
+    pub policy: SledPolicy,
+
+    /// current sled state
+    pub state: SledState,

     /// zpools on this sled
     ///
@@ -80,6 +84,17 @@
     pub subnet: Ipv6Subnet<SLED_PREFIX>,
 }

+impl SledResources {
+    /// Returns true if the sled can have services provisioned on it that
+    /// aren't required to be on every sled.
+    ///
+    /// For example, NTP must exist on every sled, but Nexus does not have to.
+    pub fn is_eligible_for_discretionary_services(&self) -> bool {
+        self.policy.is_provisionable()
+            && self.state.is_eligible_for_discretionary_services()
+    }
+}
+
 /// Describes a complete set of software and configuration for the system
 // Blueprints are a fundamental part of how the system modifies itself.  Each
 // blueprint completely describes all of the software and configuration
@@ -266,6 +281,7 @@ pub struct OmicronZonesDiff<'a> {
 }

 /// Describes a sled that appeared on both sides of a diff (possibly changed)
+#[derive(Debug)]
 pub struct DiffSledCommon<'a> {
     /// id of the sled
     pub sled_id: Uuid,
diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs
index 6cb878084d..07eeb9b679 100644
--- a/nexus/types/src/external_api/params.rs
+++ b/nexus/types/src/external_api/params.rs
@@ -96,21 +96,21 @@ pub struct SledSelector {
     pub sled: Uuid,
 }

-/// Parameters for `sled_set_provision_state`.
+/// Parameters for `sled_set_provision_policy`.
 #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)]
-pub struct SledProvisionStateParams {
+pub struct SledProvisionPolicyParams {
     /// The provision state.
-    pub state: super::views::SledProvisionState,
+    pub state: super::views::SledProvisionPolicy,
 }

-/// Response to `sled_set_provision_state`.
+/// Response to `sled_set_provision_policy`.
 #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)]
-pub struct SledProvisionStateResponse {
+pub struct SledProvisionPolicyResponse {
     /// The old provision state.
-    pub old_state: super::views::SledProvisionState,
+    pub old_state: super::views::SledProvisionPolicy,

     /// The new provision state.
-    pub new_state: super::views::SledProvisionState,
+    pub new_state: super::views::SledProvisionPolicy,
 }

 pub struct SwitchSelector {
diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs
index 84648f109f..a3e87b162e 100644
--- a/nexus/types/src/external_api/views.rs
+++ b/nexus/types/src/external_api/views.rs
@@ -19,7 +19,9 @@ use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 use std::collections::BTreeMap;
 use std::collections::BTreeSet;
+use std::fmt;
 use std::net::IpAddr;
+use strum::{EnumIter, IntoEnumIterator};
 use uuid::Uuid;

 use super::params::PhysicalDiskKind;
@@ -418,32 +420,214 @@ pub struct Sled {
     pub baseboard: Baseboard,
     /// The rack to which this Sled is currently attached
     pub rack_id: Uuid,
-    /// The provision state of the sled.
-    pub provision_state: SledProvisionState,
+    /// The operator-defined policy of a sled.
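+    ///
+    /// (For instance, with the tagged serde representation of `SledPolicy`
+    /// below, this serializes as
+    /// `{"kind": "in_service", "provision_policy": "provisionable"}` or
+    /// `{"kind": "expunged"}`.)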
+ pub policy: SledPolicy, + /// The current state Nexus believes the sled to be in. + pub state: SledState, /// The number of hardware threads which can execute on this sled pub usable_hardware_threads: u32, /// Amount of RAM which may be used by the Sled's OS pub usable_physical_ram: ByteCount, } -/// The provision state of a sled. +/// The operator-defined provision policy of a sled. /// /// This controls whether new resources are going to be provisioned on this /// sled. #[derive( - Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, + Copy, + Clone, + Debug, + Deserialize, + Serialize, + JsonSchema, + PartialEq, + Eq, + EnumIter, )] #[serde(rename_all = "snake_case")] -pub enum SledProvisionState { +pub enum SledProvisionPolicy { /// New resources will be provisioned on this sled. Provisionable, - /// New resources will not be provisioned on this sled. However, existing - /// resources will continue to be on this sled unless manually migrated - /// off. + /// New resources will not be provisioned on this sled. However, if the + /// sled is currently in service, existing resources will continue to be on + /// this sled unless manually migrated off. NonProvisionable, } +impl SledProvisionPolicy { + /// Returns the opposite of the current provision state. + pub const fn invert(self) -> Self { + match self { + Self::Provisionable => Self::NonProvisionable, + Self::NonProvisionable => Self::Provisionable, + } + } +} + +/// The operator-defined policy of a sled. +#[derive( + Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, +)] +#[serde(rename_all = "snake_case", tag = "kind")] +pub enum SledPolicy { + /// The operator has indicated that the sled is in-service. + InService { + /// Determines whether new resources can be provisioned onto the sled. + provision_policy: SledProvisionPolicy, + }, + + /// The operator has indicated that the sled has been permanently removed + /// from service. + /// + /// This is a terminal state: once a particular sled ID is expunged, it + /// will never return to service. (The actual hardware may be reused, but + /// it will be treated as a brand-new sled.) + /// + /// An expunged sled is always non-provisionable. + Expunged, + // NOTE: if you add a new value here, be sure to add it to + // the `IntoEnumIterator` impl below! +} + +// Can't automatically derive strum::EnumIter because that doesn't provide a +// way to iterate over nested enums. +impl IntoEnumIterator for SledPolicy { + type Iterator = std::array::IntoIter; + + fn iter() -> Self::Iterator { + [ + Self::InService { + provision_policy: SledProvisionPolicy::Provisionable, + }, + Self::InService { + provision_policy: SledProvisionPolicy::NonProvisionable, + }, + Self::Expunged, + ] + .into_iter() + } +} + +impl SledPolicy { + /// Creates a new `SledPolicy` that is in-service and provisionable. + pub fn provisionable() -> Self { + Self::InService { provision_policy: SledProvisionPolicy::Provisionable } + } + + /// Returns the list of all in-service policies. + pub fn all_in_service() -> &'static [Self] { + &[ + Self::InService { + provision_policy: SledProvisionPolicy::Provisionable, + }, + Self::InService { + provision_policy: SledProvisionPolicy::NonProvisionable, + }, + ] + } + + /// Returns true if the sled can have services provisioned on it. 
+ pub fn is_provisionable(&self) -> bool { + match self { + Self::InService { + provision_policy: SledProvisionPolicy::Provisionable, + } => true, + Self::InService { + provision_policy: SledProvisionPolicy::NonProvisionable, + } + | Self::Expunged => false, + } + } + + /// Returns the provision policy, if the sled is in service. + pub fn provision_policy(&self) -> Option { + match self { + Self::InService { provision_policy } => Some(*provision_policy), + Self::Expunged => None, + } + } + + /// Returns true if the sled can be decommissioned with this policy. + pub fn is_decommissionable(&self) -> bool { + // This should be kept in sync with `all_decommissionable` below. + match self { + Self::InService { .. } => false, + Self::Expunged => true, + } + } + + /// Returns all the possible policies a sled can have for it to be + /// decommissioned. + pub fn all_decommissionable() -> &'static [Self] { + &[Self::Expunged] + } +} + +impl fmt::Display for SledPolicy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SledPolicy::InService { + provision_policy: SledProvisionPolicy::Provisionable, + } => write!(f, "in service"), + SledPolicy::InService { + provision_policy: SledProvisionPolicy::NonProvisionable, + } => write!(f, "in service (not provisionable)"), + SledPolicy::Expunged => write!(f, "expunged"), + } + } +} + +/// The current state of the sled, as determined by Nexus. +#[derive( + Copy, + Clone, + Debug, + Deserialize, + Serialize, + JsonSchema, + PartialEq, + Eq, + EnumIter, +)] +#[serde(rename_all = "snake_case")] +pub enum SledState { + /// The sled is currently active, and has resources allocated on it. + Active, + + /// The sled has been permanently removed from service. + /// + /// This is a terminal state: once a particular sled ID is decommissioned, + /// it will never return to service. (The actual hardware may be reused, + /// but it will be treated as a brand-new sled.) + Decommissioned, +} + +impl SledState { + /// Returns true if the sled state makes it eligible for services that + /// aren't required to be on every sled. + /// + /// For example, NTP must exist on every sled, but Nexus does not have to. + pub fn is_eligible_for_discretionary_services(&self) -> bool { + // (Explicit match, so that this fails to compile if a new state is + // added.)
+ match self { + SledState::Active => true, + SledState::Decommissioned => false, + } + } +} + +impl fmt::Display for SledState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SledState::Active => write!(f, "active"), + SledState::Decommissioned => write!(f, "decommissioned"), + } + } +} + /// An operator's view of an instance running on a given sled #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct SledInstance { diff --git a/openapi/nexus.json b/openapi/nexus.json index f42841dcf6..cc8edec51d 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -4221,13 +4221,13 @@ } } }, - "/v1/system/hardware/sleds/{sled_id}/provision-state": { + "/v1/system/hardware/sleds/{sled_id}/provision-policy": { "put": { "tags": [ "system/hardware" ], - "summary": "Set sled provision state", - "operationId": "sled_set_provision_state", + "summary": "Set sled provision policy", + "operationId": "sled_set_provision_policy", "parameters": [ { "in": "path", @@ -4244,7 +4244,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SledProvisionStateParams" + "$ref": "#/components/schemas/SledProvisionPolicyParams" } } }, @@ -4256,7 +4256,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SledProvisionStateResponse" + "$ref": "#/components/schemas/SledProvisionPolicyResponse" } } } @@ -14839,11 +14839,11 @@ "type": "string", "format": "uuid" }, - "provision_state": { - "description": "The provision state of the sled.", + "policy": { + "description": "The operator-defined policy of a sled.", "allOf": [ { - "$ref": "#/components/schemas/SledProvisionState" + "$ref": "#/components/schemas/SledPolicy" } ] }, @@ -14852,6 +14852,14 @@ "type": "string", "format": "uuid" }, + "state": { + "description": "The current state Nexus believes the sled to be in.", + "allOf": [ + { + "$ref": "#/components/schemas/SledState" + } + ] + }, "time_created": { "description": "timestamp when this resource was created", "type": "string", @@ -14880,8 +14888,9 @@ "required": [ "baseboard", "id", - "provision_state", + "policy", "rack_id", + "state", "time_created", "time_modified", "usable_hardware_threads", @@ -14971,8 +14980,52 @@ "items" ] }, - "SledProvisionState": { - "description": "The provision state of a sled.\n\nThis controls whether new resources are going to be provisioned on this sled.", + "SledPolicy": { + "description": "The operator-defined policy of a sled.", + "oneOf": [ + { + "description": "The operator has indicated that the sled is in-service.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "in_service" + ] + }, + "provision_policy": { + "description": "Determines whether new resources can be provisioned onto the sled.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionPolicy" + } + ] + } + }, + "required": [ + "kind", + "provision_policy" + ] + }, + { + "description": "The operator has indicated that the sled has been permanently removed from service.\n\nThis is a terminal state: once a particular sled ID is expunged, it will never return to service. 
(The actual hardware may be reused, but it will be treated as a brand-new sled.)\n\nAn expunged sled is always non-provisionable.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "expunged" + ] + } + }, + "required": [ + "kind" + ] + } + ] + }, + "SledProvisionPolicy": { + "description": "The operator-defined provision policy of a sled.\n\nThis controls whether new resources are going to be provisioned on this sled.", "oneOf": [ { "description": "New resources will be provisioned on this sled.", @@ -14982,7 +15035,7 @@ ] }, { - "description": "New resources will not be provisioned on this sled. However, existing resources will continue to be on this sled unless manually migrated off.", + "description": "New resources will not be provisioned on this sled. However, if the sled is currently in service, existing resources will continue to be on this sled unless manually migrated off.", "type": "string", "enum": [ "non_provisionable" @@ -14990,15 +15043,15 @@ } ] }, - "SledProvisionStateParams": { - "description": "Parameters for `sled_set_provision_state`.", + "SledProvisionPolicyParams": { + "description": "Parameters for `sled_set_provision_policy`.", "type": "object", "properties": { "state": { "description": "The provision state.", "allOf": [ { - "$ref": "#/components/schemas/SledProvisionState" + "$ref": "#/components/schemas/SledProvisionPolicy" } ] } @@ -15007,15 +15060,15 @@ "state" ] }, - "SledProvisionStateResponse": { - "description": "Response to `sled_set_provision_state`.", + "SledProvisionPolicyResponse": { + "description": "Response to `sled_set_provision_policy`.", "type": "object", "properties": { "new_state": { "description": "The new provision state.", "allOf": [ { - "$ref": "#/components/schemas/SledProvisionState" + "$ref": "#/components/schemas/SledProvisionPolicy" } ] }, @@ -15023,7 +15076,7 @@ "description": "The old provision state.", "allOf": [ { - "$ref": "#/components/schemas/SledProvisionState" + "$ref": "#/components/schemas/SledProvisionPolicy" } ] } @@ -15054,6 +15107,25 @@ "items" ] }, + "SledState": { + "description": "The current state of the sled, as determined by Nexus.", + "oneOf": [ + { + "description": "The sled is currently active, and has resources allocated on it.", + "type": "string", + "enum": [ + "active" + ] + }, + { + "description": "The sled has been permanently removed from service.\n\nThis is a terminal state: once a particular sled ID is decommissioned, it will never return to service. (The actual hardware may be reused, but it will be treated as a brand-new sled.)", + "type": "string", + "enum": [ + "decommissioned" + ] + } + ] + }, "Snapshot": { "description": "View of a Snapshot", "type": "object", diff --git a/schema/crdb/37.0.0/up01.sql b/schema/crdb/37.0.0/up01.sql new file mode 100644 index 0000000000..bb66ff283e --- /dev/null +++ b/schema/crdb/37.0.0/up01.sql @@ -0,0 +1,13 @@ +-- The disposition for a particular sled. This is updated solely by the +-- operator, and not by Nexus. +CREATE TYPE IF NOT EXISTS omicron.public.sled_policy AS ENUM ( + -- The sled is in service, and new resources can be provisioned onto it. + 'in_service', + -- The sled is in service, but the operator has indicated that new + -- resources should not be provisioned onto it. + 'no_provision', + -- The operator has marked that the sled has, or will be, removed from the + -- rack, and it should be assumed that any resources currently on it are + -- now permanently missing. 
+ 'expunged' +); diff --git a/schema/crdb/37.0.0/up02.sql b/schema/crdb/37.0.0/up02.sql new file mode 100644 index 0000000000..01cba8f7fe --- /dev/null +++ b/schema/crdb/37.0.0/up02.sql @@ -0,0 +1,22 @@ +-- The actual state of the sled. This is updated exclusively by Nexus. +-- +-- Nexus's goal is to match the sled's state with the operator-indicated +-- policy. For example, if the sled_policy is "expunged" and the sled_state is +-- "active", Nexus will assume that the sled is gone. Based on that, Nexus will +-- reallocate resources currently on the expunged sled to other sleds, etc. +-- Once the expunged sled no longer has any resources attached to it, Nexus +-- will mark it as decommissioned. +CREATE TYPE IF NOT EXISTS omicron.public.sled_state AS ENUM ( + -- The sled has resources of any kind allocated on it, or, is available for + -- new resources. + -- + -- The sled can be in this state and have a different sled policy, e.g. + -- "expunged". + 'active', + + -- The sled no longer has resources allocated on it, now or in the future. + -- + -- This is a terminal state. This state is only valid if the sled policy is + -- 'expunged'. + 'decommissioned' +); diff --git a/schema/crdb/37.0.0/up03.sql b/schema/crdb/37.0.0/up03.sql new file mode 100644 index 0000000000..7e51cf9546 --- /dev/null +++ b/schema/crdb/37.0.0/up03.sql @@ -0,0 +1,7 @@ +-- Modify the existing sled table to add the columns as required. +ALTER TABLE omicron.public.sled + -- Nullable for now -- we're going to set the data in sled_policy in the + -- next migration statement. + ADD COLUMN IF NOT EXISTS sled_policy omicron.public.sled_policy, + ADD COLUMN IF NOT EXISTS sled_state omicron.public.sled_state + NOT NULL DEFAULT 'active'; diff --git a/schema/crdb/37.0.0/up04.sql b/schema/crdb/37.0.0/up04.sql new file mode 100644 index 0000000000..a85367ec10 --- /dev/null +++ b/schema/crdb/37.0.0/up04.sql @@ -0,0 +1,14 @@ +-- Mass-update the sled_policy column to match the sled_provision_state column. + +-- This is a full table scan, but is unavoidable. +SET + LOCAL disallow_full_table_scans = OFF; + +UPDATE omicron.public.sled + SET sled_policy = + (CASE provision_state + WHEN 'provisionable' THEN 'in_service' + WHEN 'non_provisionable' THEN 'no_provision' + -- No need to specify the ELSE case because the enum has been + -- exhaustively matched (sled_provision_state already bans NULL). + END); diff --git a/schema/crdb/37.0.1/up01.sql b/schema/crdb/37.0.1/up01.sql new file mode 100644 index 0000000000..c23e3e5a11 --- /dev/null +++ b/schema/crdb/37.0.1/up01.sql @@ -0,0 +1,8 @@ +-- This is a follow-up to the previous migration, done separately to ensure +-- that the updated values for sled_policy are committed before the +-- provision_state column is dropped. 
+ +ALTER TABLE omicron.public.sled + DROP COLUMN IF EXISTS provision_state, + ALTER COLUMN sled_policy SET NOT NULL, + ALTER COLUMN sled_state DROP DEFAULT; diff --git a/schema/crdb/37.0.1/up02.sql b/schema/crdb/37.0.1/up02.sql new file mode 100644 index 0000000000..342f794c82 --- /dev/null +++ b/schema/crdb/37.0.1/up02.sql @@ -0,0 +1 @@ +DROP TYPE IF EXISTS omicron.public.sled_provision_state; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 87a22d1adc..837be42c35 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -73,11 +73,41 @@ CREATE TABLE IF NOT EXISTS omicron.public.rack ( * Sleds */ -CREATE TYPE IF NOT EXISTS omicron.public.sled_provision_state AS ENUM ( - -- New resources can be provisioned onto the sled - 'provisionable', - -- New resources must not be provisioned onto the sled - 'non_provisionable' +-- The disposition for a particular sled. This is updated solely by the +-- operator, and not by Nexus. +CREATE TYPE IF NOT EXISTS omicron.public.sled_policy AS ENUM ( + -- The sled is in service, and new resources can be provisioned onto it. + 'in_service', + -- The sled is in service, but the operator has indicated that new + -- resources should not be provisioned onto it. + 'no_provision', + -- The operator has marked that the sled has, or will be, removed from the + -- rack, and it should be assumed that any resources currently on it are + -- now permanently missing. + 'expunged' +); + +-- The actual state of the sled. This is updated exclusively by Nexus. +-- +-- Nexus's goal is to match the sled's state with the operator-indicated +-- policy. For example, if the sled_policy is "expunged" and the sled_state is +-- "active", Nexus will assume that the sled is gone. Based on that, Nexus will +-- reallocate resources currently on the expunged sled to other sleds, etc. +-- Once the expunged sled no longer has any resources attached to it, Nexus +-- will mark it as decommissioned. +CREATE TYPE IF NOT EXISTS omicron.public.sled_state AS ENUM ( + -- The sled has resources of any kind allocated on it, or, is available for + -- new resources. + -- + -- The sled can be in this state and have a different sled policy, e.g. + -- "expunged". + 'active', + + -- The sled no longer has resources allocated on it, now or in the future. + -- + -- This is a terminal state. This state is only valid if the sled policy is + -- 'expunged'. + 'decommissioned' ); CREATE TABLE IF NOT EXISTS omicron.public.sled ( @@ -111,8 +141,11 @@ CREATE TABLE IF NOT EXISTS omicron.public.sled ( /* The last address allocated to a propolis instance on this sled. */ last_used_address INET NOT NULL, - /* The state of whether resources should be provisioned onto the sled */ - provision_state omicron.public.sled_provision_state NOT NULL, + /* The policy for the sled, updated exclusively by the operator */ + sled_policy omicron.public.sled_policy NOT NULL, + + /* The actual state of the sled, updated exclusively by Nexus */ + sled_state omicron.public.sled_state NOT NULL, -- This constraint should be upheld, even for deleted disks -- in the fleet. 
@@ -3518,7 +3551,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '36.0.0', NULL) + ( TRUE, NOW(), NOW(), '37.0.1', NULL) ON CONFLICT DO NOTHING; COMMIT; From c2ff7be9ce54fc4af61176c94ddc5f357ae33626 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sat, 24 Feb 2024 05:08:24 +0000 Subject: [PATCH 039/157] chore(deps): update taiki-e/install-action digest to ffdab02 (#5137) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`19e9b54` -> `ffdab02`](https://togithub.com/taiki-e/install-action/compare/19e9b54...ffdab02) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate).
Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 1f55f2f255..a55c05ba7a 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@19e9b549a48620cc50fcf6e6e866b8fb4eca1b01 # v2 + uses: taiki-e/install-action@ffdab026038b43b56c3c9540cdadb98181c6155c # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From b6d39a0aab1c950f40e26ec7f5e021aed9c22448 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sun, 25 Feb 2024 05:10:08 +0000 Subject: [PATCH 040/157] chore(deps): update taiki-e/install-action digest to b7add58 (#5139) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`ffdab02` -> `b7add58`](https://togithub.com/taiki-e/install-action/compare/ffdab02...b7add58) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate).
Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index a55c05ba7a..2463ac4e2e 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@ffdab026038b43b56c3c9540cdadb98181c6155c # v2 + uses: taiki-e/install-action@b7add58e53e52e624966da65007ce24524f3dcf3 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From 767c7fe2650040d7b8097a995bb3e9bde00a7512 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Mon, 26 Feb 2024 14:25:48 +0000 Subject: [PATCH 041/157] chore(deps): update taiki-e/install-action digest to 4ce8785 (#5142) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`b7add58` -> `4ce8785`](https://togithub.com/taiki-e/install-action/compare/b7add58...4ce8785) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 2463ac4e2e..10c1c04003 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@b7add58e53e52e624966da65007ce24524f3dcf3 # v2 + uses: taiki-e/install-action@4ce8785db2a8a56c9ede16f705c2c49c5c61669c # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From 7fae994cbe954d576731638d800e2c0caf17a37e Mon Sep 17 00:00:00 2001 From: David Crespo Date: Mon, 26 Feb 2024 11:24:20 -0600 Subject: [PATCH 042/157] Bump web console (floating IPs) (#5126) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### User-facing changes * [80f11673](https://github.com/oxidecomputer/console/commit/80f11673) oxidecomputer/console#1957 * [f31d5331](https://github.com/oxidecomputer/console/commit/f31d5331) oxidecomputer/console#1944 * [b454cefd](https://github.com/oxidecomputer/console/commit/b454cefd) oxidecomputer/console#1955 * [1936e0d8](https://github.com/oxidecomputer/console/commit/1936e0d8) oxidecomputer/console#1948 --- ### All changes https://github.com/oxidecomputer/console/compare/e5a1f804...80f11673 * [80f11673](https://github.com/oxidecomputer/console/commit/80f11673) oxidecomputer/console#1957 * [5d989a70](https://github.com/oxidecomputer/console/commit/5d989a70) oxidecomputer/console#1959 * [d4ec1927](https://github.com/oxidecomputer/console/commit/d4ec1927) turn off license-eye comments, I hate them. CI failure is sufficient * [f8d2f36e](https://github.com/oxidecomputer/console/commit/f8d2f36e) oxidecomputer/console#1960 * [e0b676dc](https://github.com/oxidecomputer/console/commit/e0b676dc) upgrade husky commands for v9 https://github.com/typicode/husky/releases/tag/v9.0.1 * [f31d5331](https://github.com/oxidecomputer/console/commit/f31d5331) oxidecomputer/console#1944 * [b4552eea](https://github.com/oxidecomputer/console/commit/b4552eea) Revert "Revert all app changes since v6 except two small fixes (oxidecomputer/console#1958)" * [1f8ebf28](https://github.com/oxidecomputer/console/commit/1f8ebf28) oxidecomputer/console#1958 * [b454cefd](https://github.com/oxidecomputer/console/commit/b454cefd) oxidecomputer/console#1955 * [1ce32d32](https://github.com/oxidecomputer/console/commit/1ce32d32) oxidecomputer/console#1956 * [794ae11d](https://github.com/oxidecomputer/console/commit/794ae11d) oxidecomputer/console#1952 * [903b8f6a](https://github.com/oxidecomputer/console/commit/903b8f6a) tweak api-diff: fix type error on first arg, use dax's new built-in pipe * [a4e15cdd](https://github.com/oxidecomputer/console/commit/a4e15cdd) oxidecomputer/console#1950 * [32037d40](https://github.com/oxidecomputer/console/commit/32037d40) oxidecomputer/console#1949 * [f02678b0](https://github.com/oxidecomputer/console/commit/f02678b0) vite 5.1 * [1936e0d8](https://github.com/oxidecomputer/console/commit/1936e0d8) oxidecomputer/console#1948 * [cfda1636](https://github.com/oxidecomputer/console/commit/cfda1636) forgot to bump mockServiceWorker.js (no actual changes) * [4792105e](https://github.com/oxidecomputer/console/commit/4792105e) oxidecomputer/console#1943 * [a26db9ea](https://github.com/oxidecomputer/console/commit/a26db9ea) upgrade date-fns thru major version,
update calls accordingly * [3dd635a6](https://github.com/oxidecomputer/console/commit/3dd635a6) oxidecomputer/console#1933 * [6c8f7a9c](https://github.com/oxidecomputer/console/commit/6c8f7a9c) upgrade filesize through a major version * [8f641b97](https://github.com/oxidecomputer/console/commit/8f641b97) remove blathering in readme about node 16, which is EOL * [e9d157a1](https://github.com/oxidecomputer/console/commit/e9d157a1) do ladle too—oh my god why does it have 3000 lines in the lockfile * [e1cdcc13](https://github.com/oxidecomputer/console/commit/e1cdcc13) oxidecomputer/console#1941 * [76877ffb](https://github.com/oxidecomputer/console/commit/76877ffb) oxidecomputer/console#1938 --- tools/console_version | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/console_version b/tools/console_version index 7f80546553..1b2cc62547 100644 --- a/tools/console_version +++ b/tools/console_version @@ -1,2 +1,2 @@ -COMMIT="e5a1f804faa913de3be5b4cddac2011247a99774" -SHA2="54ff1026062fc1a3f0de86aa558d051b8ad6248d458c1767b9e926f2606e75f5" +COMMIT="80f116734fdf8ef4f3b5e49ed39e49339460407c" +SHA2="458727fe747797860d02c9f75e62d308586c235d4708c549d5b70a40cce4d2d1" From d7db26d9c6f09fec3fec6991e4a9cd7f90128465 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 26 Feb 2024 11:54:31 -0800 Subject: [PATCH 043/157] trigger inventory collection after blueprint execution (#5130) --- .../src/app/background/blueprint_execution.rs | 11 +++- nexus/src/app/background/init.rs | 56 +++++++++++-------- 2 files changed, 42 insertions(+), 25 deletions(-) diff --git a/nexus/src/app/background/blueprint_execution.rs b/nexus/src/app/background/blueprint_execution.rs index 373f023288..4ba60ab566 100644 --- a/nexus/src/app/background/blueprint_execution.rs +++ b/nexus/src/app/background/blueprint_execution.rs @@ -20,6 +20,7 @@ pub struct BlueprintExecutor { datastore: Arc, rx_blueprint: watch::Receiver>>, nexus_label: String, + tx: watch::Sender, } impl BlueprintExecutor { @@ -30,7 +31,12 @@ impl BlueprintExecutor { >, nexus_label: String, ) -> BlueprintExecutor { - BlueprintExecutor { datastore, rx_blueprint, nexus_label } + let (tx, _) = watch::channel(0); + BlueprintExecutor { datastore, rx_blueprint, nexus_label, tx } + } + + pub fn watcher(&self) -> watch::Receiver { + self.tx.subscribe() } } @@ -71,6 +77,9 @@ impl BackgroundTask for BlueprintExecutor { ) .await; + // Trigger anybody waiting for this to finish. 
+ self.tx.send_modify(|count| *count = *count + 1); + // Return the result as a `serde_json::Value` match result { Ok(()) => json!({}), diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index 846051a068..9ba30bab64 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -170,30 +170,6 @@ impl BackgroundTasks { ) }; - // Background task: inventory collector - let task_inventory_collection = { - let collector = inventory_collection::InventoryCollector::new( - datastore.clone(), - resolver, - &nexus_id.to_string(), - config.inventory.nkeep, - config.inventory.disable, - ); - let task = driver.register( - String::from("inventory_collection"), - String::from( - "collects hardware and software inventory data from the \ - whole system", - ), - config.inventory.period_secs, - Box::new(collector), - opctx.child(BTreeMap::new()), - vec![], - ); - - task - }; - // Background task: phantom disk detection let task_phantom_disks = { let detector = @@ -230,6 +206,7 @@ impl BackgroundTasks { rx_blueprint.clone(), nexus_id.to_string(), ); + let rx_blueprint_exec = blueprint_executor.watcher(); let task_blueprint_executor = driver.register( String::from("blueprint_executor"), String::from("Executes the target blueprint"), @@ -239,6 +216,37 @@ impl BackgroundTasks { vec![Box::new(rx_blueprint)], ); + // Background task: inventory collector + // + // This currently depends on the "output" of the blueprint executor in + // order to automatically trigger inventory collection whenever the + // blueprint executor runs. In the limit, this could become a problem + // because the blueprint executor might also depend indirectly on the + // inventory collector. In that case, we may need to do something more + // complicated. But for now, this works. + let task_inventory_collection = { + let collector = inventory_collection::InventoryCollector::new( + datastore.clone(), + resolver, + &nexus_id.to_string(), + config.inventory.nkeep, + config.inventory.disable, + ); + let task = driver.register( + String::from("inventory_collection"), + String::from( + "collects hardware and software inventory data from the \ + whole system", + ), + config.inventory.period_secs, + Box::new(collector), + opctx.child(BTreeMap::new()), + vec![Box::new(rx_blueprint_exec)], + ); + + task + }; + let task_service_zone_nat_tracker = { driver.register( "service_zone_nat_tracker".to_string(), From d514878f417a94247791bd5564fbaafa9b4170a0 Mon Sep 17 00:00:00 2001 From: Charlie Park Date: Mon, 26 Feb 2024 14:32:16 -0800 Subject: [PATCH 044/157] Update Floating IP field name from address to ip (#5144) Fixes #5046 Our API for Floating IPs currently has an `address` field, though in a few other places we use `ip`. This PR just makes the API more consistent across the API (and DB). 
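
For illustration only, a caller constructing the create params now sets `ip` where it previously set `address`. This is a hedged sketch mirroring the shape of the updated tests below; the name, description, and address values are hypothetical:

```rust
use nexus_types::external_api::params;
use omicron_common::api::external::IdentityMetadataCreateParams;

// Hypothetical example of the renamed field (not part of this diff).
let create = params::FloatingIpCreate {
    identity: IdentityMetadataCreateParams {
        name: "my-fip".parse().unwrap(),
        description: String::from("a floating ip"),
    },
    // Formerly `address: Some(...)`; `ip: None` would let an address be
    // chosen automatically from the resolved pool.
    ip: Some(std::net::Ipv4Addr::new(10, 0, 0, 141).into()),
    // `None` selects the default pool.
    pool: None,
};
```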
--- nexus/db-queries/src/db/datastore/external_ip.rs | 2 +- nexus/test-utils/src/resource_helpers.rs | 4 ++-- nexus/tests/integration_tests/endpoints.rs | 2 +- nexus/tests/integration_tests/external_ips.rs | 8 ++++---- nexus/types/src/external_api/params.rs | 2 +- openapi/nexus.json | 8 ++++---- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 24439aa3a0..d15e1b7ca8 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -256,7 +256,7 @@ impl DataStore { let pool_id = pool.id(); - let data = if let Some(ip) = params.address { + let data = if let Some(ip) = params.ip { IncompleteExternalIp::for_floating_explicit( ip_id, &Name(params.identity.name), diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 764332c5bc..25e58d093d 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -267,7 +267,7 @@ pub async fn create_floating_ip( client: &ClientTestContext, fip_name: &str, project: &str, - address: Option, + ip: Option, parent_pool_name: Option<&str>, ) -> FloatingIp { object_create( @@ -278,7 +278,7 @@ pub async fn create_floating_ip( name: fip_name.parse().unwrap(), description: String::from("a floating ip"), }, - address, + ip, pool: parent_pool_name.map(|v| NameOrId::Name(v.parse().unwrap())), }, ) diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 81f2a02b31..c5c69df232 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -768,7 +768,7 @@ pub static DEMO_FLOAT_IP_CREATE: Lazy = name: DEMO_FLOAT_IP_NAME.clone(), description: String::from("a new IP pool"), }, - address: Some(std::net::Ipv4Addr::new(10, 0, 0, 141).into()), + ip: Some(std::net::Ipv4Addr::new(10, 0, 0, 141).into()), pool: None, }); diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs index 57f813d505..ee59c6a034 100644 --- a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -189,7 +189,7 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { name: fip_name.parse().unwrap(), description: String::from("a floating ip"), }, - address: None, + ip: None, pool: Some(NameOrId::Name("other-pool".parse().unwrap())), }; let url = format!("/v1/floating-ips?project={}", project.identity.name); @@ -259,7 +259,7 @@ async fn test_floating_ip_create_fails_in_other_silo_pool( name: fip_name.parse().unwrap(), description: String::from("a floating ip"), }, - address: None, + ip: None, pool: Some(NameOrId::Name("external-silo-pool".parse().unwrap())), }; @@ -316,7 +316,7 @@ async fn test_floating_ip_create_ip_in_use( name: FIP_NAMES[1].parse().unwrap(), description: "another fip".into(), }, - address: Some(contested_ip), + ip: Some(contested_ip), pool: None, })) .expect_status(Some(StatusCode::BAD_REQUEST)), @@ -364,7 +364,7 @@ async fn test_floating_ip_create_name_in_use( name: contested_name.parse().unwrap(), description: "another fip".into(), }, - address: None, + ip: None, pool: None, })) .expect_status(Some(StatusCode::BAD_REQUEST)), diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 07eeb9b679..567b1ff4ad 100644 --- a/nexus/types/src/external_api/params.rs +++ 
b/nexus/types/src/external_api/params.rs @@ -883,7 +883,7 @@ pub struct FloatingIpCreate { /// An IP address to reserve for use as a floating IP. This field is /// optional: when not set, an address will be automatically chosen from /// `pool`. If set, then the IP must be available in the resolved `pool`. - pub address: Option, + pub ip: Option, /// The parent IP pool that a floating IP is pulled from. If unset, the /// default pool is selected. diff --git a/openapi/nexus.json b/openapi/nexus.json index cc8edec51d..b0aa84d67a 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -11864,15 +11864,15 @@ "description": "Parameters for creating a new floating IP address for instances.", "type": "object", "properties": { - "address": { + "description": { + "type": "string" + }, + "ip": { "nullable": true, "description": "An IP address to reserve for use as a floating IP. This field is optional: when not set, an address will be automatically chosen from `pool`. If set, then the IP must be available in the resolved `pool`.", "type": "string", "format": "ip" }, - "description": { - "type": "string" - }, "name": { "$ref": "#/components/schemas/Name" }, From 58f7129d09ba0bc99c213806aa6fa8c8965f2ea7 Mon Sep 17 00:00:00 2001 From: Charlie Park Date: Tue, 27 Feb 2024 11:24:01 -0800 Subject: [PATCH 045/157] Bump web console (#5148) Updating console for #5144 https://github.com/oxidecomputer/console/compare/80f11673...e991ec5c --- tools/console_version | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/console_version b/tools/console_version index 1b2cc62547..ee7f619b53 100644 --- a/tools/console_version +++ b/tools/console_version @@ -1,2 +1,2 @@ -COMMIT="80f116734fdf8ef4f3b5e49ed39e49339460407c" -SHA2="458727fe747797860d02c9f75e62d308586c235d4708c549d5b70a40cce4d2d1" +COMMIT="e991ec5cf148d83f135b6a692dca9b8df05acef0" +SHA2="18d53e4485e63d9e2273a889e70785648052e8a231df89f3bcbc2ed01a0eeeb1" From 2e4287b4656eb649db4dc8ad21915d5f673f52c6 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Tue, 27 Feb 2024 16:27:37 -0500 Subject: [PATCH 046/157] Blueprint execution: Add dataset records for Crucible zones (#5143) On each invocation, for every Crucible zone present in the blueprint (all of which have already successfully been sent to sled-agent by this point), we attempt to insert a `dataset` record for that zone, but only if a record does not already exist. The datasets themselves are created by sled-agent when we send the zone configs. Fixes #5118. 
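
In outline, the reconciliation pass behaves like the following condensed sketch. This is not the verbatim implementation (which appears in `datasets.rs` below); `dataset_for_zone` is a hypothetical helper standing in for the address/pool parsing the real module does inline:

```rust
use std::collections::BTreeSet;

use nexus_db_model::{Dataset, DatasetKind};
use nexus_db_queries::context::OpContext;
use nexus_db_queries::db::DataStore;
use nexus_types::deployment::OmicronZoneConfig;
use nexus_types::identity::Asset;
use uuid::Uuid;

// Hedged sketch of the per-blueprint pass over crucible zones.
async fn ensure_records(
    opctx: &OpContext,
    datastore: &DataStore,
    zones: &[OmicronZoneConfig],
) -> anyhow::Result<()> {
    // Prefetch existing crucible dataset IDs purely as an optimization.
    let mut existing: BTreeSet<Uuid> = datastore
        .dataset_list_all_batched(opctx, Some(DatasetKind::Crucible))
        .await?
        .into_iter()
        .map(|d| d.id())
        .collect();
    for zone in zones {
        if existing.remove(&zone.id) {
            continue; // record already present; it is never modified
        }
        let dataset: Dataset = dataset_for_zone(zone)?; // hypothetical helper
        // A `None` result means another Nexus inserted the record first,
        // which is treated as success.
        let _ = datastore.dataset_insert_if_not_exists(dataset).await?;
    }
    Ok(())
}
```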
--- Cargo.lock | 1 + nexus/blueprint-execution/Cargo.toml | 1 + nexus/blueprint-execution/src/datasets.rs | 315 +++++++++++++++++++ nexus/blueprint-execution/src/lib.rs | 13 +- nexus/db-model/src/dataset_kind.rs | 2 +- nexus/db-queries/src/db/collection_insert.rs | 21 ++ nexus/db-queries/src/db/datastore/dataset.rs | 265 +++++++++++++++- 7 files changed, 614 insertions(+), 4 deletions(-) create mode 100644 nexus/blueprint-execution/src/datasets.rs diff --git a/Cargo.lock b/Cargo.lock index 85b7e5a186..27b5bbd206 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4271,6 +4271,7 @@ dependencies = [ "dns-service-client", "futures", "httptest", + "illumos-utils", "internal-dns", "ipnet", "nexus-db-model", diff --git a/nexus/blueprint-execution/Cargo.toml b/nexus/blueprint-execution/Cargo.toml index 3284bda27e..164559b468 100644 --- a/nexus/blueprint-execution/Cargo.toml +++ b/nexus/blueprint-execution/Cargo.toml @@ -10,6 +10,7 @@ omicron-rpaths.workspace = true anyhow.workspace = true dns-service-client.workspace = true futures.workspace = true +illumos-utils.workspace = true internal-dns.workspace = true nexus-db-model.workspace = true nexus-db-queries.workspace = true diff --git a/nexus/blueprint-execution/src/datasets.rs b/nexus/blueprint-execution/src/datasets.rs new file mode 100644 index 0000000000..97d8324fdb --- /dev/null +++ b/nexus/blueprint-execution/src/datasets.rs @@ -0,0 +1,315 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Ensures dataset records required by a given blueprint + +use anyhow::Context; +use illumos_utils::zpool::ZpoolName; +use nexus_db_model::Dataset; +use nexus_db_model::DatasetKind; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::DataStore; +use nexus_types::deployment::OmicronZoneConfig; +use nexus_types::deployment::OmicronZoneType; +use nexus_types::identity::Asset; +use slog::info; +use slog::warn; +use slog_error_chain::InlineErrorChain; +use std::collections::BTreeSet; +use std::net::SocketAddrV6; + +/// For each crucible zone in `blueprint`, ensure that a corresponding dataset +/// record exists in `datastore` +/// +/// Does not modify any existing dataset records. Returns the number of datasets +/// inserted. +pub(crate) async fn ensure_crucible_dataset_records_exist( + opctx: &OpContext, + datastore: &DataStore, + all_omicron_zones: impl Iterator, +) -> anyhow::Result { + // Before attempting to insert any datasets, first query for any existing + // dataset records so we can filter them out. This looks like a typical + // TOCTOU issue, but it is purely a performance optimization. We expect + // almost all executions of this function to do nothing: new crucible + // datasets are created very rarely relative to how frequently blueprint + // realization happens. We could remove this check and filter and instead + // run the below "insert if not exists" query on every crucible zone, and + // the behavior would still be correct. However, that would issue far more + // queries than necessary in the very common case of "we don't need to do + // anything at all". + let mut crucible_datasets = datastore + .dataset_list_all_batched(opctx, Some(DatasetKind::Crucible)) + .await + .context("failed to list all datasets")? 
+ .into_iter() + .map(|dataset| dataset.id()) + .collect::>(); + + let mut num_inserted = 0; + let mut num_already_exist = 0; + + for zone in all_omicron_zones { + let OmicronZoneType::Crucible { address, dataset } = &zone.zone_type + else { + continue; + }; + + let id = zone.id; + + // If already present in the datastore, move on. + if crucible_datasets.remove(&id) { + num_already_exist += 1; + continue; + } + + // Map progenitor client strings into the types we need. We never + // expect these to fail. + let addr: SocketAddrV6 = match address.parse() { + Ok(addr) => addr, + Err(err) => { + warn!( + opctx.log, "failed to parse crucible zone address"; + "address" => address, + "err" => InlineErrorChain::new(&err), + ); + continue; + } + }; + let zpool_name: ZpoolName = match dataset.pool_name.parse() { + Ok(name) => name, + Err(err) => { + warn!( + opctx.log, "failed to parse crucible zone pool name"; + "pool_name" => &*dataset.pool_name, + "err" => err, + ); + continue; + } + }; + + let pool_id = zpool_name.id(); + let dataset = Dataset::new(id, pool_id, addr, DatasetKind::Crucible); + let maybe_inserted = datastore + .dataset_insert_if_not_exists(dataset) + .await + .with_context(|| { + format!("failed to insert dataset record for dataset {id}") + })?; + + // If we succeeded in inserting, log it; if `maybe_dataset` is `None`, + // we must have lost the TOCTOU race described above, and another Nexus + // must have inserted this dataset before we could. + if maybe_inserted.is_some() { + info!( + opctx.log, + "inserted new dataset for crucible zone"; + "id" => %id, + ); + num_inserted += 1; + } else { + num_already_exist += 1; + } + } + + // We don't currently support removing datasets, so this would be + // surprising: the database contains dataset records that are no longer in + // our blueprint. We can't do anything about this, so just warn. + if !crucible_datasets.is_empty() { + warn!( + opctx.log, + "database contains {} unexpected crucible datasets", + crucible_datasets.len(); + "dataset_ids" => ?crucible_datasets, + ); + } + + info!( + opctx.log, + "ensured all crucible zones have dataset records"; + "num_inserted" => num_inserted, + "num_already_existed" => num_already_exist, + ); + + Ok(num_inserted) +} + +#[cfg(test)] +mod tests { + use super::*; + use nexus_db_model::SledBaseboard; + use nexus_db_model::SledSystemHardware; + use nexus_db_model::SledUpdate; + use nexus_db_model::Zpool; + use nexus_test_utils_macros::nexus_test; + use sled_agent_client::types::OmicronZoneDataset; + use uuid::Uuid; + + type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + + #[nexus_test] + async fn test_ensure_crucible_dataset_records_exist( + cptestctx: &ControlPlaneTestContext, + ) { + // Set up. + let nexus = &cptestctx.server.apictx().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + let opctx = &opctx; + + // Use the standard representative inventory collection. + let representative = nexus_inventory::examples::representative(); + let collection = representative.builder.build(); + + // Record the sleds and zpools contained in this collection. 
+ let rack_id = Uuid::new_v4(); + for (&sled_id, config) in &collection.omicron_zones { + let sled = SledUpdate::new( + sled_id, + "[::1]:0".parse().unwrap(), + SledBaseboard { + serial_number: format!("test-{sled_id}"), + part_number: "test-sled".to_string(), + revision: 0, + }, + SledSystemHardware { + is_scrimlet: false, + usable_hardware_threads: 128, + usable_physical_ram: (64 << 30).try_into().unwrap(), + reservoir_size: (16 << 30).try_into().unwrap(), + }, + rack_id, + ); + datastore + .sled_upsert(sled) + .await + .expect("failed to upsert sled") + .unwrap(); + + for zone in &config.zones.zones { + let OmicronZoneType::Crucible { dataset, .. } = &zone.zone_type + else { + continue; + }; + let zpool_name: ZpoolName = + dataset.pool_name.parse().expect("invalid zpool name"); + let zpool = Zpool::new( + zpool_name.id(), + sled_id, + Uuid::new_v4(), // physical_disk_id + (1 << 30).try_into().unwrap(), // total_size + ); + datastore + .zpool_upsert(zpool) + .await + .expect("failed to upsert zpool"); + } + } + + // How many crucible zones are there? + let ncrucible_zones = collection + .all_omicron_zones() + .filter(|z| matches!(z.zone_type, OmicronZoneType::Crucible { .. })) + .count(); + + // Prior to ensuring datasets exist, there should be none. + assert_eq!( + datastore + .dataset_list_all_batched(opctx, Some(DatasetKind::Crucible)) + .await + .unwrap() + .len(), + 0 + ); + let ndatasets_inserted = ensure_crucible_dataset_records_exist( + opctx, + datastore, + collection.all_omicron_zones(), + ) + .await + .expect("failed to ensure crucible datasets"); + + // We should have inserted a dataset for each crucible zone. + assert_eq!(ncrucible_zones, ndatasets_inserted); + assert_eq!( + datastore + .dataset_list_all_batched(opctx, Some(DatasetKind::Crucible)) + .await + .unwrap() + .len(), + ncrucible_zones, + ); + + // Ensuring the same crucible datasets again should insert no new + // records. + let ndatasets_inserted = ensure_crucible_dataset_records_exist( + opctx, + datastore, + collection.all_omicron_zones(), + ) + .await + .expect("failed to ensure crucible datasets"); + assert_eq!(0, ndatasets_inserted); + assert_eq!( + datastore + .dataset_list_all_batched(opctx, Some(DatasetKind::Crucible)) + .await + .unwrap() + .len(), + ncrucible_zones, + ); + + // Create another zpool on one of the sleds, so we can add a new + // crucible zone that uses it. + let new_zpool_id = Uuid::new_v4(); + for &sled_id in collection.omicron_zones.keys().take(1) { + let zpool = Zpool::new( + new_zpool_id, + sled_id, + Uuid::new_v4(), // physical_disk_id + (1 << 30).try_into().unwrap(), // total_size + ); + datastore + .zpool_upsert(zpool) + .await + .expect("failed to upsert zpool"); + } + + // Call `ensure_crucible_dataset_records_exist` again, adding a new + // crucible zone. It should insert only this new zone. 
+ let new_zone = OmicronZoneConfig { + id: Uuid::new_v4(), + underlay_address: "::1".parse().unwrap(), + zone_type: OmicronZoneType::Crucible { + address: "[::1]:0".to_string(), + dataset: OmicronZoneDataset { + pool_name: ZpoolName::new_external(new_zpool_id) + .to_string() + .parse() + .unwrap(), + }, + }, + }; + let ndatasets_inserted = ensure_crucible_dataset_records_exist( + opctx, + datastore, + collection.all_omicron_zones().chain(std::iter::once(&new_zone)), + ) + .await + .expect("failed to ensure crucible datasets"); + assert_eq!(ndatasets_inserted, 1); + assert_eq!( + datastore + .dataset_list_all_batched(opctx, Some(DatasetKind::Crucible)) + .await + .unwrap() + .len(), + ncrucible_zones + 1, + ); + } +} diff --git a/nexus/blueprint-execution/src/lib.rs b/nexus/blueprint-execution/src/lib.rs index d6f5f8fc31..531c4f57a8 100644 --- a/nexus/blueprint-execution/src/lib.rs +++ b/nexus/blueprint-execution/src/lib.rs @@ -19,6 +19,7 @@ use std::collections::BTreeMap; use std::net::SocketAddrV6; use uuid::Uuid; +mod datasets; mod dns; mod omicron_zones; mod resource_allocation; @@ -89,6 +90,14 @@ where omicron_zones::deploy_zones(&opctx, &sleds_by_id, &blueprint.omicron_zones) .await?; + datasets::ensure_crucible_dataset_records_exist( + &opctx, + datastore, + blueprint.all_omicron_zones().map(|(_sled_id, zone)| zone), + ) + .await + .map_err(|err| vec![err])?; + dns::deploy_dns( &opctx, datastore, @@ -97,5 +106,7 @@ where &sleds_by_id, ) .await - .map_err(|e| vec![anyhow!("{}", InlineErrorChain::new(&e))]) + .map_err(|e| vec![anyhow!("{}", InlineErrorChain::new(&e))])?; + + Ok(()) } diff --git a/nexus/db-model/src/dataset_kind.rs b/nexus/db-model/src/dataset_kind.rs index 00317592e8..86495b9d61 100644 --- a/nexus/db-model/src/dataset_kind.rs +++ b/nexus/db-model/src/dataset_kind.rs @@ -11,7 +11,7 @@ impl_enum_type!( #[diesel(postgres_type(name = "dataset_kind", schema = "public"))] pub struct DatasetKindEnum; - #[derive(Clone, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)] + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)] #[diesel(sql_type = DatasetKindEnum)] pub enum DatasetKind; diff --git a/nexus/db-queries/src/db/collection_insert.rs b/nexus/db-queries/src/db/collection_insert.rs index b295f0574d..ef2a4a4d48 100644 --- a/nexus/db-queries/src/db/collection_insert.rs +++ b/nexus/db-queries/src/db/collection_insert.rs @@ -202,6 +202,27 @@ where .map_err(|e| Self::translate_async_error(e)) } + /// Issues the CTE asynchronously and parses the result. + /// + /// The four outcomes are: + /// - Ok(Some(new row)) + /// - Ok(None) + /// - Error(collection not found) + /// - Error(other diesel error) + pub async fn insert_and_get_optional_result_async( + self, + conn: &async_bb8_diesel::Connection, + ) -> AsyncInsertIntoCollectionResult> + where + // We require this bound to ensure that "Self" is runnable as query. + Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, + { + self.get_result_async::(conn) + .await + .optional() + .map_err(|e| Self::translate_async_error(e)) + } + /// Issues the CTE asynchronously and parses the result. /// /// The three outcomes are: diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index 0b26789e8f..3c1fd0afb1 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -5,6 +5,9 @@ //! [`DataStore`] methods on [`Dataset`]s. 
use super::DataStore; +use super::SQL_BATCH_SIZE; +use crate::authz; +use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; @@ -13,12 +16,17 @@ use crate::db::error::ErrorHandler; use crate::db::identity::Asset; use crate::db::model::Dataset; use crate::db::model::Zpool; +use crate::db::pagination::paginated; +use crate::db::pagination::Paginator; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use diesel::upsert::excluded; +use nexus_db_model::DatasetKind; use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; +use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; @@ -45,11 +53,12 @@ impl DataStore { ) -> CreateResult { use db::schema::dataset::dsl; + let dataset_id = dataset.id(); let zpool_id = dataset.pool_id; Zpool::insert_resource( zpool_id, diesel::insert_into(dsl::dataset) - .values(dataset.clone()) + .values(dataset) .on_conflict(dsl::id) .do_update() .set(( @@ -73,9 +82,261 @@ impl DataStore { e, ErrorHandler::Conflict( ResourceType::Dataset, - &dataset.id().to_string(), + &dataset_id.to_string(), ), ), }) } + + /// Stores a new dataset in the database, but only if a dataset with the + /// given `id` does not already exist + /// + /// Does not update existing rows. If a dataset with the given ID already + /// exists, returns `Ok(None)`. + pub async fn dataset_insert_if_not_exists( + &self, + dataset: Dataset, + ) -> CreateResult> { + use db::schema::dataset::dsl; + + let zpool_id = dataset.pool_id; + Zpool::insert_resource( + zpool_id, + diesel::insert_into(dsl::dataset) + .values(dataset) + .on_conflict(dsl::id) + .do_nothing(), + ) + .insert_and_get_optional_result_async( + &*self.pool_connection_unauthorized().await?, + ) + .await + .map_err(|e| match e { + AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { + type_name: ResourceType::Zpool, + lookup_type: LookupType::ById(zpool_id), + }, + AsyncInsertError::DatabaseError(e) => { + public_error_from_diesel(e, ErrorHandler::Server) + } + }) + } + + /// List one page of datasets + /// + /// If `filter_kind` is `Some(value)`, only datasets with a `kind` matching + /// `value` will be returned. If `filter_kind` is `None`, all datasets will + /// be returned. + async fn dataset_list( + &self, + opctx: &OpContext, + filter_kind: Option, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec { + opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + use db::schema::dataset::dsl; + + let mut query = paginated(dsl::dataset, dsl::id, pagparams) + .filter(dsl::time_deleted.is_null()); + + if let Some(kind) = filter_kind { + query = query.filter(dsl::kind.eq(kind)); + } + + query + .select(Dataset::as_select()) + .load_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// List all datasets, making as many queries as needed to get them all + /// + /// If `filter_kind` is `Some(value)`, only datasets with a `kind` matching + /// `value` will be returned. If `filter_kind` is `None`, all datasets will + /// be returned. 
+ /// + /// This should generally not be used in API handlers or other + /// latency-sensitive contexts, but it can make sense in saga actions or + /// background tasks. + pub async fn dataset_list_all_batched( + &self, + opctx: &OpContext, + filter_kind: Option, + ) -> ListResultVec { + opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + opctx.check_complex_operations_allowed()?; + + let mut all_datasets = Vec::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = self + .dataset_list(opctx, filter_kind, &p.current_pagparams()) + .await?; + paginator = + p.found_batch(&batch, &|d: &nexus_db_model::Dataset| d.id()); + all_datasets.extend(batch); + } + + Ok(all_datasets) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::db::datastore::test_utils::datastore_test; + use nexus_db_model::SledBaseboard; + use nexus_db_model::SledSystemHardware; + use nexus_db_model::SledUpdate; + use nexus_test_utils::db::test_setup_database; + use omicron_test_utils::dev; + + #[tokio::test] + async fn test_insert_if_not_exists() { + let logctx = dev::test_setup_log("test_insert_if_not_exists"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + let opctx = &opctx; + + // There should be no datasets initially. + assert_eq!( + datastore.dataset_list_all_batched(opctx, None).await.unwrap(), + [] + ); + + // Create a fake sled that holds our fake zpool. + let sled_id = Uuid::new_v4(); + let sled = SledUpdate::new( + sled_id, + "[::1]:0".parse().unwrap(), + SledBaseboard { + serial_number: "test-sn".to_string(), + part_number: "test-pn".to_string(), + revision: 0, + }, + SledSystemHardware { + is_scrimlet: false, + usable_hardware_threads: 128, + usable_physical_ram: (64 << 30).try_into().unwrap(), + reservoir_size: (16 << 30).try_into().unwrap(), + }, + Uuid::new_v4(), + ); + datastore + .sled_upsert(sled) + .await + .expect("failed to upsert sled") + .unwrap(); + + // Create a fake zpool that backs our fake datasets. + let zpool_id = Uuid::new_v4(); + let zpool = Zpool::new( + zpool_id, + sled_id, + Uuid::new_v4(), + (1 << 30).try_into().unwrap(), + ); + datastore.zpool_upsert(zpool).await.expect("failed to upsert zpool"); + + // Inserting a new dataset should succeed. + let dataset1 = datastore + .dataset_insert_if_not_exists(Dataset::new( + Uuid::new_v4(), + zpool_id, + "[::1]:0".parse().unwrap(), + DatasetKind::Crucible, + )) + .await + .expect("failed to insert dataset") + .expect("insert found unexpected existing dataset"); + let mut expected_datasets = vec![dataset1.clone()]; + assert_eq!( + datastore.dataset_list_all_batched(opctx, None).await.unwrap(), + expected_datasets, + ); + assert_eq!( + datastore + .dataset_list_all_batched(opctx, Some(DatasetKind::Crucible)) + .await + .unwrap(), + expected_datasets, + ); + assert_eq!( + datastore + .dataset_list_all_batched(opctx, Some(DatasetKind::Cockroach)) + .await + .unwrap(), + [], + ); + + // Attempting to insert another dataset with the same ID should succeed + // without updating the existing record. We'll check this by passing a + // different socket address and kind.
+ let insert_again_result = datastore + .dataset_insert_if_not_exists(Dataset::new( + dataset1.id(), + zpool_id, + "[::1]:12345".parse().unwrap(), + DatasetKind::Cockroach, + )) + .await + .expect("failed to do-nothing insert dataset"); + assert_eq!(insert_again_result, None); + assert_eq!( + datastore.dataset_list_all_batched(opctx, None).await.unwrap(), + expected_datasets, + ); + + // We can also upsert a different dataset... + let dataset2 = datastore + .dataset_upsert(Dataset::new( + Uuid::new_v4(), + zpool_id, + "[::1]:0".parse().unwrap(), + DatasetKind::Cockroach, + )) + .await + .expect("failed to upsert dataset"); + expected_datasets.push(dataset2.clone()); + expected_datasets.sort_by_key(|d| d.id()); + assert_eq!( + datastore.dataset_list_all_batched(opctx, None).await.unwrap(), + expected_datasets, + ); + assert_eq!( + datastore + .dataset_list_all_batched(opctx, Some(DatasetKind::Crucible)) + .await + .unwrap(), + [dataset1.clone()], + ); + assert_eq!( + datastore + .dataset_list_all_batched(opctx, Some(DatasetKind::Cockroach)) + .await + .unwrap(), + [dataset2.clone()], + ); + + // ... and trying to `insert_if_not_exists` should similarly return + // `None`. + let insert_again_result = datastore + .dataset_insert_if_not_exists(Dataset::new( + dataset1.id(), + zpool_id, + "[::1]:12345".parse().unwrap(), + DatasetKind::Cockroach, + )) + .await + .expect("failed to do-nothing insert dataset"); + assert_eq!(insert_again_result, None); + assert_eq!( + datastore.dataset_list_all_batched(opctx, None).await.unwrap(), + expected_datasets, + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } } From 0154193c5cee57e29e6d8f8f2e36610e3ea686d8 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 27 Feb 2024 22:29:42 -0800 Subject: [PATCH 047/157] chore(deps): update rust crate dyn-clone to 1.0.17 (#5141) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 27b5bbd206..b057b84f6a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2042,9 +2042,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" [[package]] name = "ecdsa" diff --git a/Cargo.toml b/Cargo.toml index db37547ea0..ec2c345edf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -200,7 +200,7 @@ dns-server = { path = "dns-server" } dns-service-client = { path = "clients/dns-service-client" } dpd-client = { path = "clients/dpd-client" } dropshot = { git = "https://github.com/oxidecomputer/dropshot", branch = "main", features = [ "usdt-probes" ] } -dyn-clone = "1.0.16" +dyn-clone = "1.0.17" either = "1.10.0" expectorate = "1.1.0" fatfs = "0.3.6" From 0761adaef664c9c0f86726ca308ad91d36a0f53d Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Wed, 28 Feb 2024 13:30:27 +0000 Subject: [PATCH 048/157] chore(deps): update taiki-e/install-action digest to 1d776b1 (#5146) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`4ce8785` ->
`1d776b1`](https://togithub.com/taiki-e/install-action/compare/4ce8785...1d776b1) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 10c1c04003..0f942e7d76 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@4ce8785db2a8a56c9ede16f705c2c49c5c61669c # v2 + uses: taiki-e/install-action@1d776b18af134fca43744080130085d256938196 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From ceeceee341967e776e349b0efffa55d30ff55c7e Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 29 Feb 2024 03:38:27 +0000 Subject: [PATCH 049/157] chore(deps): update rust to v1.76.0 (#5097) Co-authored-by: Rain --- Cargo.lock | 300 +++++++++++++------------------- Cargo.toml | 7 +- dev-tools/oxlog/src/lib.rs | 6 +- dev-tools/xtask/src/main.rs | 20 ++- nexus/src/app/background/mod.rs | 2 - nexus/src/cidata.rs | 6 +- rust-toolchain.toml | 2 +- sled-hardware/Cargo.toml | 1 + sled-hardware/src/cleanup.rs | 13 +- wicket/src/state/mod.rs | 7 +- wicket/src/ui/widgets/mod.rs | 2 +- workspace-hack/Cargo.toml | 14 +- 12 files changed, 169 insertions(+), 211 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b057b84f6a..2c1e4acd50 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -174,7 +174,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -275,7 +275,7 @@ checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -297,7 +297,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -308,7 +308,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -361,7 +361,7 @@ dependencies = [ "quote", "serde", "serde_tokenstream 0.2.0", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -463,20 +463,20 @@ dependencies = [ [[package]] name = "bhyve_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=c7cdaf1875d259e29ca50a14b77b0bfd9dfe443d#c7cdaf1875d259e29ca50a14b77b0bfd9dfe443d" +source = "git+https://github.com/oxidecomputer/propolis?rev=488a70c644e332dae98447292a95e45074c8107c#488a70c644e332dae98447292a95e45074c8107c" dependencies = [ "bhyve_api_sys", "libc", - "strum 0.25.0", + "strum 0.26.1", ] [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = 
"git+https://github.com/oxidecomputer/propolis?rev=c7cdaf1875d259e29ca50a14b77b0bfd9dfe443d#c7cdaf1875d259e29ca50a14b77b0bfd9dfe443d" +source = "git+https://github.com/oxidecomputer/propolis?rev=488a70c644e332dae98447292a95e45074c8107c#488a70c644e332dae98447292a95e45074c8107c" dependencies = [ "libc", - "strum 0.25.0", + "strum 0.26.1", ] [[package]] @@ -498,7 +498,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.48", + "syn 2.0.51", "which", ] @@ -655,7 +655,7 @@ dependencies = [ "ipnetwork", "omicron-common", "omicron-workspace-hack", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", "regress", "reqwest", "schemars", @@ -1018,7 +1018,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -1315,7 +1315,7 @@ dependencies = [ "chrono", "crucible-workspace-hack", "percent-encoding", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", "reqwest", "schemars", "serde", @@ -1331,7 +1331,7 @@ dependencies = [ "chrono", "crucible-workspace-hack", "percent-encoding", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", "reqwest", "schemars", "serde", @@ -1452,7 +1452,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -1500,7 +1500,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -1522,7 +1522,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core 0.20.3", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -1556,7 +1556,7 @@ dependencies = [ "quote", "serde", "serde_tokenstream 0.2.0", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -1568,8 +1568,8 @@ dependencies = [ "omicron-common", "omicron-workspace-hack", "omicron-zone-package", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "progenitor-client 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", + "progenitor-client", "quote", "reqwest", "rustfmt-wrapper", @@ -1608,7 +1608,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -1641,7 +1641,7 @@ checksum = "5fe87ce4529967e0ba1dcf8450bab64d97dfd5010a6256187ffe2e43e6f0e049" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -1661,7 +1661,7 @@ checksum = "62d671cc41a825ebabc75757b62d3d168c577f9149b2d49ece1dad1f72119d25" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -1748,7 +1748,7 @@ dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -1757,7 +1757,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -1891,7 +1891,7 @@ dependencies = [ "chrono", "http 0.2.11", "omicron-workspace-hack", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", "reqwest", "schemars", "serde", @@ -1945,8 +1945,8 @@ dependencies = [ "ipnetwork", "omicron-workspace-hack", "omicron-zone-package", - "progenitor 0.5.0 
(git+https://github.com/oxidecomputer/progenitor?branch=main)", - "progenitor-client 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", + "progenitor-client", "quote", "rand 0.8.5", "regress", @@ -2015,7 +2015,7 @@ dependencies = [ "quote", "serde", "serde_tokenstream 0.2.0", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -2388,7 +2388,7 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -2505,7 +2505,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -2584,7 +2584,7 @@ dependencies = [ "chrono", "gateway-messages", "omicron-workspace-hack", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", "rand 0.8.5", "reqwest", "schemars", @@ -3506,7 +3506,7 @@ version = "0.1.0" dependencies = [ "installinator-common", "omicron-workspace-hack", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", "regress", "reqwest", "schemars", @@ -3586,7 +3586,7 @@ dependencies = [ "omicron-common", "omicron-test-utils", "omicron-workspace-hack", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", "reqwest", "serde", "serde_json", @@ -3739,7 +3739,7 @@ version = "0.1.0" source = "git+https://github.com/oxidecomputer/opte?rev=1d29ef60a18179babfb44f0f7a3c2fe71034a2c1#1d29ef60a18179babfb44f0f7a3c2fe71034a2c1" dependencies = [ "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -4083,8 +4083,8 @@ dependencies = [ "omicron-common", "omicron-workspace-hack", "omicron-zone-package", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "progenitor-client 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", + "progenitor-client", "quote", "reqwest", "rustfmt-wrapper", @@ -4164,7 +4164,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -4306,7 +4306,7 @@ dependencies = [ "omicron-common", "omicron-passwords", "omicron-workspace-hack", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", "regress", "reqwest", "schemars", @@ -4498,7 +4498,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -4560,7 +4560,7 @@ version = "0.1.0" dependencies = [ "omicron-workspace-hack", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -4714,7 +4714,7 @@ checksum = "9e6a0fd4f737c707bd9086cc16c925f294943eb62eb71499e9fd4cf71f8b9f4e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -4889,7 +4889,7 @@ dependencies = [ "omicron-workspace-hack", "once_cell", "parse-display", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", "proptest", "rand 0.8.5", "regress", @@ -5064,7 +5064,7 @@ dependencies = [ "petgraph", "pq-sys", "pretty_assertions", - "progenitor-client 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor-client", "propolis-client", "rand 0.8.5", "rcgen", @@ -5439,10 +5439,9 @@ dependencies = [ "socket2 0.5.5", "spin 0.9.8", "string_cache", - "strum 0.25.0", "subtle", "syn 1.0.109", - "syn 2.0.48", + "syn 2.0.51", "time", "time-macros", "tokio", @@ -5562,7 +5561,7 
@@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -5673,7 +5672,7 @@ dependencies = [ "http 0.2.11", "hyper 0.14.27", "omicron-workspace-hack", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", "rand 0.8.5", "regress", "reqwest", @@ -5729,7 +5728,7 @@ dependencies = [ "futures", "omicron-common", "omicron-workspace-hack", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", "reqwest", "serde", "slog", @@ -5847,7 +5846,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -6012,7 +6011,7 @@ dependencies = [ "regex", "regex-syntax 0.8.2", "structmeta 0.3.0", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -6159,7 +6158,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -6229,7 +6228,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -6484,7 +6483,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -6542,43 +6541,18 @@ dependencies = [ [[package]] name = "progenitor" version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" -dependencies = [ - "progenitor-client 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "progenitor-impl 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "progenitor-macro 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "serde_json", -] - -[[package]] -name = "progenitor" -version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#08bbafc251d3d6828ca5921d1658c3d12d64fe7c" dependencies = [ - "progenitor-client 0.5.0 (git+https://github.com/oxidecomputer/progenitor)", - "progenitor-impl 0.5.0 (git+https://github.com/oxidecomputer/progenitor)", - "progenitor-macro 0.5.0 (git+https://github.com/oxidecomputer/progenitor)", + "progenitor-client", + "progenitor-impl", + "progenitor-macro", "serde_json", ] [[package]] name = "progenitor-client" version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" -dependencies = [ - "bytes", - "futures-core", - "percent-encoding", - "reqwest", - "serde", - "serde_json", - "serde_urlencoded", -] - -[[package]] -name = "progenitor-client" -version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#08bbafc251d3d6828ca5921d1658c3d12d64fe7c" dependencies = [ "bytes", "futures-core", @@ -6592,7 +6566,7 @@ dependencies = [ [[package]] name = "progenitor-impl" version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#08bbafc251d3d6828ca5921d1658c3d12d64fe7c" dependencies = [ "getopts", "heck 0.4.1", @@ -6605,77 
+6579,38 @@ dependencies = [ "schemars", "serde", "serde_json", - "syn 2.0.48", + "syn 2.0.51", "thiserror", "typify", "unicode-ident", ] -[[package]] -name = "progenitor-impl" -version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" -dependencies = [ - "getopts", - "heck 0.4.1", - "http 0.2.11", - "indexmap 2.2.3", - "openapiv3", - "proc-macro2", - "quote", - "regex", - "schemars", - "serde", - "serde_json", - "syn 2.0.48", - "thiserror", - "typify", - "unicode-ident", -] - -[[package]] -name = "progenitor-macro" -version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" -dependencies = [ - "openapiv3", - "proc-macro2", - "progenitor-impl 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "quote", - "schemars", - "serde", - "serde_json", - "serde_tokenstream 0.2.0", - "serde_yaml", - "syn 2.0.48", -] - [[package]] name = "progenitor-macro" version = "0.5.0" -source = "git+https://github.com/oxidecomputer/progenitor#bc0bb4b0fb40084f189eb1a8807b17fbd0ce0b64" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#08bbafc251d3d6828ca5921d1658c3d12d64fe7c" dependencies = [ "openapiv3", "proc-macro2", - "progenitor-impl 0.5.0 (git+https://github.com/oxidecomputer/progenitor)", + "progenitor-impl", "quote", "schemars", "serde", "serde_json", "serde_tokenstream 0.2.0", "serde_yaml", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=c7cdaf1875d259e29ca50a14b77b0bfd9dfe443d#c7cdaf1875d259e29ca50a14b77b0bfd9dfe443d" +source = "git+https://github.com/oxidecomputer/propolis?rev=488a70c644e332dae98447292a95e45074c8107c#488a70c644e332dae98447292a95e45074c8107c" dependencies = [ "async-trait", "base64", "futures", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor)", + "progenitor", "rand 0.8.5", "reqwest", "schemars", @@ -6691,7 +6626,7 @@ dependencies = [ [[package]] name = "propolis-mock-server" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=c7cdaf1875d259e29ca50a14b77b0bfd9dfe443d#c7cdaf1875d259e29ca50a14b77b0bfd9dfe443d" +source = "git+https://github.com/oxidecomputer/propolis?rev=488a70c644e332dae98447292a95e45074c8107c#488a70c644e332dae98447292a95e45074c8107c" dependencies = [ "anyhow", "atty", @@ -6700,7 +6635,7 @@ dependencies = [ "dropshot", "futures", "hyper 0.14.27", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor)", + "progenitor", "propolis_types", "rand 0.8.5", "reqwest", @@ -6721,7 +6656,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=c7cdaf1875d259e29ca50a14b77b0bfd9dfe443d#c7cdaf1875d259e29ca50a14b77b0bfd9dfe443d" +source = "git+https://github.com/oxidecomputer/propolis?rev=488a70c644e332dae98447292a95e45074c8107c#488a70c644e332dae98447292a95e45074c8107c" dependencies = [ "schemars", "serde", @@ -7043,7 +6978,7 @@ checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -7120,9 +7055,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = 
"c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" dependencies = [ "base64", "bytes", @@ -7148,6 +7083,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "sync_wrapper", "system-configuration", "tokio", "tokio-native-tls", @@ -7285,7 +7221,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.0", - "syn 2.0.48", + "syn 2.0.51", "unicode-ident", ] @@ -7714,7 +7650,7 @@ checksum = "7f81c2fde025af7e69b1d1420531c8a8811ca898919db177141a85313b1cb932" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -7808,9 +7744,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] @@ -7846,13 +7782,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -7913,7 +7849,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -7945,7 +7881,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -7987,7 +7923,7 @@ dependencies = [ "darling 0.20.3", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -8166,7 +8102,7 @@ dependencies = [ "ipnetwork", "omicron-common", "omicron-workspace-hack", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", "regress", "reqwest", "schemars", @@ -8196,6 +8132,7 @@ dependencies = [ "schemars", "serde", "slog", + "slog-error-chain", "thiserror", "tofino", "tokio", @@ -8303,7 +8240,7 @@ source = "git+https://github.com/oxidecomputer/slog-error-chain?branch=main#15f6 dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -8558,7 +8495,7 @@ checksum = "01b2e185515564f15375f593fb966b5718bc624ba77fe49fa4616ad619690554" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -8665,7 +8602,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive 0.2.0", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -8677,7 +8614,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive 0.3.0", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -8688,7 +8625,7 @@ checksum = "a60bcaff7397072dca0017d1db428e30d5002e00b6847703e2e42005c95fbe00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -8699,7 +8636,7 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -8731,9 +8668,6 @@ name = "strum" version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" -dependencies = [ - "strum_macros 0.25.2", -] [[package]] name = "strum" @@ -8767,7 +8701,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.48", + "syn 
2.0.51", ] [[package]] @@ -8780,7 +8714,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -8827,15 +8761,21 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.48" +version = "2.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "6ab617d94515e94ae53b8406c628598680aa0c9587474ecbe58188f7b345d66c" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "synstructure" version = "0.12.6" @@ -9010,7 +8950,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta 0.2.0", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -9050,7 +8990,7 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -9237,7 +9177,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -9504,7 +9444,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -9731,7 +9671,7 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "typify" version = "0.0.15" -source = "git+https://github.com/oxidecomputer/typify#ce009d6f83b620cbd0e3acdd9b9ea071018471d8" +source = "git+https://github.com/oxidecomputer/typify#131fe0ef3722d3034a61a8ab2994c29f9c978903" dependencies = [ "typify-impl", "typify-macro", @@ -9740,7 +9680,7 @@ dependencies = [ [[package]] name = "typify-impl" version = "0.0.15" -source = "git+https://github.com/oxidecomputer/typify#ce009d6f83b620cbd0e3acdd9b9ea071018471d8" +source = "git+https://github.com/oxidecomputer/typify#131fe0ef3722d3034a61a8ab2994c29f9c978903" dependencies = [ "heck 0.4.1", "log", @@ -9749,7 +9689,7 @@ dependencies = [ "regress", "schemars", "serde_json", - "syn 2.0.48", + "syn 2.0.51", "thiserror", "unicode-ident", ] @@ -9757,7 +9697,7 @@ dependencies = [ [[package]] name = "typify-macro" version = "0.0.15" -source = "git+https://github.com/oxidecomputer/typify#ce009d6f83b620cbd0e3acdd9b9ea071018471d8" +source = "git+https://github.com/oxidecomputer/typify#131fe0ef3722d3034a61a8ab2994c29f9c978903" dependencies = [ "proc-macro2", "quote", @@ -9765,7 +9705,7 @@ dependencies = [ "serde", "serde_json", "serde_tokenstream 0.2.0", - "syn 2.0.48", + "syn 2.0.51", "typify-impl", ] @@ -9999,7 +9939,7 @@ dependencies = [ "proc-macro2", "quote", "serde_tokenstream 0.2.0", - "syn 2.0.48", + "syn 2.0.51", "usdt-impl 0.5.0", ] @@ -10037,7 +9977,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.48", + "syn 2.0.51", "thiserror", "thread-id", "version_check", @@ -10067,7 +10007,7 @@ dependencies = [ "proc-macro2", "quote", "serde_tokenstream 0.2.0", - "syn 2.0.48", + "syn 2.0.51", "usdt-impl 0.5.0", ] @@ -10219,7 +10159,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", "wasm-bindgen-shared", ] @@ -10253,7 +10193,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", 
"wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10266,9 +10206,9 @@ checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "wasm-streams" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4609d447824375f43e1ffbc051b50ad8f4b3ae8219680c94452ea05eb240ac7" +checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" dependencies = [ "futures-util", "js-sys", @@ -10480,7 +10420,7 @@ dependencies = [ "installinator-common", "ipnetwork", "omicron-workspace-hack", - "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", + "progenitor", "regress", "reqwest", "schemars", @@ -10804,7 +10744,7 @@ checksum = "56097d5b91d711293a42be9289403896b68654625021732067eac7a4ca388a1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -10815,7 +10755,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] @@ -10835,7 +10775,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.51", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index ec2c345edf..ad3d801876 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -314,9 +314,9 @@ prettyplease = { version = "0.2.16", features = ["verbatim"] } proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } -bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "c7cdaf1875d259e29ca50a14b77b0bfd9dfe443d" } -propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "c7cdaf1875d259e29ca50a14b77b0bfd9dfe443d" } -propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "c7cdaf1875d259e29ca50a14b77b0bfd9dfe443d" } +bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "488a70c644e332dae98447292a95e45074c8107c" } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "488a70c644e332dae98447292a95e45074c8107c" } +propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "488a70c644e332dae98447292a95e45074c8107c" } proptest = "1.4.0" quote = "1.0" rand = "0.8.5" @@ -609,6 +609,7 @@ opt-level = 3 # #[patch."https://github.com/oxidecomputer/progenitor"] #progenitor = { path = "../progenitor/progenitor" } +#progenitor-client = { path = "../progenitor/progenitor-client" } #[patch."https://github.com/oxidecomputer/typify"] #typify = { path = "../typify/typify" } diff --git a/dev-tools/oxlog/src/lib.rs b/dev-tools/oxlog/src/lib.rs index 589b113928..a4774fabbc 100644 --- a/dev-tools/oxlog/src/lib.rs +++ b/dev-tools/oxlog/src/lib.rs @@ -351,8 +351,7 @@ fn load_svc_logs(dir: Utf8PathBuf, logs: &mut BTreeMap) { let is_current = filename.ends_with(".log"); - let svc_logs = - logs.entry(svc_name.to_string()).or_insert(SvcLogs::default()); + let svc_logs = logs.entry(svc_name.to_string()).or_default(); if is_current { svc_logs.current = Some(logfile.clone()); @@ -374,8 +373,7 @@ fn load_extra_logs( return; }; - let svc_logs = - logs.entry(svc_name.to_string()).or_insert(SvcLogs::default()); + let svc_logs = logs.entry(svc_name.to_string()).or_default(); for entry in entries { let Ok(entry) = entry else { diff --git 
a/dev-tools/xtask/src/main.rs b/dev-tools/xtask/src/main.rs index 4ab42736ca..42e6c10d64 100644 --- a/dev-tools/xtask/src/main.rs +++ b/dev-tools/xtask/src/main.rs @@ -26,23 +26,35 @@ enum Cmds { /// workspace CheckWorkspaceDeps, /// Run configured clippy checks - Clippy, + Clippy(ClippyArgs), +} + +#[derive(Parser)] +struct ClippyArgs { + /// Automatically apply lint suggestions. + #[clap(long)] + fix: bool, } fn main() -> Result<()> { let args = Args::parse(); match args.cmd { - Cmds::Clippy => cmd_clippy(), + Cmds::Clippy(args) => cmd_clippy(args), Cmds::CheckWorkspaceDeps => cmd_check_workspace_deps(), } } -fn cmd_clippy() -> Result<()> { +fn cmd_clippy(args: ClippyArgs) -> Result<()> { let cargo = std::env::var("CARGO").unwrap_or_else(|_| String::from("cargo")); let mut command = Command::new(&cargo); + command.arg("clippy"); + + if args.fix { + command.arg("--fix"); + } + command - .arg("clippy") // Make sure we check everything. .arg("--all-targets") .arg("--") diff --git a/nexus/src/app/background/mod.rs b/nexus/src/app/background/mod.rs index 27cdddfe15..0065a41a9e 100644 --- a/nexus/src/app/background/mod.rs +++ b/nexus/src/app/background/mod.rs @@ -20,6 +20,4 @@ mod region_replacement; mod status; mod sync_service_zone_nat; -pub use common::Driver; -pub use common::TaskHandle; pub use init::BackgroundTasks; diff --git a/nexus/src/cidata.rs b/nexus/src/cidata.rs index 8f776501b6..08e4926af1 100644 --- a/nexus/src/cidata.rs +++ b/nexus/src/cidata.rs @@ -6,8 +6,6 @@ use serde::Serialize; use std::io::{self, Cursor, Write}; use uuid::Uuid; -pub use nexus_types::external_api::params::MAX_USER_DATA_BYTES; - pub trait InstanceCiData { fn generate_cidata(&self, public_keys: &[String]) -> Result, Error>; @@ -93,6 +91,8 @@ fn build_vfat(meta_data: &[u8], user_data: &[u8]) -> io::Result> { #[cfg(test)] mod tests { + use nexus_types::external_api::params::MAX_USER_DATA_BYTES; + /// the fatfs crate has some unfortunate panics if you ask it to do /// incredibly stupid things, like format an empty disk or create a /// filesystem with an invalid cluster size. @@ -104,7 +104,7 @@ mod tests { /// little further.) #[test] fn build_vfat_works_with_arbitrarily_sized_input() { - let upper = crate::cidata::MAX_USER_DATA_BYTES + 4096; + let upper = MAX_USER_DATA_BYTES + 4096; // somewhat arbitrarily-chosen prime numbers near 1 KiB and 256 bytes for md_size in (0..upper).step_by(1019) { for ud_size in (0..upper).step_by(269) { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 2e7a87b58b..2e3f4c137b 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -4,5 +4,5 @@ # # We choose a specific toolchain (rather than "stable") for repeatability. The # intent is to keep this up-to-date with recently-released stable Rust. 
-channel = "1.74.1" +channel = "1.76.0" profile = "default" diff --git a/sled-hardware/Cargo.toml b/sled-hardware/Cargo.toml index 3d1259f46f..50e3cce072 100644 --- a/sled-hardware/Cargo.toml +++ b/sled-hardware/Cargo.toml @@ -19,6 +19,7 @@ rand.workspace = true schemars.workspace = true serde.workspace = true slog.workspace = true +slog-error-chain.workspace = true thiserror.workspace = true tofino.workspace = true tokio.workspace = true diff --git a/sled-hardware/src/cleanup.rs b/sled-hardware/src/cleanup.rs index 1a7f8be2f7..f438098792 100644 --- a/sled-hardware/src/cleanup.rs +++ b/sled-hardware/src/cleanup.rs @@ -18,6 +18,7 @@ use illumos_utils::ExecutionError; use illumos_utils::{execute, PFEXEC}; use slog::warn; use slog::Logger; +use slog_error_chain::InlineErrorChain; use std::process::Command; pub fn delete_underlay_addresses(log: &Logger) -> Result<(), Error> { @@ -45,7 +46,17 @@ fn delete_addresses_matching_prefixes( let addrobjs = output .stdout .lines() - .flatten() + .filter_map(|line| match line { + Ok(line) => Some(line), + Err(err) => { + warn!( + log, + "ipadm show-addr returned line that wasn't valid UTF-8"; + InlineErrorChain::new(&err), + ); + None + } + }) .collect::>(); for addrobj in addrobjs { diff --git a/wicket/src/state/mod.rs b/wicket/src/state/mod.rs index e20ba1c9c3..d287a153a9 100644 --- a/wicket/src/state/mod.rs +++ b/wicket/src/state/mod.rs @@ -12,15 +12,14 @@ mod update; pub use force_update::ForceUpdateState; pub use inventory::{ - Component, ComponentId, Inventory, ParsableComponentId, PowerState, Sp, - ALL_COMPONENT_IDS, + Component, ComponentId, Inventory, ParsableComponentId, ALL_COMPONENT_IDS, }; pub use rack::{KnightRiderMode, RackState}; -pub use status::{Liveness, ServiceStatus}; +pub use status::ServiceStatus; pub use update::{ parse_event_report_map, update_component_title, CreateClearUpdateStateOptions, CreateStartUpdateOptions, RackUpdateState, - UpdateItemState, UpdateRunningState, + UpdateItemState, }; use serde::{Deserialize, Serialize}; diff --git a/wicket/src/ui/widgets/mod.rs b/wicket/src/ui/widgets/mod.rs index 9527b68e2f..fd5ed18e3a 100644 --- a/wicket/src/ui/widgets/mod.rs +++ b/wicket/src/ui/widgets/mod.rs @@ -16,6 +16,6 @@ pub use animated_logo::{Logo, LogoState, LOGO_HEIGHT, LOGO_WIDTH}; pub use box_connector::{BoxConnector, BoxConnectorKind}; pub use fade::Fade; pub use ignition::IgnitionPopup; -pub use popup::{ButtonText, Popup, PopupBuilder, PopupScrollOffset}; +pub use popup::{ButtonText, PopupBuilder, PopupScrollOffset}; pub use rack::Rack; pub use status_view::StatusView; diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index ebe683e51a..01b7e440ae 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -85,11 +85,11 @@ rand_chacha = { version = "0.3.1", default-features = false, features = ["std"] regex = { version = "1.10.3" } regex-automata = { version = "0.4.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } regex-syntax = { version = "0.8.2" } -reqwest = { version = "0.11.22", features = ["blocking", "json", "rustls-tls", "stream"] } +reqwest = { version = "0.11.24", features = ["blocking", "json", "rustls-tls", "stream"] } ring = { version = "0.17.8", features = ["std"] } schemars = { version = "0.8.16", features = ["bytes", "chrono", "uuid1"] } semver = { version = "1.0.22", features = ["serde"] } -serde = { version = "1.0.196", features = ["alloc", "derive", "rc"] } +serde = { version = "1.0.197", 
features = ["alloc", "derive", "rc"] } serde_json = { version = "1.0.114", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } similar = { version = "2.3.0", features = ["inline", "unicode"] } @@ -97,10 +97,9 @@ slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "rele socket2 = { version = "0.5.5", default-features = false, features = ["all"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } -strum = { version = "0.25.0", features = ["derive"] } subtle = { version = "2.5.0" } syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } -syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.48", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.51", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } tokio = { version = "1.36.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } @@ -192,11 +191,11 @@ rand_chacha = { version = "0.3.1", default-features = false, features = ["std"] regex = { version = "1.10.3" } regex-automata = { version = "0.4.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } regex-syntax = { version = "0.8.2" } -reqwest = { version = "0.11.22", features = ["blocking", "json", "rustls-tls", "stream"] } +reqwest = { version = "0.11.24", features = ["blocking", "json", "rustls-tls", "stream"] } ring = { version = "0.17.8", features = ["std"] } schemars = { version = "0.8.16", features = ["bytes", "chrono", "uuid1"] } semver = { version = "1.0.22", features = ["serde"] } -serde = { version = "1.0.196", features = ["alloc", "derive", "rc"] } +serde = { version = "1.0.197", features = ["alloc", "derive", "rc"] } serde_json = { version = "1.0.114", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } similar = { version = "2.3.0", features = ["inline", "unicode"] } @@ -204,10 +203,9 @@ slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "rele socket2 = { version = "0.5.5", default-features = false, features = ["all"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } -strum = { version = "0.25.0", features = ["derive"] } subtle = { version = "2.5.0" } syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } -syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.48", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.51", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } time-macros = { version = "0.2.13", default-features = false, features = ["formatting", "parsing"] } tokio = { version = "1.36.0", features = ["full", "test-util"] } From 3498c3c35edfd67d3efee65525a526181b6d3825 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Wed, 28 Feb 2024 22:38:22 -0800 Subject: [PATCH 050/157] chore(deps): update rust crate rayon to 1.9 (#5159) --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- 2 files 
changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2c1e4acd50..0d1e149700 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6864,9 +6864,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" +checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" dependencies = [ "either", "rayon-core", @@ -6874,9 +6874,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ "crossbeam-deque", "crossbeam-utils", diff --git a/Cargo.toml b/Cargo.toml index ad3d801876..97300bc0d2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -321,7 +321,7 @@ proptest = "1.4.0" quote = "1.0" rand = "0.8.5" ratatui = "0.26.1" -rayon = "1.8" +rayon = "1.9" rcgen = "0.12.1" reedline = "0.29.0" ref-cast = "1.0" From e7a404e15c4d0df73baba7de9b83a7923c7ba56a Mon Sep 17 00:00:00 2001 From: liffy <629075+lifning@users.noreply.github.com> Date: Thu, 29 Feb 2024 02:09:41 -0800 Subject: [PATCH 051/157] Push hardcoded PROPOLIS_PORT out of all but the instance creation/migration sagas (#5138) Adds a database column for Nexus to track `propolis_port` in addition to the `propolis_ip`, such that tests using sled-agent-sim don't collide. Restrict hardcoded PROPOLIS_PORT to simply being the default in sagas. It remains 12400 in production, since there's no reason for it not to be, but for scenarios wherein each "propolis-server" API isn't necessarily served from an illumos zone with its own IP address, we can let the port be automatically assigned. (addressing issue #5051) --- common/src/address.rs | 2 -- nexus/db-model/src/schema.rs | 3 ++- nexus/db-model/src/vmm.rs | 6 ++++++ nexus/db-queries/src/db/datastore/vmm.rs | 14 ++++++++++---- nexus/src/app/instance.rs | 5 ++--- nexus/src/app/sagas/instance_common.rs | 4 ++++ nexus/src/app/sagas/instance_migrate.rs | 7 ++++--- nexus/tests/integration_tests/instances.rs | 8 ++++---- schema/crdb/38.0.0/up.sql | 11 +++++++++++ schema/crdb/dbinit.sql | 5 +++-- sled-agent/src/sim/sled_agent.rs | 22 ++++++++++------------ 11 files changed, 56 insertions(+), 31 deletions(-) create mode 100644 schema/crdb/38.0.0/up.sql diff --git a/common/src/address.rs b/common/src/address.rs index 152fb9319e..3a470c189b 100644 --- a/common/src/address.rs +++ b/common/src/address.rs @@ -44,8 +44,6 @@ pub const DNS_PORT: u16 = 53; pub const DNS_HTTP_PORT: u16 = 5353; pub const SLED_AGENT_PORT: u16 = 12345; -/// The port propolis-server listens on inside the propolis zone. -pub const PROPOLIS_PORT: u16 = 12400; pub const COCKROACH_PORT: u16 = 32221; pub const CRUCIBLE_PORT: u16 = 32345; pub const CLICKHOUSE_PORT: u16 = 8123; diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index d8344c2258..55d3e9b43f 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -13,7 +13,7 @@ use omicron_common::api::external::SemverVersion; /// /// This should be updated whenever the schema is changed. 
For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(37, 0, 1); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(38, 0, 0); table! { disk (id) { @@ -380,6 +380,7 @@ table! { instance_id -> Uuid, sled_id -> Uuid, propolis_ip -> Inet, + propolis_port -> Int4, state -> crate::InstanceStateEnum, time_state_updated -> Timestamptz, state_generation -> Int8, diff --git a/nexus/db-model/src/vmm.rs b/nexus/db-model/src/vmm.rs index fe1158d5bb..ca3be120d4 100644 --- a/nexus/db-model/src/vmm.rs +++ b/nexus/db-model/src/vmm.rs @@ -14,6 +14,7 @@ use super::{Generation, InstanceState}; use crate::schema::vmm; +use crate::SqlU16; use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -43,6 +44,9 @@ pub struct Vmm { /// The IP address at which this VMM is serving the Propolis server API. pub propolis_ip: ipnetwork::IpNetwork, + /// The socket port on which this VMM is serving the Propolis server API. + pub propolis_port: SqlU16, + /// Runtime state for the VMM. #[diesel(embed)] pub runtime: VmmRuntimeState, @@ -61,6 +65,7 @@ impl Vmm { instance_id: Uuid, sled_id: Uuid, propolis_ip: ipnetwork::IpNetwork, + propolis_port: u16, initial_state: VmmInitialState, ) -> Self { use omicron_common::api::external::InstanceState as ApiInstanceState; @@ -78,6 +83,7 @@ impl Vmm { instance_id, sled_id, propolis_ip, + propolis_port: SqlU16(propolis_port), runtime: VmmRuntimeState { state: InstanceState::new(api_state), time_state_updated: now, diff --git a/nexus/db-queries/src/db/datastore/vmm.rs b/nexus/db-queries/src/db/datastore/vmm.rs index 18afde84f0..b9bfd7697e 100644 --- a/nexus/db-queries/src/db/datastore/vmm.rs +++ b/nexus/db-queries/src/db/datastore/vmm.rs @@ -23,6 +23,7 @@ use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; +use std::net::SocketAddr; use uuid::Uuid; impl DataStore { @@ -134,7 +135,7 @@ impl DataStore { Ok(updated) } - /// Forcibly overwrites the Propolis IP in the supplied VMM's record with + /// Forcibly overwrites the Propolis IP/Port in the supplied VMM's record with /// the supplied IP address and port. /// /// This is used in tests to overwrite the IP for a VMM that is backed by a /// mock Propolis server that lives on the test runner instead of the IP /// allocated by the instance start procedure. (Unfortunately, this can't be /// marked #[cfg(test)] because the integration tests require this /// functionality.) - pub async fn vmm_overwrite_ip_for_test( + pub async fn vmm_overwrite_addr_for_test( &self, opctx: &OpContext, vmm_id: &Uuid, - new_ip: ipnetwork::IpNetwork, + new_addr: SocketAddr, ) -> UpdateResult { + let new_ip = ipnetwork::IpNetwork::from(new_addr.ip()); + let new_port = new_addr.port(); let vmm = diesel::update(dsl::vmm) .filter(dsl::id.eq(*vmm_id)) - .set(dsl::propolis_ip.eq(new_ip)) + .set(( + dsl::propolis_ip.eq(new_ip), + dsl::propolis_port.eq(new_port as i32), + )) .returning(Vmm::as_returning()) .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 4b52b597ba..88a427af6a 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -29,7 +29,6 @@ use nexus_db_queries::db::identity::Resource; use nexus_db_queries::db::lookup; use nexus_db_queries::db::lookup::LookupPath; use nexus_types::external_api::views; -use omicron_common::address::PROPOLIS_PORT; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::ByteCount; use omicron_common::api::external::CreateResult; @@ -1223,7 +1222,7 @@ impl super::Nexus { propolis_id: *propolis_id, propolis_addr: SocketAddr::new( initial_vmm.propolis_ip.ip(), - PROPOLIS_PORT, + initial_vmm.propolis_port.into(), ) .to_string(), }, @@ -1762,7 +1761,7 @@ impl super::Nexus { | InstanceState::Rebooting | InstanceState::Migrating | InstanceState::Repairing => { - Ok(SocketAddr::new(vmm.propolis_ip.ip(), PROPOLIS_PORT)) + Ok(SocketAddr::new(vmm.propolis_ip.ip(), vmm.propolis_port.into())) } InstanceState::Creating | InstanceState::Starting diff --git a/nexus/src/app/sagas/instance_common.rs b/nexus/src/app/sagas/instance_common.rs index 445abd5daf..e915c026dd 100644 --- a/nexus/src/app/sagas/instance_common.rs +++ b/nexus/src/app/sagas/instance_common.rs @@ -24,6 +24,9 @@ use uuid::Uuid; use super::NexusActionContext; +/// The port propolis-server listens on inside the propolis zone. +const DEFAULT_PROPOLIS_PORT: u16 = 12400; + /// Reserves resources for a new VMM whose instance has `ncpus` guest logical /// processors and `guest_memory` bytes of guest RAM. The selected sled is /// random within the set of sleds allowed by the supplied `constraints`. @@ -98,6 +101,7 @@ pub async fn create_and_insert_vmm_record( instance_id, sled_id, IpAddr::V6(propolis_ip).into(), + DEFAULT_PROPOLIS_PORT, initial_state, ); diff --git a/nexus/src/app/sagas/instance_migrate.rs b/nexus/src/app/sagas/instance_migrate.rs index ff3ff66e78..da3b3e93ea 100644 --- a/nexus/src/app/sagas/instance_migrate.rs +++ b/nexus/src/app/sagas/instance_migrate.rs @@ -12,7 +12,6 @@ use crate::app::sagas::{ use crate::external_api::params; use nexus_db_queries::db::{identity::Resource, lookup::LookupPath}; use nexus_db_queries::{authn, authz, db}; -use omicron_common::address::PROPOLIS_PORT; use serde::Deserialize; use serde::Serialize; use sled_agent_client::types::{ @@ -424,8 +423,10 @@ async fn sim_instance_migrate( let db_instance = sagactx.lookup::("set_migration_ids")?; - let src_vmm_addr = - SocketAddr::new(params.src_vmm.propolis_ip.ip(), PROPOLIS_PORT); + let src_vmm_addr = SocketAddr::new( + params.src_vmm.propolis_ip.ip(), + params.src_vmm.propolis_port.into(), + ); let src_propolis_id = db_instance.runtime().propolis_id.unwrap(); let dst_vmm = sagactx.lookup::("dst_vmm_record")?; diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 09f91a2288..0df2b83008 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -3762,7 +3762,7 @@ async fn test_instance_serial(cptestctx: &ControlPlaneTestContext) { let nexus = &apictx.nexus; let instance_name = "kris-picks"; - cptestctx + let propolis_addr = cptestctx .sled_agent .sled_agent .start_local_mock_propolis_server(&cptestctx.logctx.log) @@ -3819,12 +3819,12 @@ async fn test_instance_serial(cptestctx: &ControlPlaneTestContext) { .runtime() .propolis_id .expect("running instance should have vmm"); - let localhost = 
std::net::IpAddr::V6(std::net::Ipv6Addr::LOCALHOST); let updated_vmm = datastore - .vmm_overwrite_ip_for_test(&opctx, &propolis_id, localhost.into()) + .vmm_overwrite_addr_for_test(&opctx, &propolis_id, propolis_addr) .await .unwrap(); - assert_eq!(updated_vmm.propolis_ip.ip(), localhost); + assert_eq!(updated_vmm.propolis_ip.ip(), propolis_addr.ip()); + assert_eq!(updated_vmm.propolis_port.0, propolis_addr.port()); // Query serial output history endpoint // This is the first line of output generated by the mock propolis-server. diff --git a/schema/crdb/38.0.0/up.sql b/schema/crdb/38.0.0/up.sql new file mode 100644 index 0000000000..725a9e2c90 --- /dev/null +++ b/schema/crdb/38.0.0/up.sql @@ -0,0 +1,11 @@ +-- 12400 was the hardcoded PROPOLIS_PORT prior to this addition; default to +-- that for all existing instances when creating the column. +-- In production, this value will always end up still being 12400. However, +-- when each "propolis-server" API isn't served from an illumos zone with its +-- own IP address, but rather from a sled-agent-sim thread during a cargo +-- nextest run, letting the port be assigned dynamically avoids test flakes +-- caused by port collisions. +ALTER TABLE omicron.public.vmm +ADD COLUMN IF NOT EXISTS propolis_port INT4 NOT NULL + CHECK (propolis_port BETWEEN 0 AND 65535) + DEFAULT 12400; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 837be42c35..40a6fd463f 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3310,7 +3310,8 @@ CREATE TABLE IF NOT EXISTS omicron.public.vmm ( time_state_updated TIMESTAMPTZ NOT NULL, state_generation INT NOT NULL, sled_id UUID NOT NULL, - propolis_ip INET NOT NULL + propolis_ip INET NOT NULL, + propolis_port INT4 NOT NULL CHECK (propolis_port BETWEEN 0 AND 65535) DEFAULT 12400 ); /* @@ -3551,7 +3552,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '37.0.1', NULL) + ( TRUE, NOW(), NOW(), '38.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index d4c92fca51..27a06e4617 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -27,7 +27,6 @@ use illumos_utils::opte::params::{ DeleteVirtualNetworkInterfaceHost, SetVirtualNetworkInterfaceHost, }; use nexus_client::types::PhysicalDiskKind; -use omicron_common::address::PROPOLIS_PORT; use omicron_common::api::external::{ ByteCount, DiskState, Error, Generation, ResourceType, }; @@ -684,14 +683,15 @@ impl SledAgent { } /// Used for integration tests that require a component to talk to a - /// mocked propolis-server API. - // TODO: fix schemas so propolis-server's port isn't hardcoded in nexus - // such that we can run more than one of these. - // (this is only needed by test_instance_serial at present) + /// mocked propolis-server API. Returns the socket on which the dropshot + /// service is listening, which *must* be patched into Nexus with + /// `nexus_db_queries::db::datastore::vmm_overwrite_addr_for_test` after + /// the instance creation saga if functionality touching propolis-server + /// is to be tested (e.g. serial console connection). 
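+ /// A minimal sketch of that flow, assuming a test context with a + /// datastore, opctx, and the VMM's propolis_id in hand (names here are + /// illustrative; the `test_instance_serial` changes earlier in this patch + /// show the real usage): + /// + /// ```ignore + /// let addr = sled_agent.start_local_mock_propolis_server(&log).await?; + /// datastore + ///     .vmm_overwrite_addr_for_test(&opctx, &propolis_id, addr) + ///     .await?; + /// ```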
pub async fn start_local_mock_propolis_server( &self, log: &Logger, - ) -> Result<(), Error> { + ) -> Result { let mut mock_lock = self.mock_propolis.lock().await; if mock_lock.is_some() { return Err(Error::ObjectAlreadyExists { @@ -700,7 +700,7 @@ }); } let propolis_bind_address = - SocketAddr::new(Ipv6Addr::LOCALHOST.into(), PROPOLIS_PORT); + SocketAddr::new(Ipv6Addr::LOCALHOST.into(), 0); let dropshot_config = dropshot::ConfigDropshot { bind_address: propolis_bind_address, ..Default::default() @@ -721,12 +721,10 @@ Error::unavail(&format!("initializing propolis-server: {}", error)) })? .start(); - let client = propolis_client::Client::new(&format!( - "http://{}", - srv.local_addr() - )); + let addr = srv.local_addr(); + let client = propolis_client::Client::new(&format!("http://{}", addr)); *mock_lock = Some((srv, client)); - Ok(()) + Ok(addr) } pub fn inventory(&self, addr: SocketAddr) -> anyhow::Result { From 1bc50078a589560ad771665435856685fab89723 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Thu, 29 Feb 2024 04:37:37 -0600 Subject: [PATCH 052/157] Bump web console (fix rounding bug on cap & util) (#5165) https://github.com/oxidecomputer/console/compare/e991ec5c...25fb506d * [25fb506d](https://github.com/oxidecomputer/console/commit/25fb506d) hard code en-US locale in NumberFormat step of round() * [0ba26501](https://github.com/oxidecomputer/console/commit/0ba26501) oxidecomputer/console#2011 * [bce7285e](https://github.com/oxidecomputer/console/commit/bce7285e) oxidecomputer/console#2009 * [6947e026](https://github.com/oxidecomputer/console/commit/6947e026) fall back to EmptyCell on empty description in properties table (closes oxidecomputer/console#2007) * [7e60dfaa](https://github.com/oxidecomputer/console/commit/7e60dfaa) oxidecomputer/console#2008 * [629f1c29](https://github.com/oxidecomputer/console/commit/629f1c29) bump dax in bump-omicron script to fix quote escaping bug --- tools/console_version | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/console_version b/tools/console_version index ee7f619b53..f5534b5c50 100644 --- a/tools/console_version +++ b/tools/console_version @@ -1,2 +1,2 @@ -COMMIT="e991ec5cf148d83f135b6a692dca9b8df05acef0" -SHA2="18d53e4485e63d9e2273a889e70785648052e8a231df89f3bcbc2ed01a0eeeb1" +COMMIT="25fb506d0527f5508dc90b118672c0bf8ce14dd3" +SHA2="1083f693c66c275d0cf76e120aa923a51c3188dd9a7996eb51dc0fe4f611507a" From 7daf5e1efe1054119219a164b3eba64515363c69 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 29 Feb 2024 16:14:58 +0000 Subject: [PATCH 053/157] chore(deps): update taiki-e/install-action digest to 45dfafe (#5169) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`1d776b1` -> `45dfafe`](https://togithub.com/taiki-e/install-action/compare/1d776b1...45dfafe) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. 
--- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 0f942e7d76..49914f36db 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@1d776b18af134fca43744080130085d256938196 # v2 + uses: taiki-e/install-action@45dfafe7696c1865602c71c94d46a16a45f8e88b # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From 10ada767920e7c8d988e43a613feab49d1157fc3 Mon Sep 17 00:00:00 2001 From: Dan Cross Date: Thu, 29 Feb 2024 11:44:58 -0500 Subject: [PATCH 054/157] env.sh: prefer `readlink` to `cd && echo ${PWD}` (#5162) A trivial change to make this script more robust for those of us who alias `cd` to something. --- env.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env.sh b/env.sh index 483a89f597..74f3d1caf4 100644 --- a/env.sh +++ b/env.sh @@ -5,7 +5,7 @@ # See also: ./.envrc set -o xtrace -OMICRON_WS="$(cd $(dirname "${BASH_SOURCE[0]}") && echo $PWD)" +OMICRON_WS="$(readlink -f $(dirname "${BASH_SOURCE[0]}"))" export PATH="$OMICRON_WS/out/cockroachdb/bin:$PATH" export PATH="$OMICRON_WS/out/clickhouse:$PATH" export PATH="$OMICRON_WS/out/dendrite-stub/bin:$PATH" From c42b50531cf408d20385a1f2275088ae8e5dfbeb Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 29 Feb 2024 12:35:02 -0500 Subject: [PATCH 055/157] SP v1.0.9 (#5170) --- tools/hubris_checksums | 17 +++++++++-------- tools/hubris_version | 2 +- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/tools/hubris_checksums b/tools/hubris_checksums index dca8ea0ab6..fb1c8c1930 100644 --- a/tools/hubris_checksums +++ b/tools/hubris_checksums @@ -1,8 +1,9 @@ -28f432735a15f40101c133e4e9974d1681a33bb7b3386ccbe15b465b613f4826 build-gimlet-c-image-default-v1.0.8.zip -db927999398f0723d5d614db78a5abb4a1d515c711ffba944477bdac10c48907 build-gimlet-d-image-default-v1.0.8.zip -629a53b5d9d4bf3d410687d0ecedf4837a54233ce62b6223d209494777cc7ebc build-gimlet-e-image-default-v1.0.8.zip -e3c2a243257929a65de638f3be425370f084aeeefafbd1773d01ee71cf0b8ea7 build-gimlet-f-image-default-v1.0.8.zip -556595b42d05508ebfdac9dd71b38fe9b72e0cb30f6aa4be626c02141e375a71 build-psc-b-image-default-v1.0.8.zip -39fbf92cbc935b4eaecb81a9768357828cf3e79b5c74d36c9a655ae9023cc50c build-psc-c-image-default-v1.0.8.zip -4225dff721b034fe7cf1dc26277557e1f15f2710014dd46dfa7c92ff04c7e054 build-sidecar-b-image-default-v1.0.8.zip -ae8f12d7b66d0bcc372dd79abf515255f6ca97bb0339c570a058684e04e12cf8 build-sidecar-c-image-default-v1.0.8.zip +4ebca97672fafd3b2716c17f00c8a93f531c1ccb44137c756c77c13383b203da build-gimlet-c-image-default-v1.0.9.zip +a945dbcd4b1825e95db32bbf2fbdc56013501396dcdc5ebe6ded361decfcb1f6 build-gimlet-d-image-default-v1.0.9.zip +527c4fe9488c5834f0fd68fa59756ab01eb0c36d725ba8b8ade1375e2dcb4c3e build-gimlet-e-image-default-v1.0.9.zip +69d09a1367b5cfecf43c5b733f84adcb0e4f73b49e062f1c4574b769fc2f1de9 build-gimlet-f-image-default-v1.0.9.zip +0fa33a057dec7301280338cf5a80a5198b5d95475dba350a807979f1a53ce5c7 build-psc-b-image-default-v1.0.9.zip +88e7aa91fe4f976753ccfc8ba9a57a0521c54d9ef35504cf16e7b1ce78c87465 build-psc-c-image-default-v1.0.9.zip 
+cfbf21fef84b38c2a5ca3c0b45703c20c2605c6bf5d599b53b26e3a2d09d756d build-sidecar-b-image-default-v1.0.9.zip +c98b89993f1a9b5219bbabf06897db2f92d60f6a3afb4374cb8e6e79d57fdfe4 build-sidecar-c-image-default-v1.0.9.zip + diff --git a/tools/hubris_version b/tools/hubris_version index 4ee8ac61fe..37a180a853 100644 --- a/tools/hubris_version +++ b/tools/hubris_version @@ -1 +1 @@ -TAGS=(gimlet-v1.0.8 psc-v1.0.8 sidecar-v1.0.8) +TAGS=(gimlet-v1.0.9 psc-v1.0.9 sidecar-v1.0.9) From 55a0760a77a37333c51594229e06c9b598349c90 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Thu, 29 Feb 2024 10:50:05 -0800 Subject: [PATCH 056/157] rename reconfigurator packages (#5163) --- Cargo.lock | 112 +++++++++--------- Cargo.toml | 12 +- nexus/Cargo.toml | 4 +- .../tests/config.test.toml | 1 - nexus/db-queries/Cargo.toml | 2 +- .../db-queries/src/db/datastore/deployment.rs | 4 +- nexus/reconfigurator/README.md | 13 ++ .../execution}/Cargo.toml | 4 +- .../execution}/build.rs | 0 .../execution}/src/datasets.rs | 0 .../execution}/src/dns.rs | 2 +- .../execution}/src/lib.rs | 2 +- .../execution}/src/omicron_zones.rs | 0 .../execution}/src/resource_allocation.rs | 0 .../execution/tests/config.test.toml | 1 + .../planning}/Cargo.toml | 2 +- .../planning}/src/blueprint_builder.rs | 0 .../planning}/src/ip_allocator.rs | 0 .../planning}/src/lib.rs | 0 .../planning}/src/planner.rs | 0 .../src/app/background/blueprint_execution.rs | 4 +- nexus/src/app/deployment.rs | 4 +- nexus/types/src/deployment.rs | 13 +- 23 files changed, 97 insertions(+), 83 deletions(-) delete mode 120000 nexus/blueprint-execution/tests/config.test.toml create mode 100644 nexus/reconfigurator/README.md rename nexus/{blueprint-execution => reconfigurator/execution}/Cargo.toml (92%) rename nexus/{blueprint-execution => reconfigurator/execution}/build.rs (100%) rename nexus/{blueprint-execution => reconfigurator/execution}/src/datasets.rs (100%) rename nexus/{blueprint-execution => reconfigurator/execution}/src/dns.rs (99%) rename nexus/{blueprint-execution => reconfigurator/execution}/src/lib.rs (97%) rename nexus/{blueprint-execution => reconfigurator/execution}/src/omicron_zones.rs (100%) rename nexus/{blueprint-execution => reconfigurator/execution}/src/resource_allocation.rs (100%) create mode 120000 nexus/reconfigurator/execution/tests/config.test.toml rename nexus/{deployment => reconfigurator/planning}/Cargo.toml (92%) rename nexus/{deployment => reconfigurator/planning}/src/blueprint_builder.rs (100%) rename nexus/{deployment => reconfigurator/planning}/src/ip_allocator.rs (100%) rename nexus/{deployment => reconfigurator/planning}/src/lib.rs (100%) rename nexus/{deployment => reconfigurator/planning}/src/planner.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 0d1e149700..c3fc5d86cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4262,39 +4262,6 @@ dependencies = [ "rustc_version 0.1.7", ] -[[package]] -name = "nexus-blueprint-execution" -version = "0.1.0" -dependencies = [ - "anyhow", - "chrono", - "dns-service-client", - "futures", - "httptest", - "illumos-utils", - "internal-dns", - "ipnet", - "nexus-db-model", - "nexus-db-queries", - "nexus-deployment", - "nexus-inventory", - "nexus-test-utils", - "nexus-test-utils-macros", - "nexus-types", - "omicron-common", - "omicron-nexus", - "omicron-rpaths", - "omicron-test-utils", - "omicron-workspace-hack", - "pq-sys", - "reqwest", - "sled-agent-client", - "slog", - "slog-error-chain", - "tokio", - "uuid", -] - [[package]] name = "nexus-client" version = "0.1.0" @@ -4386,8 +4353,8 @@ dependencies 
= [ "macaddr", "newtype_derive", "nexus-db-model", - "nexus-deployment", "nexus-inventory", + "nexus-reconfigurator-planning", "nexus-test-utils", "nexus-types", "omicron-common", @@ -4444,26 +4411,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "nexus-deployment" -version = "0.1.0" -dependencies = [ - "anyhow", - "chrono", - "internal-dns", - "ipnet", - "ipnetwork", - "nexus-inventory", - "nexus-types", - "omicron-common", - "omicron-test-utils", - "omicron-workspace-hack", - "sled-agent-client", - "slog", - "thiserror", - "uuid", -] - [[package]] name = "nexus-inventory" version = "0.1.0" @@ -4501,6 +4448,59 @@ dependencies = [ "syn 2.0.51", ] +[[package]] +name = "nexus-reconfigurator-execution" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "dns-service-client", + "futures", + "httptest", + "illumos-utils", + "internal-dns", + "ipnet", + "nexus-db-model", + "nexus-db-queries", + "nexus-inventory", + "nexus-reconfigurator-planning", + "nexus-test-utils", + "nexus-test-utils-macros", + "nexus-types", + "omicron-common", + "omicron-nexus", + "omicron-rpaths", + "omicron-test-utils", + "omicron-workspace-hack", + "pq-sys", + "reqwest", + "sled-agent-client", + "slog", + "slog-error-chain", + "tokio", + "uuid", +] + +[[package]] +name = "nexus-reconfigurator-planning" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "internal-dns", + "ipnet", + "ipnetwork", + "nexus-inventory", + "nexus-types", + "omicron-common", + "omicron-test-utils", + "omicron-workspace-hack", + "sled-agent-client", + "slog", + "thiserror", + "uuid", +] + [[package]] name = "nexus-test-interface" version = "0.1.0" @@ -5031,12 +5031,12 @@ dependencies = [ "macaddr", "mg-admin-client", "mime_guess", - "nexus-blueprint-execution", "nexus-db-model", "nexus-db-queries", "nexus-defaults", - "nexus-deployment", "nexus-inventory", + "nexus-reconfigurator-execution", + "nexus-reconfigurator-planning", "nexus-test-interface", "nexus-test-utils", "nexus-test-utils-macros", diff --git a/Cargo.toml b/Cargo.toml index 97300bc0d2..e19ed35c32 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,14 +37,14 @@ members = [ "key-manager", "nexus", "nexus/authz-macros", - "nexus/blueprint-execution", "nexus/db-macros", "nexus/db-model", "nexus/db-queries", "nexus/defaults", - "nexus/deployment", "nexus/inventory", "nexus/macros-common", + "nexus/reconfigurator/execution", + "nexus/reconfigurator/planning", "nexus/test-interface", "nexus/test-utils-macros", "nexus/test-utils", @@ -114,14 +114,14 @@ default-members = [ "key-manager", "nexus", "nexus/authz-macros", - "nexus/blueprint-execution", "nexus/macros-common", "nexus/db-macros", "nexus/db-model", "nexus/db-queries", "nexus/defaults", - "nexus/deployment", "nexus/inventory", + "nexus/reconfigurator/execution", + "nexus/reconfigurator/planning", "nexus/types", "oximeter/collector", "oximeter/db", @@ -253,14 +253,14 @@ mockall = "0.12" newtype_derive = "0.1.6" mg-admin-client = { path = "clients/mg-admin-client" } multimap = "0.10.0" -nexus-blueprint-execution = { path = "nexus/blueprint-execution" } nexus-client = { path = "clients/nexus-client" } nexus-db-model = { path = "nexus/db-model" } nexus-db-queries = { path = "nexus/db-queries" } nexus-defaults = { path = "nexus/defaults" } -nexus-deployment = { path = "nexus/deployment" } nexus-inventory = { path = "nexus/inventory" } nexus-macros-common = { path = "nexus/macros-common" } +nexus-reconfigurator-execution = { path = "nexus/reconfigurator/execution" } +nexus-reconfigurator-planning = { path = 
"nexus/reconfigurator/planning" } omicron-certificates = { path = "certificates" } omicron-passwords = { path = "passwords" } omicron-workspace-hack = "0.1.0" diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 6e9f2f135d..581e16e84d 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -77,12 +77,12 @@ tough.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true -nexus-blueprint-execution.workspace = true nexus-defaults.workspace = true nexus-db-model.workspace = true nexus-db-queries.workspace = true -nexus-deployment.workspace = true nexus-inventory.workspace = true +nexus-reconfigurator-execution.workspace = true +nexus-reconfigurator-planning.workspace = true nexus-types.workspace = true omicron-common.workspace = true omicron-passwords.workspace = true diff --git a/nexus/blueprint-execution/tests/config.test.toml b/nexus/blueprint-execution/tests/config.test.toml deleted file mode 120000 index 52f00171fd..0000000000 --- a/nexus/blueprint-execution/tests/config.test.toml +++ /dev/null @@ -1 +0,0 @@ -../../tests/config.test.toml \ No newline at end of file diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml index 539a913476..9d0c22c373 100644 --- a/nexus/db-queries/Cargo.toml +++ b/nexus/db-queries/Cargo.toml @@ -69,8 +69,8 @@ gateway-client.workspace = true illumos-utils.workspace = true internal-dns.workspace = true itertools.workspace = true -nexus-deployment.workspace = true nexus-inventory.workspace = true +nexus-reconfigurator-planning.workspace = true nexus-test-utils.workspace = true omicron-sled-agent.workspace = true omicron-test-utils.workspace = true diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 00a75a21da..020916928d 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -1055,9 +1055,9 @@ impl RunQueryDsl for InsertTargetQuery {} mod tests { use super::*; use crate::db::datastore::test_utils::datastore_test; - use nexus_deployment::blueprint_builder::BlueprintBuilder; - use nexus_deployment::blueprint_builder::Ensure; use nexus_inventory::now_db_precision; + use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; + use nexus_reconfigurator_planning::blueprint_builder::Ensure; use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::Policy; use nexus_types::deployment::SledResources; diff --git a/nexus/reconfigurator/README.md b/nexus/reconfigurator/README.md new file mode 100644 index 0000000000..7c6a98e666 --- /dev/null +++ b/nexus/reconfigurator/README.md @@ -0,0 +1,13 @@ +# Reconfigurator + +**Reconfigurator** is a project and control plane facility for dynamically reconfiguring the software running on the Oxide rack. This will be used for a variety of use cases: + +* Adding, removing, or replacing sleds (gracefully or otherwise) +* Upgrading any Oxide-delivered software (see below) +* Scale-up and scale-down of control plane services +* Automated response to hardware failure + +For those with access (primarily, Oxide employees), see: + +* [RFD 418 Towards automated system update](https://rfd.shared.oxide.computer/rfd/418). +* [Reconfigurator GitHub Project](https://github.com/orgs/oxidecomputer/projects/44?pane=info). 
diff --git a/nexus/blueprint-execution/Cargo.toml b/nexus/reconfigurator/execution/Cargo.toml similarity index 92% rename from nexus/blueprint-execution/Cargo.toml rename to nexus/reconfigurator/execution/Cargo.toml index 164559b468..2f4807d38d 100644 --- a/nexus/blueprint-execution/Cargo.toml +++ b/nexus/reconfigurator/execution/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "nexus-blueprint-execution" +name = "nexus-reconfigurator-execution" version = "0.1.0" edition = "2021" @@ -33,7 +33,7 @@ omicron-workspace-hack.workspace = true chrono.workspace = true httptest.workspace = true ipnet.workspace = true -nexus-deployment.workspace = true +nexus-reconfigurator-planning.workspace = true nexus-inventory.workspace = true nexus-test-utils.workspace = true nexus-test-utils-macros.workspace = true diff --git a/nexus/blueprint-execution/build.rs b/nexus/reconfigurator/execution/build.rs similarity index 100% rename from nexus/blueprint-execution/build.rs rename to nexus/reconfigurator/execution/build.rs diff --git a/nexus/blueprint-execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs similarity index 100% rename from nexus/blueprint-execution/src/datasets.rs rename to nexus/reconfigurator/execution/src/datasets.rs diff --git a/nexus/blueprint-execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs similarity index 99% rename from nexus/blueprint-execution/src/dns.rs rename to nexus/reconfigurator/execution/src/dns.rs index 54611e9f66..0fa8eb1c10 100644 --- a/nexus/blueprint-execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -320,8 +320,8 @@ mod test { use crate::Sled; use internal_dns::ServiceName; use internal_dns::DNS_ZONE; - use nexus_deployment::blueprint_builder::BlueprintBuilder; use nexus_inventory::CollectionBuilder; + use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_types::deployment::Blueprint; use nexus_types::deployment::OmicronZoneConfig; use nexus_types::deployment::OmicronZoneType; diff --git a/nexus/blueprint-execution/src/lib.rs b/nexus/reconfigurator/execution/src/lib.rs similarity index 97% rename from nexus/blueprint-execution/src/lib.rs rename to nexus/reconfigurator/execution/src/lib.rs index 531c4f57a8..74b4764d7a 100644 --- a/nexus/blueprint-execution/src/lib.rs +++ b/nexus/reconfigurator/execution/src/lib.rs @@ -4,7 +4,7 @@ //! Execution of Nexus blueprints //! -//! See `nexus_deployment` crate-level docs for background. +//! See `nexus_reconfigurator_planning` crate-level docs for background. 
use anyhow::{anyhow, Context}; use nexus_db_queries::context::OpContext; diff --git a/nexus/blueprint-execution/src/omicron_zones.rs b/nexus/reconfigurator/execution/src/omicron_zones.rs similarity index 100% rename from nexus/blueprint-execution/src/omicron_zones.rs rename to nexus/reconfigurator/execution/src/omicron_zones.rs diff --git a/nexus/blueprint-execution/src/resource_allocation.rs b/nexus/reconfigurator/execution/src/resource_allocation.rs similarity index 100% rename from nexus/blueprint-execution/src/resource_allocation.rs rename to nexus/reconfigurator/execution/src/resource_allocation.rs diff --git a/nexus/reconfigurator/execution/tests/config.test.toml b/nexus/reconfigurator/execution/tests/config.test.toml new file mode 120000 index 0000000000..7e4238a9d6 --- /dev/null +++ b/nexus/reconfigurator/execution/tests/config.test.toml @@ -0,0 +1 @@ +../../../tests/config.test.toml \ No newline at end of file diff --git a/nexus/deployment/Cargo.toml b/nexus/reconfigurator/planning/Cargo.toml similarity index 92% rename from nexus/deployment/Cargo.toml rename to nexus/reconfigurator/planning/Cargo.toml index 115dec98a5..35ead9494e 100644 --- a/nexus/deployment/Cargo.toml +++ b/nexus/reconfigurator/planning/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "nexus-deployment" +name = "nexus-reconfigurator-planning" version = "0.1.0" edition = "2021" diff --git a/nexus/deployment/src/blueprint_builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder.rs similarity index 100% rename from nexus/deployment/src/blueprint_builder.rs rename to nexus/reconfigurator/planning/src/blueprint_builder.rs diff --git a/nexus/deployment/src/ip_allocator.rs b/nexus/reconfigurator/planning/src/ip_allocator.rs similarity index 100% rename from nexus/deployment/src/ip_allocator.rs rename to nexus/reconfigurator/planning/src/ip_allocator.rs diff --git a/nexus/deployment/src/lib.rs b/nexus/reconfigurator/planning/src/lib.rs similarity index 100% rename from nexus/deployment/src/lib.rs rename to nexus/reconfigurator/planning/src/lib.rs diff --git a/nexus/deployment/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs similarity index 100% rename from nexus/deployment/src/planner.rs rename to nexus/reconfigurator/planning/src/planner.rs diff --git a/nexus/src/app/background/blueprint_execution.rs b/nexus/src/app/background/blueprint_execution.rs index 4ba60ab566..3c2530a3d3 100644 --- a/nexus/src/app/background/blueprint_execution.rs +++ b/nexus/src/app/background/blueprint_execution.rs @@ -69,7 +69,7 @@ impl BackgroundTask for BlueprintExecutor { }); } - let result = nexus_blueprint_execution::realize_blueprint( + let result = nexus_reconfigurator_execution::realize_blueprint( opctx, &self.datastore, blueprint, @@ -258,7 +258,7 @@ mod test { // Make sure that requests get made to the sled agent. This is not a // careful check of exactly what gets sent. For that, see the tests in - // nexus-blueprint-execution. + // nexus-reconfigurator-execution. 
for s in [&mut s1, &mut s2] { s.expect( Expectation::matching(all_of![request::method_path( diff --git a/nexus/src/app/deployment.rs b/nexus/src/app/deployment.rs index adf6119c5c..31ba9fe065 100644 --- a/nexus/src/app/deployment.rs +++ b/nexus/src/app/deployment.rs @@ -9,8 +9,8 @@ use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::datastore::SQL_BATCH_SIZE; use nexus_db_queries::db::pagination::Paginator; -use nexus_deployment::blueprint_builder::BlueprintBuilder; -use nexus_deployment::planner::Planner; +use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; +use nexus_reconfigurator_planning::planner::Planner; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintMetadata; use nexus_types::deployment::BlueprintTarget; diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 0f601f3db5..ef3c03a302 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -4,12 +4,13 @@ //! Types representing deployed software and configuration //! -//! For more on this, see the crate-level documentation for `nexus/deployment`. +//! For more on this, see the crate-level documentation for +//! `nexus/reconfigurator/planning`. //! //! This lives in nexus/types because it's used by both nexus/db-model and -//! nexus/deployment. (It could as well just live in nexus/db-model, but -//! nexus/deployment does not currently know about nexus/db-model and it's -//! convenient to separate these concerns.) +//! nexus/reconfigurator/planning. (It could as well just live in +//! nexus/db-model, but nexus/reconfigurator/planning does not currently know +//! about nexus/db-model and it's convenient to separate these concerns.) use crate::external_api::views::SledPolicy; use crate::external_api::views::SledState; @@ -98,8 +99,8 @@ impl SledResources { /// Describes a complete set of software and configuration for the system // Blueprints are a fundamental part of how the system modifies itself. Each // blueprint completely describes all of the software and configuration -// that the control plane manages. See the nexus/deployment crate-level -// documentation for details. +// that the control plane manages. See the nexus/reconfigurator/planning +// crate-level documentation for details. // // Blueprints are different from policy. Policy describes the things that an // operator would generally want to control. 
The blueprint describes the From b993768e4d62515f33a058e4082dd7a721c9cee1 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 29 Feb 2024 17:16:10 -0500 Subject: [PATCH 057/157] Bump Gimlet to v1.0.10 (#5171) This specifically contains some additional debugging information --- tools/hubris_checksums | 9 ++++----- tools/hubris_version | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/tools/hubris_checksums b/tools/hubris_checksums index fb1c8c1930..65a2739fcd 100644 --- a/tools/hubris_checksums +++ b/tools/hubris_checksums @@ -1,9 +1,8 @@ -4ebca97672fafd3b2716c17f00c8a93f531c1ccb44137c756c77c13383b203da build-gimlet-c-image-default-v1.0.9.zip -a945dbcd4b1825e95db32bbf2fbdc56013501396dcdc5ebe6ded361decfcb1f6 build-gimlet-d-image-default-v1.0.9.zip -527c4fe9488c5834f0fd68fa59756ab01eb0c36d725ba8b8ade1375e2dcb4c3e build-gimlet-e-image-default-v1.0.9.zip -69d09a1367b5cfecf43c5b733f84adcb0e4f73b49e062f1c4574b769fc2f1de9 build-gimlet-f-image-default-v1.0.9.zip +05e4d531c2db947379c08bda099a53055e18e86d4c7fbcba1004f7f7983da701 build-gimlet-c-image-default-v1.0.10.zip +9cba42b8d6c4dbd1bcc805b44e14e6bac7fa590f1faaaa47664f7aa254e91602 build-gimlet-d-image-default-v1.0.10.zip +3ace1b1d692e3c23815665b932dadb42b58abc1942fdd4564c1884a13fa0f0cb build-gimlet-e-image-default-v1.0.10.zip +b3b05e1926bb0f66be8587a67084c76cd760da5db0f1e288c265a03cf80637e9 build-gimlet-f-image-default-v1.0.10.zip 0fa33a057dec7301280338cf5a80a5198b5d95475dba350a807979f1a53ce5c7 build-psc-b-image-default-v1.0.9.zip 88e7aa91fe4f976753ccfc8ba9a57a0521c54d9ef35504cf16e7b1ce78c87465 build-psc-c-image-default-v1.0.9.zip cfbf21fef84b38c2a5ca3c0b45703c20c2605c6bf5d599b53b26e3a2d09d756d build-sidecar-b-image-default-v1.0.9.zip c98b89993f1a9b5219bbabf06897db2f92d60f6a3afb4374cb8e6e79d57fdfe4 build-sidecar-c-image-default-v1.0.9.zip - diff --git a/tools/hubris_version b/tools/hubris_version index 37a180a853..cb9e30d980 100644 --- a/tools/hubris_version +++ b/tools/hubris_version @@ -1 +1 @@ -TAGS=(gimlet-v1.0.9 psc-v1.0.9 sidecar-v1.0.9) +TAGS=(gimlet-v1.0.10 psc-v1.0.9 sidecar-v1.0.9) From bc1efb35c7b98c049d041fa2fde3613a57490e17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karen=20C=C3=A1rcamo?= Date: Fri, 1 Mar 2024 20:08:15 +1300 Subject: [PATCH 058/157] Add logging to identify client::tests::test_replicated test flake (#5175) To help us identify what is going on with the test flake. Related: https://github.com/oxidecomputer/omicron/issues/5166 --- oximeter/db/src/client.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/oximeter/db/src/client.rs b/oximeter/db/src/client.rs index ca996dc894..2e3029a938 100644 --- a/oximeter/db/src/client.rs +++ b/oximeter/db/src/client.rs @@ -1944,19 +1944,22 @@ mod tests { let sql = String::from( "INSERT INTO oximeter.measurements_string (datum) VALUES ('hiya');", ); - client_2.execute_with_body(sql).await.unwrap(); + let result = client_2.execute_with_body(sql.clone()).await.unwrap(); + info!(log, "Inserted datum to client #2"; "sql" => sql, "result" => result); // Make sure replicas are synched let sql = String::from( "SYSTEM SYNC REPLICA oximeter.measurements_string_local;", ); - client_1.execute_with_body(sql).await.unwrap(); + let result = client_1.execute_with_body(sql.clone()).await.unwrap(); + info!(log, "Synced replicas via client #1"; "sql" => sql, "result" => result); // Make sure data exists in the other replica let sql = String::from( "SELECT * FROM oximeter.measurements_string FORMAT JSONEachRow;", ); - let result =
client_1.execute_with_body(sql).await.unwrap(); + let result = client_1.execute_with_body(sql.clone()).await.unwrap(); + info!(log, "Retrieved values via client #1"; "sql" => sql, "result" => result.clone()); assert!(result.contains("hiya")); client_1.wipe_replicated_db().await?; From f6efad4126986f72d3bedfdc04cb4ed30a926f0b Mon Sep 17 00:00:00 2001 From: David Crespo Date: Fri, 1 Mar 2024 11:38:13 -0600 Subject: [PATCH 059/157] Floating IP attach fix: fip by ID + instance by name and vice versa (#5173) When I tried attaching a floating IP to an instance in the web console on dogfood, I got this error from the API. It turns out we were taking the project as specified for determining the floating IP and jamming it into the instance selector, regardless of whether it needed it (selecting instance by name) or not (selecting instance by ID). I added a bit to the floating IP attach test and confirmed that it fails without the fix and passes with the fix. Even with this fix in place, there is still one more loose end to tie up: if the floating IP is specified by ID, and therefore does not have the project, but the _instance_ is specified by name, this will fail because there is no project to include for instance specification purposes. The only way I can see of making this work without asking the user to somehow specify the project for the instance is to fetch the floating IP and pull the project off of that to include on the instance selector. This makes sense because you can only ever attach a floating IP to an instance in the same project. Edit: did that in 6c144e82e04e1a64da9bd1a6ccc6026efb76f98e. Kinda sad to do an extra DB fetch, but I don't see another way to preserve the ability to do name or ID on the instance. --- nexus/src/app/external_ip.rs | 26 +++++++- nexus/src/app/instance.rs | 2 +- nexus/tests/integration_tests/external_ips.rs | 59 +++++++++++++++++++ nexus/types/src/external_api/params.rs | 2 +- 4 files changed, 86 insertions(+), 3 deletions(-) diff --git a/nexus/src/app/external_ip.rs b/nexus/src/app/external_ip.rs index 45b05fbb0b..22907d4ea9 100644 --- a/nexus/src/app/external_ip.rs +++ b/nexus/src/app/external_ip.rs @@ -146,10 +146,34 @@ impl super::Nexus { ) -> UpdateResult { match target.kind { params::FloatingIpParentKind::Instance => { + // Handle the case where floating IP is specified by name (and + // therefore a project is given) but instance is specified by + // ID (and therefore the lookup doesn't want a project), as well + // as the converse: floating IP specified by ID (and no project + // given) but instance specified by name, and therefore needs + // a project. In the latter case, we have to fetch the floating + // IP by its ID in order to get the project to include with + // the instance. 
+ let project = + match (target.parent.clone(), fip_selector.clone().project) + { + (NameOrId::Id(_), _) => None, + (NameOrId::Name(_), Some(p)) => Some(p), + (NameOrId::Name(_), None) => { + let fip_lookup = self.floating_ip_lookup( + opctx, + fip_selector.clone(), + )?; + let (.., fip) = fip_lookup.fetch().await?; + Some(fip.project_id.into()) + } + }; + let instance_selector = params::InstanceSelector { - project: fip_selector.project, + project, instance: target.parent, }; + let instance = self.instance_lookup(opctx, instance_selector)?; let attach_params = ¶ms::ExternalIpCreate::Floating { diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 88a427af6a..200fad9f05 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -201,7 +201,7 @@ impl super::Nexus { .. } => { Err(Error::invalid_request( - "when providing instance as an ID project should not be specified", + "when providing instance as an ID, project should not be specified", )) } _ => { diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs index ee59c6a034..39c27174a1 100644 --- a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -643,6 +643,65 @@ async fn test_external_ip_live_attach_detach( "instance does not have an ephemeral IP attached".to_string() ); } + + // Finally, two kind of funny tests. There is special logic in the handler + // for the case where the floating IP is specified by name but the instance + // by ID and vice versa, so we want to test both combinations. + + // Attach to an instance by instance ID with floating IP selected by name + let floating_ip_name = fips[0].identity.name.as_str(); + let instance_id = instances[0].identity.id; + let url = attach_floating_ip_url(floating_ip_name, PROJECT_NAME); + let body = params::FloatingIpAttach { + kind: params::FloatingIpParentKind::Instance, + parent: instance_id.into(), + }; + let attached: views::FloatingIp = NexusRequest::new( + RequestBuilder::new(client, Method::POST, &url) + .body(Some(&body)) + .expect_status(Some(StatusCode::ACCEPTED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + assert_eq!(attached.identity.name.as_str(), floating_ip_name); + + let instance_name = instances[0].identity.name.as_str(); + let eip_list = + fetch_instance_external_ips(client, instance_name, PROJECT_NAME).await; + assert_eq!(eip_list.len(), 1); + assert_eq!(eip_list[0].ip(), fips[0].ip); + + // now the other way: floating IP by ID and instance by name + let floating_ip_id = fips[1].identity.id; + let instance_name = instances[1].identity.name.as_str(); + let url = format!("/v1/floating-ips/{floating_ip_id}/attach"); + let body = params::FloatingIpAttach { + kind: params::FloatingIpParentKind::Instance, + parent: instance_name.parse::().unwrap().into(), + }; + let attached: views::FloatingIp = NexusRequest::new( + RequestBuilder::new(client, Method::POST, &url) + .body(Some(&body)) + .expect_status(Some(StatusCode::ACCEPTED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + assert_eq!(attached.identity.id, floating_ip_id); + + let eip_list = + fetch_instance_external_ips(client, instance_name, PROJECT_NAME).await; + assert_eq!(eip_list.len(), 1); + assert_eq!(eip_list[0].ip(), fips[1].ip); } #[nexus_test] diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 
567b1ff4ad..46260d20d0 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -167,7 +167,7 @@ pub struct OptionalProjectSelector { pub project: Option, } -#[derive(Deserialize, JsonSchema)] +#[derive(Deserialize, JsonSchema, Clone)] pub struct FloatingIpSelector { /// Name or ID of the project, only required if `floating_ip` is provided as a `Name` pub project: Option, From 6c54c826cd74f272bffc139d97ad2b728d8dd232 Mon Sep 17 00:00:00 2001 From: Kyle Simpson Date: Fri, 1 Mar 2024 21:30:31 +0000 Subject: [PATCH 060/157] Fix: prevent cross-project floating IP attach (#5177) This PR ensures that attached FIPs have the same project ID as their target instance, and prevents double lookup of a FIP in the `FIP [name + project] + Instance [ID]` attach case. --- .../src/db/datastore/external_ip.rs | 15 ++-- nexus/src/app/external_ip.rs | 53 +++++------- nexus/src/app/instance.rs | 54 ++++++++++-- nexus/src/app/sagas/instance_common.rs | 8 +- nexus/src/app/sagas/instance_create.rs | 8 +- nexus/src/app/sagas/instance_ip_attach.rs | 72 +++++++--------- nexus/src/app/sagas/instance_ip_detach.rs | 7 +- nexus/src/app/sagas/mod.rs | 2 +- nexus/src/external_api/http_entrypoints.rs | 6 +- nexus/tests/integration_tests/external_ips.rs | 83 ++++++++++++++++++- 10 files changed, 213 insertions(+), 95 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index d15e1b7ca8..3fdfbc1d4b 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -37,12 +37,12 @@ use chrono::Utc; use diesel::prelude::*; use nexus_db_model::Instance; use nexus_db_model::IpAttachState; -use nexus_types::external_api::params; use nexus_types::identity::Resource; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; +use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::ResourceType; @@ -227,7 +227,8 @@ impl DataStore { &self, opctx: &OpContext, project_id: Uuid, - params: params::FloatingIpCreate, + identity: IdentityMetadataCreateParams, + ip: Option, pool: Option, ) -> CreateResult { let ip_id = Uuid::new_v4(); @@ -256,11 +257,11 @@ impl DataStore { let pool_id = pool.id(); - let data = if let Some(ip) = params.ip { + let data = if let Some(ip) = ip { IncompleteExternalIp::for_floating_explicit( ip_id, - &Name(params.identity.name), - ¶ms.identity.description, + &Name(identity.name), + &identity.description, project_id, ip, pool_id, @@ -268,8 +269,8 @@ impl DataStore { } else { IncompleteExternalIp::for_floating( ip_id, - &Name(params.identity.name), - ¶ms.identity.description, + &Name(identity.name), + &identity.description, project_id, pool_id, ) diff --git a/nexus/src/app/external_ip.rs b/nexus/src/app/external_ip.rs index 22907d4ea9..19a2cdee36 100644 --- a/nexus/src/app/external_ip.rs +++ b/nexus/src/app/external_ip.rs @@ -109,9 +109,11 @@ impl super::Nexus { let (.., authz_project) = project_lookup.lookup_for(authz::Action::CreateChild).await?; - let pool = match ¶ms.pool { + let params::FloatingIpCreate { identity, pool, ip } = params; + + let pool = match pool { Some(pool) => Some( - self.ip_pool_lookup(opctx, pool)? + self.ip_pool_lookup(opctx, &pool)? 
.lookup_for(authz::Action::Read) .await? .0, @@ -121,7 +123,7 @@ impl super::Nexus { Ok(self .db_datastore - .allocate_floating_ip(opctx, authz_project.id(), params, pool) + .allocate_floating_ip(opctx, authz_project.id(), identity, ip, pool) .await? .try_into() .unwrap()) @@ -144,45 +146,32 @@ impl super::Nexus { fip_selector: params::FloatingIpSelector, target: params::FloatingIpAttach, ) -> UpdateResult { + let fip_lookup = self.floating_ip_lookup(opctx, fip_selector)?; + let (.., authz_project, authz_fip) = + fip_lookup.lookup_for(authz::Action::Modify).await?; + match target.kind { params::FloatingIpParentKind::Instance => { - // Handle the case where floating IP is specified by name (and - // therefore a project is given) but instance is specified by - // ID (and therefore the lookup doesn't want a project), as well - // as the converse: floating IP specified by ID (and no project - // given) but instance specified by name, and therefore needs - // a project. In the latter case, we have to fetch the floating - // IP by its ID in order to get the project to include with - // the instance. - let project = - match (target.parent.clone(), fip_selector.clone().project) - { - (NameOrId::Id(_), _) => None, - (NameOrId::Name(_), Some(p)) => Some(p), - (NameOrId::Name(_), None) => { - let fip_lookup = self.floating_ip_lookup( - opctx, - fip_selector.clone(), - )?; - let (.., fip) = fip_lookup.fetch().await?; - Some(fip.project_id.into()) - } - }; - + // Handle the cases where the FIP and instance are specified by + // name and ID (or ID and name) respectively. We remove the project + // from the instance lookup if using the instance's ID, and insert + // the floating IP's project ID otherwise. let instance_selector = params::InstanceSelector { - project, + project: match &target.parent { + NameOrId::Id(_) => None, + NameOrId::Name(_) => Some(authz_project.id().into()), + }, instance: target.parent, }; let instance = self.instance_lookup(opctx, instance_selector)?; - let attach_params = ¶ms::ExternalIpCreate::Floating { - floating_ip: fip_selector.floating_ip, - }; - self.instance_attach_external_ip( + + self.instance_attach_floating_ip( opctx, &instance, - attach_params, + authz_fip, + authz_project, ) .await .and_then(FloatingIp::try_from) diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 200fad9f05..f6cf90718f 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -49,6 +49,7 @@ use propolis_client::support::tungstenite::Message as WebSocketMessage; use propolis_client::support::InstanceSerialConsoleHelper; use propolis_client::support::WSClientOffset; use propolis_client::support::WebSocketStream; +use sagas::instance_common::ExternalIpAttach; use sled_agent_client::types::InstanceMigrationSourceParams; use sled_agent_client::types::InstanceMigrationTargetParams; use sled_agent_client::types::InstanceProperties; @@ -201,7 +202,7 @@ impl super::Nexus { .. } => { Err(Error::invalid_request( - "when providing instance as an ID, project should not be specified", + "when providing instance as an ID project should not be specified", )) } _ => { @@ -1952,20 +1953,63 @@ impl super::Nexus { Ok(()) } - /// Attach an external IP to an instance. - pub(crate) async fn instance_attach_external_ip( + /// Attach an ephemeral IP to an instance. 
+ pub(crate) async fn instance_attach_ephemeral_ip( + self: &Arc, + opctx: &OpContext, + instance_lookup: &lookup::Instance<'_>, + pool: Option, + ) -> UpdateResult { + let (.., authz_project, authz_instance) = + instance_lookup.lookup_for(authz::Action::Modify).await?; + + self.instance_attach_external_ip( + opctx, + authz_instance, + authz_project.id(), + ExternalIpAttach::Ephemeral { pool }, + ) + .await + } + + /// Attach a floating IP to an instance. + pub(crate) async fn instance_attach_floating_ip( self: &Arc, opctx: &OpContext, instance_lookup: &lookup::Instance<'_>, - ext_ip: &params::ExternalIpCreate, + authz_fip: authz::FloatingIp, + authz_fip_project: authz::Project, ) -> UpdateResult { let (.., authz_project, authz_instance) = instance_lookup.lookup_for(authz::Action::Modify).await?; + if authz_fip_project.id() != authz_project.id() { + return Err(Error::invalid_request( + "floating IP must be in the same project as the instance", + )); + } + + self.instance_attach_external_ip( + opctx, + authz_instance, + authz_project.id(), + ExternalIpAttach::Floating { floating_ip: authz_fip }, + ) + .await + } + + /// Attach an external IP to an instance. + pub(crate) async fn instance_attach_external_ip( self: &Arc, opctx: &OpContext, - instance_lookup: &lookup::Instance<'_>, - ext_ip: &params::ExternalIpCreate, + authz_instance: authz::Instance, + project_id: Uuid, + ext_ip: ExternalIpAttach, ) -> UpdateResult { let saga_params = sagas::instance_ip_attach::Params { create_params: ext_ip.clone(), authz_instance, - project_id: authz_project.id(), + project_id, serialized_authn: authn::saga::Serialized::for_opctx(opctx), }; diff --git a/nexus/src/app/sagas/instance_common.rs b/nexus/src/app/sagas/instance_common.rs index e915c026dd..b941739393 100644 --- a/nexus/src/app/sagas/instance_common.rs +++ b/nexus/src/app/sagas/instance_common.rs @@ -16,8 +16,8 @@ use nexus_db_queries::authz; use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::queries::external_ip::SAFE_TRANSIENT_INSTANCE_STATES; use nexus_db_queries::{authn, context::OpContext, db, db::DataStore}; -use omicron_common::api::external::Error; use omicron_common::api::external::InstanceState; +use omicron_common::api::external::{Error, NameOrId}; use serde::{Deserialize, Serialize}; use steno::ActionError; use uuid::Uuid; @@ -469,3 +469,9 @@ pub async fn instance_ip_remove_opte( Ok(()) } + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub enum ExternalIpAttach { + Ephemeral { pool: Option }, + Floating { floating_ip: authz::FloatingIp }, +} diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 88dd0ae36e..0950754572 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -733,7 +733,7 @@ async fn sic_allocate_instance_external_ip( } // Set the parent of an existing floating IP to the new instance's ID. 
params::ExternalIpCreate::Floating { floating_ip } => { - let (.., authz_fip) = match floating_ip { + let (.., authz_project, authz_fip) = match floating_ip { NameOrId::Name(name) => LookupPath::new(&opctx, datastore) .project_id(saga_params.project_id) .floating_ip_name(db::model::Name::ref_cast(name)), @@ -745,6 +745,12 @@ async fn sic_allocate_instance_external_ip( .await .map_err(ActionError::action_failed)?; + if authz_project.id() != saga_params.project_id { + return Err(ActionError::action_failed(Error::invalid_request( + "floating IP must be in the same project as the instance" + ))); + } + datastore .floating_ip_begin_attach(&opctx, &authz_fip, instance_id, true) .await diff --git a/nexus/src/app/sagas/instance_ip_attach.rs b/nexus/src/app/sagas/instance_ip_attach.rs index be7f81368e..3cd6ac1c46 100644 --- a/nexus/src/app/sagas/instance_ip_attach.rs +++ b/nexus/src/app/sagas/instance_ip_attach.rs @@ -4,17 +4,15 @@ use super::instance_common::{ instance_ip_add_nat, instance_ip_add_opte, instance_ip_get_instance_state, - instance_ip_move_state, instance_ip_remove_opte, ModifyStateForExternalIp, + instance_ip_move_state, instance_ip_remove_opte, ExternalIpAttach, + ModifyStateForExternalIp, }; use super::{ActionRegistry, NexusActionContext, NexusSaga}; use crate::app::sagas::declare_saga_actions; -use crate::app::{authn, authz, db}; -use crate::external_api::params; +use crate::app::{authn, authz}; use nexus_db_model::{IpAttachState, Ipv4NatEntry}; -use nexus_db_queries::db::lookup::LookupPath; use nexus_types::external_api::views; -use omicron_common::api::external::{Error, NameOrId}; -use ref_cast::RefCast; +use omicron_common::api::external::Error; use serde::Deserialize; use serde::Serialize; use steno::ActionError; @@ -72,7 +70,7 @@ declare_saga_actions! { #[derive(Debug, Deserialize, Serialize)] pub struct Params { - pub create_params: params::ExternalIpCreate, + pub create_params: ExternalIpAttach, pub authz_instance: authz::Instance, pub project_id: Uuid, /// Authentication context to use to fetch the instance's current state from @@ -93,7 +91,7 @@ async fn siia_begin_attach_ip( match ¶ms.create_params { // Allocate a new IP address from the target, possibly default, pool - params::ExternalIpCreate::Ephemeral { pool } => { + ExternalIpAttach::Ephemeral { pool } => { let pool = if let Some(name_or_id) = pool { Some( osagactx @@ -125,33 +123,19 @@ async fn siia_begin_attach_ip( }) } // Set the parent of an existing floating IP to the new instance's ID. 
- params::ExternalIpCreate::Floating { floating_ip } => { - let (.., authz_fip) = match floating_ip { - NameOrId::Name(name) => LookupPath::new(&opctx, datastore) - .project_id(params.project_id) - .floating_ip_name(db::model::Name::ref_cast(name)), - NameOrId::Id(id) => { - LookupPath::new(&opctx, datastore).floating_ip_id(*id) - } - } - .lookup_for(authz::Action::Modify) + ExternalIpAttach::Floating { floating_ip } => datastore + .floating_ip_begin_attach( + &opctx, + &floating_ip, + params.authz_instance.id(), + false, + ) .await - .map_err(ActionError::action_failed)?; - - datastore - .floating_ip_begin_attach( - &opctx, - &authz_fip, - params.authz_instance.id(), - false, - ) - .await - .map_err(ActionError::action_failed) - .map(|(external_ip, do_saga)| ModifyStateForExternalIp { - external_ip: Some(external_ip), - do_saga, - }) - } + .map_err(ActionError::action_failed) + .map(|(external_ip, do_saga)| ModifyStateForExternalIp { + external_ip: Some(external_ip), + do_saga, + }), } } @@ -349,7 +333,7 @@ impl NexusSaga for SagaInstanceIpAttach { #[cfg(test)] pub(crate) mod test { use super::*; - use crate::app::{saga::create_saga_dag, sagas::test_helpers}; + use crate::app::{db, saga::create_saga_dag, sagas::test_helpers}; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::{ ExpressionMethods, OptionalExtension, QueryDsl, SelectableHelper, @@ -357,12 +341,13 @@ pub(crate) mod test { use dropshot::test_util::ClientTestContext; use nexus_db_model::{ExternalIp, IpKind}; use nexus_db_queries::context::OpContext; + use nexus_db_queries::db::lookup::LookupPath; use nexus_test_utils::resource_helpers::{ create_default_ip_pool, create_floating_ip, create_instance, create_project, }; use nexus_test_utils_macros::nexus_test; - use omicron_common::api::external::{Name, SimpleIdentity}; + use omicron_common::api::external::SimpleIdentity; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; @@ -391,17 +376,22 @@ pub(crate) mod test { datastore: &db::DataStore, use_floating: bool, ) -> Params { + let project_name = db::model::Name(PROJECT_NAME.parse().unwrap()); let create_params = if use_floating { - params::ExternalIpCreate::Floating { - floating_ip: FIP_NAME.parse::().unwrap().into(), - } + let (.., floating_ip) = LookupPath::new(opctx, datastore) + .project_name(&project_name) + .floating_ip_name(&db::model::Name(FIP_NAME.parse().unwrap())) + .lookup_for(authz::Action::Modify) + .await + .unwrap(); + ExternalIpAttach::Floating { floating_ip } } else { - params::ExternalIpCreate::Ephemeral { pool: None } + ExternalIpAttach::Ephemeral { pool: None } }; let (.., authz_project, authz_instance) = LookupPath::new(opctx, datastore) - .project_name(&db::model::Name(PROJECT_NAME.parse().unwrap())) + .project_name(&project_name) .instance_name(&db::model::Name(INSTANCE_NAME.parse().unwrap())) .lookup_for(authz::Action::Modify) .await diff --git a/nexus/src/app/sagas/instance_ip_detach.rs b/nexus/src/app/sagas/instance_ip_detach.rs index da6c92077d..7a71824376 100644 --- a/nexus/src/app/sagas/instance_ip_detach.rs +++ b/nexus/src/app/sagas/instance_ip_detach.rs @@ -357,6 +357,8 @@ pub(crate) mod test { .project_name(&proj_name) .instance_name(&inst_name); + let (.., authz_proj, authz_instance, _) = lookup.fetch().await.unwrap(); + for use_float in [false, true] { let params = instance_ip_attach::test::new_test_params( opctx, datastore, use_float, @@ -365,8 +367,9 @@ pub(crate) mod test { nexus .instance_attach_external_ip( opctx, - &lookup, - ¶ms.create_params, + 
authz_instance.clone(), + authz_proj.id(), + params.create_params, ) .await .unwrap(); diff --git a/nexus/src/app/sagas/mod.rs b/nexus/src/app/sagas/mod.rs index e9f800c61b..aef8442090 100644 --- a/nexus/src/app/sagas/mod.rs +++ b/nexus/src/app/sagas/mod.rs @@ -24,7 +24,7 @@ pub mod disk_create; pub mod disk_delete; pub mod finalize_disk; pub mod image_delete; -mod instance_common; +pub(crate) mod instance_common; pub mod instance_create; pub mod instance_delete; pub mod instance_ip_attach; diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index b007cc6217..0a626db234 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -4088,12 +4088,10 @@ async fn instance_ephemeral_ip_attach( let instance_lookup = nexus.instance_lookup(&opctx, instance_selector)?; let ip = nexus - .instance_attach_external_ip( + .instance_attach_ephemeral_ip( &opctx, &instance_lookup, - ¶ms::ExternalIpCreate::Ephemeral { - pool: ip_to_create.into_inner().pool, - }, + ip_to_create.into_inner().pool, ) .await?; Ok(HttpResponseAccepted(ip)) diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs index 39c27174a1..c1c71e9afc 100644 --- a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -36,8 +36,10 @@ use nexus_types::external_api::views::FloatingIp; use nexus_types::identity::Resource; use omicron_common::address::IpRange; use omicron_common::address::Ipv4Range; +use omicron_common::api::external::ByteCount; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Instance; +use omicron_common::api::external::InstanceCpuCount; use omicron_common::api::external::Name; use omicron_common::api::external::NameOrId; use uuid::Uuid; @@ -70,6 +72,10 @@ pub fn attach_floating_ip_url( format!("/v1/floating-ips/{floating_ip_name}/attach?project={project_name}") } +pub fn attach_floating_ip_uuid(floating_ip_uuid: &Uuid) -> String { + format!("/v1/floating-ips/{floating_ip_uuid}/attach") +} + pub fn detach_floating_ip_url( floating_ip_name: &str, project_name: &str, @@ -705,7 +711,82 @@ async fn test_external_ip_live_attach_detach( } #[nexus_test] -async fn test_external_ip_attach_detach_fail_if_in_use_by_other( +async fn test_floating_ip_attach_fail_between_projects( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let apictx = &cptestctx.server.apictx(); + let _nexus = &apictx.nexus; + + create_default_ip_pool(&client).await; + let _project = create_project(client, PROJECT_NAME).await; + let _project2 = create_project(client, "proj2").await; + + // Create a floating IP in another project. + let fip = + create_floating_ip(client, FIP_NAMES[0], "proj2", None, None).await; + + // Create a new instance *then* bind the FIP to it, both by ID. 
+ let instance = + instance_for_external_ips(client, INSTANCE_NAMES[0], true, false, &[]) + .await; + + let url = attach_floating_ip_uuid(&fip.identity.id); + let error: HttpErrorResponseBody = NexusRequest::new( + RequestBuilder::new(client, Method::POST, &url) + .body(Some(¶ms::FloatingIpAttach { + kind: params::FloatingIpParentKind::Instance, + parent: instance.identity.id.into(), + })) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + assert_eq!( + error.message, + "floating IP must be in the same project as the instance".to_string() + ); + + // Create a new instance with a FIP, referenced by ID. + let url = format!("/v1/instances?project={PROJECT_NAME}"); + let error = object_create_error( + client, + &url, + ¶ms::InstanceCreate { + identity: IdentityMetadataCreateParams { + name: INSTANCE_NAMES[1].parse().unwrap(), + description: "".into(), + }, + ncpus: InstanceCpuCount(4), + memory: ByteCount::from_gibibytes_u32(1), + hostname: "the-host".parse().unwrap(), + user_data: + b"#cloud-config\nsystem_info:\n default_user:\n name: oxide" + .to_vec(), + ssh_public_keys: Some(Vec::new()), + network_interfaces: + params::InstanceNetworkInterfaceAttachment::Default, + external_ips: vec![params::ExternalIpCreate::Floating { + floating_ip: fip.identity.id.into(), + }], + disks: vec![], + start: true, + }, + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!( + error.message, + "floating IP must be in the same project as the instance".to_string() + ); +} + +#[nexus_test] +async fn test_external_ip_attach_fail_if_in_use_by_other( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; From 4ab41a074aa1492e6a78eeb4294f4a178f71c922 Mon Sep 17 00:00:00 2001 From: Charlie Park Date: Fri, 1 Mar 2024 13:55:42 -0800 Subject: [PATCH 061/157] Enable Updates for Floating IPs (#5176) Fixes #5016 This PR adds the ability to update names and descriptions for floating IPs. 
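As a quick illustration of the new endpoint's shape, here is a hedged sketch of calling it directly over HTTP with `reqwest` (assuming `reqwest` with the `json` feature, `tokio`, and `serde_json`; the host, bearer token, and resource names are hypothetical placeholders, and real consumers would typically go through the generated SDK or the console):

```rust
// Sketch only: exercising PUT /v1/floating-ips/{floating_ip} as added in
// this PR. The base URL, token, and names below are hypothetical.
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let client = reqwest::Client::new();
    let resp = client
        .put("https://oxide.example.com/v1/floating-ips/my-fip?project=my-project")
        .bearer_auth("oxide-token-placeholder")
        // Both fields are optional; a field left out is left unchanged,
        // matching the Option<_> fields of FloatingIpUpdate.
        .json(&json!({
            "name": "updated",
            "description": "an updated Floating IP",
        }))
        .send()
        .await?
        .error_for_status()?;
    println!("updated floating IP: {}", resp.text().await?);
    Ok(())
}
```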
--------- Co-authored-by: David Crespo --- nexus/db-model/src/external_ip.rs | 19 +++++ .../src/db/datastore/external_ip.rs | 27 +++++++ nexus/src/app/external_ip.rs | 16 +++++ nexus/src/external_api/http_entrypoints.rs | 46 ++++++++++-- nexus/tests/integration_tests/endpoints.rs | 11 +++ nexus/tests/integration_tests/external_ips.rs | 55 ++++++++++++++ nexus/tests/output/nexus_tags.txt | 1 + nexus/types/src/external_api/params.rs | 6 ++ openapi/nexus.json | 72 +++++++++++++++++++ 9 files changed, 249 insertions(+), 4 deletions(-) diff --git a/nexus/db-model/src/external_ip.rs b/nexus/db-model/src/external_ip.rs index b30f91c7c0..0f484f7610 100644 --- a/nexus/db-model/src/external_ip.rs +++ b/nexus/db-model/src/external_ip.rs @@ -16,6 +16,7 @@ use db_macros::Resource; use diesel::Queryable; use diesel::Selectable; use ipnetwork::IpNetwork; +use nexus_types::external_api::params; use nexus_types::external_api::shared; use nexus_types::external_api::views; use omicron_common::address::NUM_SOURCE_NAT_PORTS; @@ -538,6 +539,24 @@ impl From for views::FloatingIp { } } +#[derive(AsChangeset)] +#[diesel(table_name = external_ip)] +pub struct FloatingIpUpdate { + pub name: Option, + pub description: Option, + pub time_modified: DateTime, +} + +impl From for FloatingIpUpdate { + fn from(params: params::FloatingIpUpdate) -> Self { + Self { + name: params.identity.name.map(Name), + description: params.identity.description, + time_modified: Utc::now(), + } + } +} + impl TryFrom for InstanceExternalIpBody { type Error = Error; diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 3fdfbc1d4b..f561a024e8 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -35,6 +35,7 @@ use crate::db::update_and_check::UpdateStatus; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; +use nexus_db_model::FloatingIpUpdate; use nexus_db_model::Instance; use nexus_db_model::IpAttachState; use nexus_types::identity::Resource; @@ -824,6 +825,32 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + /// Update a Floating IP + pub async fn floating_ip_update( + &self, + opctx: &OpContext, + authz_fip: &authz::FloatingIp, + update: FloatingIpUpdate, + ) -> UpdateResult { + use db::schema::external_ip::dsl; + + opctx.authorize(authz::Action::Modify, authz_fip).await?; + + diesel::update(dsl::external_ip) + .filter(dsl::id.eq(authz_fip.id())) + .filter(dsl::time_deleted.is_null()) + .set(update) + .returning(ExternalIp::as_returning()) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource(authz_fip), + ) + }) + } + /// Delete a Floating IP, verifying first that it is not in use. pub async fn floating_ip_delete( &self, diff --git a/nexus/src/app/external_ip.rs b/nexus/src/app/external_ip.rs index 19a2cdee36..9908512c0f 100644 --- a/nexus/src/app/external_ip.rs +++ b/nexus/src/app/external_ip.rs @@ -129,6 +129,22 @@ impl super::Nexus { .unwrap()) } + pub(crate) async fn floating_ip_update( + &self, + opctx: &OpContext, + ip_lookup: lookup::FloatingIp<'_>, + params: params::FloatingIpUpdate, + ) -> UpdateResult { + let (.., authz_fip) = + ip_lookup.lookup_for(authz::Action::Modify).await?; + Ok(self + .db_datastore + .floating_ip_update(opctx, &authz_fip, params.clone().into()) + .await? 
+ .try_into() + .unwrap()) + } + pub(crate) async fn floating_ip_delete( &self, opctx: &OpContext, diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 0a626db234..8fe02a0fd8 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -7,10 +7,10 @@ use super::{ console_api, device_auth, params, views::{ - self, Certificate, Group, IdentityProvider, Image, IpPool, IpPoolRange, - PhysicalDisk, Project, Rack, Role, Silo, SiloQuotas, SiloUtilization, - Sled, Snapshot, SshKey, User, UserBuiltin, Utilization, Vpc, VpcRouter, - VpcSubnet, + self, Certificate, FloatingIp, Group, IdentityProvider, Image, IpPool, + IpPoolRange, PhysicalDisk, Project, Rack, Role, Silo, SiloQuotas, + SiloUtilization, Sled, Snapshot, SshKey, User, UserBuiltin, + Utilization, Vpc, VpcRouter, VpcSubnet, }, }; use crate::external_api::shared; @@ -141,6 +141,7 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(floating_ip_list)?; api.register(floating_ip_create)?; api.register(floating_ip_view)?; + api.register(floating_ip_update)?; api.register(floating_ip_delete)?; api.register(floating_ip_attach)?; api.register(floating_ip_detach)?; @@ -1921,6 +1922,43 @@ async fn floating_ip_create( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } +/// Update floating IP +#[endpoint { + method = PUT, + path = "/v1/floating-ips/{floating_ip}", + tags = ["floating-ips"], +}] +async fn floating_ip_update( + rqctx: RequestContext>, + path_params: Path, + query_params: Query, + updated_floating_ip: TypedBody, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let updated_floating_ip_params = updated_floating_ip.into_inner(); + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let floating_ip_selector = params::FloatingIpSelector { + project: query.project, + floating_ip: path.floating_ip, + }; + let floating_ip_lookup = + nexus.floating_ip_lookup(&opctx, floating_ip_selector)?; + let floating_ip = nexus + .floating_ip_update( + &opctx, + floating_ip_lookup, + updated_floating_ip_params, + ) + .await?; + Ok(HttpResponseOk(floating_ip)) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + /// Delete floating IP #[endpoint { method = DELETE, diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index c5c69df232..f18b2d961d 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -772,6 +772,14 @@ pub static DEMO_FLOAT_IP_CREATE: Lazy = pool: None, }); +pub static DEMO_FLOAT_IP_UPDATE: Lazy = + Lazy::new(|| params::FloatingIpUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: Some(String::from("an updated Floating IP")), + }, + }); + pub static DEMO_FLOAT_IP_ATTACH: Lazy = Lazy::new(|| params::FloatingIpAttach { kind: params::FloatingIpParentKind::Instance, @@ -2277,6 +2285,9 @@ pub static VERIFY_ENDPOINTS: Lazy> = Lazy::new(|| { unprivileged_access: UnprivilegedAccess::None, allowed_methods: vec![ AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(&*DEMO_FLOAT_IP_UPDATE).unwrap() + ), AllowedMethod::Delete, ], }, diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs index c1c71e9afc..25c74bfd7f 100644 --- 
a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -28,6 +28,8 @@ use nexus_test_utils::resource_helpers::object_create; use nexus_test_utils::resource_helpers::object_create_error; use nexus_test_utils::resource_helpers::object_delete; use nexus_test_utils::resource_helpers::object_delete_error; +use nexus_test_utils::resource_helpers::object_get; +use nexus_test_utils::resource_helpers::object_put; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use nexus_types::external_api::shared; @@ -38,6 +40,7 @@ use omicron_common::address::IpRange; use omicron_common::address::Ipv4Range; use omicron_common::api::external::ByteCount; use omicron_common::api::external::IdentityMetadataCreateParams; +use omicron_common::api::external::IdentityMetadataUpdateParams; use omicron_common::api::external::Instance; use omicron_common::api::external::InstanceCpuCount; use omicron_common::api::external::Name; @@ -387,6 +390,58 @@ async fn test_floating_ip_create_name_in_use( ); } +#[nexus_test] +async fn test_floating_ip_update(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + create_default_ip_pool(&client).await; + let project = create_project(client, PROJECT_NAME).await; + + // Create the Floating IP + let fip = create_floating_ip( + client, + FIP_NAMES[0], + project.identity.name.as_str(), + None, + None, + ) + .await; + + let floating_ip_url = get_floating_ip_by_id_url(&fip.identity.id); + + // Verify that the Floating IP was created correctly + let fetched_floating_ip: FloatingIp = + object_get(client, &floating_ip_url).await; + + assert_eq!(fip.identity, fetched_floating_ip.identity); + + // Set up the updated values + let new_fip_name: &str = "updated"; + let new_fip_desc: &str = "updated description"; + let updates: params::FloatingIpUpdate = params::FloatingIpUpdate { + identity: IdentityMetadataUpdateParams { + name: Some(String::from(new_fip_name).parse().unwrap()), + description: Some(String::from(new_fip_desc).parse().unwrap()), + }, + }; + + // Update the Floating IP + let new_fip: FloatingIp = + object_put(client, &floating_ip_url, &updates).await; + + assert_eq!(new_fip.identity.name.as_str(), new_fip_name); + assert_eq!(new_fip.identity.description, new_fip_desc); + assert_eq!(new_fip.project_id, project.identity.id); + assert_eq!(new_fip.identity.time_created, fip.identity.time_created); + assert_ne!(new_fip.identity.time_modified, fip.identity.time_modified); + + // Verify that the Floating IP was updated correctly + let fetched_modified_floating_ip: FloatingIp = + object_get(client, &floating_ip_url).await; + + assert_eq!(new_fip.identity, fetched_modified_floating_ip.identity); +} + #[nexus_test] async fn test_floating_ip_delete(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index adb36a24af..ecffadcb4d 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -17,6 +17,7 @@ floating_ip_create POST /v1/floating-ips floating_ip_delete DELETE /v1/floating-ips/{floating_ip} floating_ip_detach POST /v1/floating-ips/{floating_ip}/detach floating_ip_list GET /v1/floating-ips +floating_ip_update PUT /v1/floating-ips/{floating_ip} floating_ip_view GET /v1/floating-ips/{floating_ip} API operations found with tag "hidden" diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 
46260d20d0..31cb1d3e5c 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -890,6 +890,12 @@ pub struct FloatingIpCreate { pub pool: Option, } +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct FloatingIpUpdate { + #[serde(flatten)] + pub identity: IdentityMetadataUpdateParams, +} + /// The type of resource that a floating IP is attached to #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] #[serde(rename_all = "snake_case")] diff --git a/openapi/nexus.json b/openapi/nexus.json index b0aa84d67a..3d31331a90 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -964,6 +964,60 @@ } } }, + "put": { + "tags": [ + "floating-ips" + ], + "summary": "Update floating IP", + "operationId": "floating_ip_update", + "parameters": [ + { + "in": "path", + "name": "floating_ip", + "description": "Name or ID of the floating IP", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIpUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIp" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, "delete": { "tags": [ "floating-ips" @@ -11919,6 +11973,24 @@ "items" ] }, + "FloatingIpUpdate": { + "description": "Updateable identity-related parameters", + "type": "object", + "properties": { + "description": { + "nullable": true, + "type": "string" + }, + "name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + } + }, "Group": { "description": "View of a Group", "type": "object", From ac78673a8de8475213ea6c8d32c93473e9c704cf Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 1 Mar 2024 14:10:20 -0800 Subject: [PATCH 062/157] Use the right error conversion to external::Error for service errors (#5178) Service errors already define their own conversion to external errors: https://github.com/oxidecomputer/omicron/blob/f6efad4126986f72d3bedfdc04cb4ed30a926f0b/sled-agent/src/services.rs#L286 But this conversion was not used by the Sled Agent because it treated ALL `sled_agent::Error` types as internal errors: https://github.com/oxidecomputer/omicron/blob/f6efad4126986f72d3bedfdc04cb4ed30a926f0b/sled-agent/src/sled_agent.rs#L154-L160 This PR starts converting to better error types by enabling dispatch to more specific error converters.
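In pattern form, the fix looks like the following minimal, self-contained sketch; the type and variant names here are illustrative stand-ins, not the real omicron definitions:

```rust
// Stand-ins: `ExternalError` plays the role of external::Error, and
// `ServiceError` the role of the service layer's own error type.
struct ExternalError {
    internal_message: String,
}

enum ServiceError {
    NotFound(String),
}

// The sub-error already knows how to describe itself externally.
impl From<ServiceError> for ExternalError {
    fn from(err: ServiceError) -> Self {
        match err {
            ServiceError::NotFound(name) => ExternalError {
                internal_message: format!("service {name} not found"),
            },
        }
    }
}

enum SledAgentError {
    Services(ServiceError),
    Other(String),
}

// Previously every variant was flattened into a generic internal error;
// now a variant that carries a more specific error defers to that error's
// own `From` impl.
impl From<SledAgentError> for ExternalError {
    fn from(err: SledAgentError) -> Self {
        match err {
            SledAgentError::Services(err) => err.into(),
            SledAgentError::Other(msg) => {
                ExternalError { internal_message: msg }
            }
        }
    }
}
```

The interesting arm is `Services`: the `err.into()` call dispatches to the more specific converter instead of stringifying the error.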
--- sled-agent/src/sled_agent.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index 1a634a6346..07c4dbbf4e 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -153,8 +153,12 @@ pub enum Error { impl From for omicron_common::api::external::Error { fn from(err: Error) -> Self { - omicron_common::api::external::Error::InternalError { - internal_message: err.to_string(), + match err { + // Service errors can convert themselves into the external error + Error::Services(err) => err.into(), + _ => omicron_common::api::external::Error::InternalError { + internal_message: err.to_string(), + }, } } } From 18cb8ed2382eb782ebf6f522289e355b0441583c Mon Sep 17 00:00:00 2001 From: Rain Date: Fri, 1 Mar 2024 17:08:23 -0800 Subject: [PATCH 063/157] [meta] patch whoami (#5184) Patch it to fix memory corruption on illumos. --- Cargo.lock | 29 +++++++++++++++++++++++++++-- Cargo.toml | 5 +++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c3fc5d86cf..7f3ea50279 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -883,6 +883,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chacha20" version = "0.9.1" @@ -4623,6 +4629,18 @@ dependencies = [ "libc", ] +[[package]] +name = "nix" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" +dependencies = [ + "bitflags 2.4.0", + "cfg-if", + "cfg_aliases", + "libc", +] + [[package]] name = "nodrop" version = "0.1.14" @@ -10138,6 +10156,12 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + [[package]] name = "wasm-bindgen" version = "0.2.87" @@ -10247,9 +10271,10 @@ dependencies = [ [[package]] name = "whoami" version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" +source = "git+https://github.com/oxidecomputer/whoami?branch=use-nix#da2a26e695849f1ffc394627ba27a6a473650705" dependencies = [ + "nix 0.28.0", + "wasite", "wasm-bindgen", "web-sys", ] diff --git a/Cargo.toml b/Cargo.toml index e19ed35c32..6f23ab929c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -621,6 +621,11 @@ opt-level = 3 git = 'https://github.com/oxidecomputer/pq-sys' branch = "oxide/omicron" +# See https://github.com/oxidecomputer/security-operations/issues/43. +[patch.crates-io.whoami] +git = "https://github.com/oxidecomputer/whoami" +branch = "use-nix" + # Using the workspace-hack via this patch directive means that it only applies # while building within this workspace. 
If another workspace imports a crate # from here via a git dependency, it will not have the workspace-hack applied From dcd3d9efcadc6ff1f6ac4a34726f41d62bec296f Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sat, 2 Mar 2024 09:49:40 +0000 Subject: [PATCH 064/157] chore(deps): update taiki-e/install-action digest to c6ffb58 (#5174) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`45dfafe` -> `c6ffb58`](https://togithub.com/taiki-e/install-action/compare/45dfafe...c6ffb58) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 49914f36db..e3e032806c 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@45dfafe7696c1865602c71c94d46a16a45f8e88b # v2 + uses: taiki-e/install-action@c6ffb5827ec9fecafc811f72ec5d7854680dc6ca # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From 77669cdc49ebf07e451ee77109d0567129db3174 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Mon, 4 Mar 2024 11:49:11 -0500 Subject: [PATCH 065/157] Log all possible sled targets when creating a reservation (#5182) We hope this gives us some insight into https://github.com/oxidecomputer/omicron/issues/5181. --- nexus/db-queries/src/db/datastore/sled.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/nexus/db-queries/src/db/datastore/sled.rs b/nexus/db-queries/src/db/datastore/sled.rs index 8809f4d60d..8d812f69cc 100644 --- a/nexus/db-queries/src/db/datastore/sled.rs +++ b/nexus/db-queries/src/db/datastore/sled.rs @@ -247,11 +247,20 @@ impl DataStore { } sql_function!(fn random() -> diesel::sql_types::Float); + + // We only actually care about one target here, so this + // query should have a `.limit(1)` attached. We fetch all + // sled targets to leave additional debugging information in + // the logs, for now.
let sled_targets = sled_targets .order(random()) - .limit(1) .get_results_async::(&conn) .await?; + info!( + opctx.log, + "found {} available sled targets", sled_targets.len(); + "sled_ids" => ?sled_targets, + ); if sled_targets.is_empty() { return Err(err.bail(SledReservationError::NotFound)); From 9f51dcbeb19ac5d317477c1e5553d13f4ad329d5 Mon Sep 17 00:00:00 2001 From: Andy Fiddaman Date: Mon, 4 Mar 2024 18:15:05 +0000 Subject: [PATCH 066/157] Upgrade clickhouse from v22.8.9.24 to v23.8.7.24 (#5127) --- oximeter/db/src/client.rs | 8 +- oximeter/db/src/configs/keeper_config.xml | 2 +- smf/clickhouse_keeper/keeper_config.xml | 2 +- smf/clickhouse_keeper/method_script.sh | 30 ++--- smf/profile/profile | 4 +- test-utils/src/dev/clickhouse.rs | 131 ++++++++++++++-------- tools/ci_download_clickhouse | 4 +- tools/clickhouse_checksums | 6 +- tools/clickhouse_version | 2 +- tools/install_builder_prerequisites.sh | 6 +- 10 files changed, 122 insertions(+), 73 deletions(-) diff --git a/oximeter/db/src/client.rs b/oximeter/db/src/client.rs index 2e3029a938..abea11aa06 100644 --- a/oximeter/db/src/client.rs +++ b/oximeter/db/src/client.rs @@ -50,6 +50,10 @@ use tokio::fs; use tokio::sync::Mutex; use uuid::Uuid; +const CLICKHOUSE_DB_MISSING: &'static str = "Database oximeter does not exist"; +const CLICKHOUSE_DB_VERSION_MISSING: &'static str = + "Table oximeter.version does not exist"; + #[usdt::provider(provider = "clickhouse_client")] mod probes { fn query__start(_: &usdt::UniqueId, sql: &str) {} @@ -856,10 +860,10 @@ impl Client { })?, Err(Error::Database(err)) // Case 1: The database has not been created. - if err.contains("Database oximeter doesn't exist") || + if err.contains(CLICKHOUSE_DB_MISSING) || // Case 2: The database has been created, but it's old (exists // prior to the version table). - err.contains("Table oximeter.version doesn't exist") => + err.contains(CLICKHOUSE_DB_VERSION_MISSING) => { warn!(self.log, "oximeter database does not exist, or is out-of-date"); 0 diff --git a/oximeter/db/src/configs/keeper_config.xml b/oximeter/db/src/configs/keeper_config.xml index 19ab99f909..ae9d2a9cc5 100644 --- a/oximeter/db/src/configs/keeper_config.xml +++ b/oximeter/db/src/configs/keeper_config.xml @@ -9,7 +9,7 @@ - + diff --git a/smf/clickhouse_keeper/keeper_config.xml b/smf/clickhouse_keeper/keeper_config.xml index ec114694cc..2d64c2c0a0 100644 --- a/smf/clickhouse_keeper/keeper_config.xml +++ b/smf/clickhouse_keeper/keeper_config.xml @@ -9,7 +9,7 @@ - + diff --git a/smf/clickhouse_keeper/method_script.sh b/smf/clickhouse_keeper/method_script.sh index 8499e0001f..a017409a98 100755 --- a/smf/clickhouse_keeper/method_script.sh +++ b/smf/clickhouse_keeper/method_script.sh @@ -39,20 +39,22 @@ KEEPER_HOST_01="${keepers[0]}" KEEPER_HOST_02="${keepers[1]}" KEEPER_HOST_03="${keepers[2]}" -# Generate unique reproduceable number IDs by removing letters from KEEPER_IDENTIFIER_* -# Keeper IDs must be numbers, and they cannot be reused (i.e. when a keeper node is -# unrecoverable the ID must be changed to something new). -# By trimming the hosts we can make sure all keepers will always be up to date when -# a new keeper is spun up. Clickhouse does not allow very large numbers, so we will -# be reducing to 7 characters. This should be enough entropy given the small amount -# of keepers we have. +# Generate unique reproduceable number IDs by removing letters from +# KEEPER_IDENTIFIER_* Keeper IDs must be numbers, and they cannot be reused +# (i.e. 
when a keeper node is unrecoverable the ID must be changed to something +# new). By trimming the hosts we can make sure all keepers will always be up to +# date when a new keeper is spun up. Clickhouse does not allow very large +# numbers, so we will be reducing to 7 characters. This should be enough +# entropy given the small amount of keepers we have. KEEPER_ID_01="$( echo "${KEEPER_HOST_01}" | tr -dc [:digit:] | cut -c1-7)" KEEPER_ID_02="$( echo "${KEEPER_HOST_02}" | tr -dc [:digit:] | cut -c1-7)" KEEPER_ID_03="$( echo "${KEEPER_HOST_03}" | tr -dc [:digit:] | cut -c1-7)" -# Identify the node type this is as this will influence how the config is constructed -# TODO(https://github.com/oxidecomputer/omicron/issues/3824): There are probably much better ways to do this service name lookup, but this works -# for now. The services contain the same IDs as the hostnames. +# Identify the node type this is as this will influence how the config is +# constructed +# TODO(https://github.com/oxidecomputer/omicron/issues/3824): There are +# probably much better ways to do this service name lookup, but this works for +# now. The services contain the same IDs as the hostnames. KEEPER_SVC="$(zonename | tr -dc [:digit:] | cut -c1-7)" if [[ $KEEPER_ID_01 == $KEEPER_SVC ]] then @@ -68,9 +70,9 @@ else exit "$SMF_EXIT_ERR_CONFIG" fi -# Setting environment variables this way is best practice, but has the downside of -# obscuring the field values to anyone ssh=ing into the zone. To mitigate this, -# we will be saving them to ${DATASTORE}/config_env_vars +# Setting environment variables this way is best practice, but has the downside +# of obscuring the field values to anyone ssh=ing into the zone. To mitigate +# this, we will be saving them to ${DATASTORE}/config_env_vars export CH_LOG="${DATASTORE}/clickhouse-keeper.log" export CH_ERROR_LOG="${DATASTORE}/clickhouse-keeper.err.log" export CH_LISTEN_ADDR=${LISTEN_ADDR} @@ -103,7 +105,7 @@ CH_KEEPER_HOST_03="${CH_KEEPER_HOST_03}"" echo $content >> "${DATASTORE}/config_env_vars" -# The clickhouse binary must be run from within the directory that contains it. +# The clickhouse binary must be run from within the directory that contains it. # Otherwise, it does not automatically detect the configuration files, nor does # it append them when necessary cd /opt/oxide/clickhouse_keeper/ diff --git a/smf/profile/profile b/smf/profile/profile index 73256cd6fd..a8378f7403 100644 --- a/smf/profile/profile +++ b/smf/profile/profile @@ -12,8 +12,8 @@ case "$HOSTNAME" in oxz_crucible*) PATH+=:/opt/oxide/crucible/bin ;; - oxz_clockhouse*) - PATH+=:/opt/oxide/clickhouse + oxz_clickhouse*) + PATH+=:/opt/oxide/clickhouse:/opt/oxide/clickhouse_keeper ;; oxz_external_dns*|oxz_internal_dns*) PATH+=:/opt/oxide/dns-server/bin diff --git a/test-utils/src/dev/clickhouse.rs b/test-utils/src/dev/clickhouse.rs index 220662d9bb..8c415d949e 100644 --- a/test-utils/src/dev/clickhouse.rs +++ b/test-utils/src/dev/clickhouse.rs @@ -29,6 +29,19 @@ const CLICKHOUSE_TIMEOUT: Duration = Duration::from_secs(30); // Timeout used when starting a ClickHouse keeper subprocess. const CLICKHOUSE_KEEPER_TIMEOUT: Duration = Duration::from_secs(30); +// The string to look for in a keeper log file that indicates that the server +// is ready. +const KEEPER_READY: &'static str = "Server initialized, waiting for quorum"; + +// The string to look for in a clickhouse log file that indicates that the +// server is ready. 
+const CLICKHOUSE_READY: &'static str = + " Application: Ready for connections"; + +// The string to look for in a clickhouse log file when trying to determine the +// port number on which it is listening. +const CLICKHOUSE_PORT: &'static str = "Application: Listening for http://[::1]"; + /// A `ClickHouseInstance` is used to start and manage a ClickHouse single node server process. #[derive(Debug)] pub struct ClickHouseInstance { @@ -157,9 +170,11 @@ impl ClickHouseInstance { .env("CH_REPLICA_NUMBER", r_number) .env("CH_REPLICA_HOST_01", "::1") .env("CH_REPLICA_HOST_02", "::1") - // ClickHouse servers have a small quirk, where when setting the keeper hosts as IPv6 localhost - // addresses in the replica configuration file, they must be wrapped in square brackets - // Otherwise, when running any query, a "Service not found" error appears. + // ClickHouse servers have a small quirk, where when setting the + // keeper hosts as IPv6 localhost addresses in the replica + // configuration file, they must be wrapped in square brackets + // Otherwise, when running any query, a "Service not found" error + // appears. .env("CH_KEEPER_HOST_01", "[::1]") .env("CH_KEEPER_HOST_02", "[::1]") .env("CH_KEEPER_HOST_03", "[::1]") @@ -171,7 +186,12 @@ impl ClickHouseInstance { let data_path = data_dir.root_path().to_path_buf(); let address = SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), port); - let result = wait_for_ready(data_dir.log_path()).await; + let result = wait_for_ready( + data_dir.log_path(), + CLICKHOUSE_TIMEOUT, + CLICKHOUSE_READY, + ) + .await; match result { Ok(()) => Ok(Self { data_dir: Some(data_dir), @@ -192,9 +212,9 @@ impl ClickHouseInstance { k_id: u16, config_path: PathBuf, ) -> Result { - // We assume that only 3 keepers will be run, and the ID of the keeper can only - // be one of "1", "2" or "3". This is to avoid having to pass the IDs of the - // other keepers as part of the function's parameters. + // We assume that only 3 keepers will be run, and the ID of the keeper + // can only be one of "1", "2" or "3". This is to avoid having to pass + // the IDs of the other keepers as part of the function's parameters. if ![1, 2, 3].contains(&k_id) { return Err(ClickHouseError::InvalidKeeperId.into()); } @@ -240,7 +260,12 @@ impl ClickHouseInstance { let data_path = data_dir.root_path().to_path_buf(); let address = SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), port); - let result = wait_for_ready(data_dir.keeper_log_path()).await; + let result = wait_for_ready( + data_dir.keeper_log_path(), + CLICKHOUSE_KEEPER_TIMEOUT, + KEEPER_READY, + ) + .await; match result { Ok(()) => Ok(Self { data_dir: Some(data_dir), @@ -641,8 +666,9 @@ pub async fn wait_for_port( Ok(p) } -// Parse the ClickHouse log file at the given path, looking for a line reporting the port number of -// the HTTP server. This is only used if the port is chosen by the OS, not the caller. +// Parse the ClickHouse log file at the given path, looking for a line +// reporting the port number of the HTTP server. This is only used if the port +// is chosen by the OS, not the caller. async fn discover_local_listening_port( path: &Utf8Path, timeout: Duration, @@ -655,23 +681,21 @@ async fn discover_local_listening_port( // Parse the clickhouse log for a port number. // -// NOTE: This function loops forever until the expected line is found. It should be run under a -// timeout, or some other mechanism for cancelling it. +// NOTE: This function loops forever until the expected line is found. 
It +// should be run under a timeout, or some other mechanism for cancelling it. async fn find_clickhouse_port_in_log( path: &Utf8Path, ) -> Result { let mut reader = BufReader::new(File::open(path).await?); - const NEEDLE: &str = - " Application: Listening for http://[::1]"; let mut lines = reader.lines(); loop { let line = lines.next_line().await?; match line { Some(line) => { - if let Some(needle_start) = line.find(NEEDLE) { + if let Some(needle_start) = line.find(CLICKHOUSE_PORT) { // Our needle ends with `http://[::1]`; we'll split on the // colon we expect to follow it to find the port. - let address_start = needle_start + NEEDLE.len(); + let address_start = needle_start + CLICKHOUSE_PORT.len(); return line[address_start..] .trim() .split(':') @@ -697,11 +721,12 @@ async fn find_clickhouse_port_in_log( // Wait for the ClickHouse log file to report it is ready to receive connections pub async fn wait_for_ready( log_path: Utf8PathBuf, + timeout: Duration, + needle: &str, ) -> Result<(), anyhow::Error> { let p = poll::wait_for_condition( || async { - let result = - discover_ready(&log_path, CLICKHOUSE_KEEPER_TIMEOUT).await; + let result = discover_ready(&log_path, timeout, needle).await; match result { Ok(ready) => Ok(ready), Err(e) => { @@ -721,40 +746,41 @@ pub async fn wait_for_ready( } }, &Duration::from_millis(500), - &CLICKHOUSE_KEEPER_TIMEOUT, + &timeout, ) .await .context("waiting to discover if ClickHouse is ready for connections")?; Ok(p) } -// Parse the ClickHouse log file at the given path, looking for a line reporting that the server -// is ready for connections. +// Parse the ClickHouse log file at the given path, looking for a line +// reporting that the server is ready for connections. async fn discover_ready( path: &Utf8Path, timeout: Duration, + needle: &str, ) -> Result<(), ClickHouseError> { let timeout = Instant::now() + timeout; - tokio::time::timeout_at(timeout, clickhouse_ready_from_log(path)) + tokio::time::timeout_at(timeout, clickhouse_ready_from_log(path, needle)) .await .map_err(|_| ClickHouseError::Timeout)? } // Parse the clickhouse log to know if the server is ready for connections. // -// NOTE: This function loops forever until the expected line is found. It should be run under a -// timeout, or some other mechanism for cancelling it. +// NOTE: This function loops forever until the expected line is found. It +// should be run under a timeout, or some other mechanism for cancelling it. 
async fn clickhouse_ready_from_log( path: &Utf8Path, + needle: &str, ) -> Result<(), ClickHouseError> { let mut reader = BufReader::new(File::open(path).await?); - const READY: &str = " Application: Ready for connections"; let mut lines = reader.lines(); loop { let line = lines.next_line().await?; match line { Some(line) => { - if let Some(_) = line.find(READY) { + if let Some(_) = line.find(needle) { return Ok(()); } } @@ -775,7 +801,7 @@ async fn clickhouse_ready_from_log( mod tests { use super::{ discover_local_listening_port, discover_ready, ClickHouseError, - CLICKHOUSE_TIMEOUT, + CLICKHOUSE_PORT, CLICKHOUSE_READY, CLICKHOUSE_TIMEOUT, }; use camino_tempfile::NamedUtf8TempFile; use std::process::Stdio; @@ -824,14 +850,16 @@ mod tests { writeln!(file, "A garbage line").unwrap(); writeln!( file, - "2023.07.31 20:12:38.936192 [ 82373 ] Application: Ready for connections.", + "2023.07.31 20:12:38.936192 [ 82373 ] {}", + CLICKHOUSE_READY, ) .unwrap(); writeln!(file, "Another garbage line").unwrap(); file.flush().unwrap(); assert!(matches!( - discover_ready(file.path(), CLICKHOUSE_TIMEOUT).await, + discover_ready(file.path(), CLICKHOUSE_TIMEOUT, CLICKHOUSE_READY) + .await, Ok(()) )); } @@ -849,17 +877,23 @@ mod tests { writeln!(file, "Another garbage line").unwrap(); file.flush().unwrap(); assert!(matches!( - discover_ready(file.path(), Duration::from_secs(1)).await, + discover_ready( + file.path(), + Duration::from_secs(1), + CLICKHOUSE_READY + ) + .await, Err(ClickHouseError::Timeout {}) )); } // A regression test for #131. // - // The function `discover_local_listening_port` initially read from the log file until EOF, but - // there's no guarantee that ClickHouse has written the port we're searching for before the - // reader consumes the whole file. This test confirms that the file is read until the line is - // found, ignoring EOF, at least until the timeout is hit. + // The function `discover_local_listening_port` initially read from the log + // file until EOF, but there's no guarantee that ClickHouse has written the + // port we're searching for before the reader consumes the whole file. This + // test confirms that the file is read until the line is found, ignoring + // EOF, at least until the timeout is hit. #[tokio::test] async fn test_discover_local_listening_port_slow_write() { // In this case the writer is slightly "slower" than the reader. @@ -880,9 +914,11 @@ mod tests { assert!(read_log_file(reader_timeout, writer_interval).await.is_err()); } - // Implementation of the above tests, simulating simultaneous reading/writing of the log file + // Implementation of the above tests, simulating simultaneous + // reading/writing of the log file // - // This uses Tokio's test utilities to manage time, rather than relying on timeouts. + // This uses Tokio's test utilities to manage time, rather than relying on + // timeouts. async fn read_log_file( reader_timeout: Duration, writer_interval: Duration, @@ -904,12 +940,14 @@ mod tests { // Start a task that slowly writes lines to the log file. // - // NOTE: This looks overly complicated, and it is. We have to wrap this in a mutex because - // both this function, and the writer task we're spawning, need access to the file. They - // may complete in any order, and so it's not possible to give one of them ownership over - // the `NamedTempFile`. If the owning task completes, that may delete the file before the - // other task accesses it. 
So we need interior mutability (because one of the references is - // mutable for writing), and _this_ scope must own it. + // NOTE: This looks overly complicated, and it is. We have to wrap this + // in a mutex because both this function, and the writer task we're + // spawning, need access to the file. They may complete in any order, + // and so it's not possible to give one of them ownership over the + // `NamedTempFile`. If the owning task completes, that may delete the + // file before the other task accesses it. So we need interior + // mutability (because one of the references is mutable for writing), + // and _this_ scope must own it. let file = Arc::new(Mutex::new(NamedUtf8TempFile::new()?)); let path = file.lock().await.path().to_path_buf(); let writer_file = file.clone(); @@ -927,13 +965,13 @@ mod tests { // (https://github.com/oxidecomputer/omicron/issues/3580). write_and_wait( &mut file, - " Application: List".to_string(), + (&CLICKHOUSE_PORT[..30]).to_string(), writer_interval, ) .await; write_and_wait( &mut file, - format!("ening for http://[::1]:{}\n", EXPECTED_PORT), + format!("{}:{}\n", &CLICKHOUSE_PORT[30..], EXPECTED_PORT), writer_interval, ) .await; @@ -950,8 +988,9 @@ mod tests { // "Run" the test. // - // Note that the futures for the reader/writer tasks must be pinned to the stack, so that - // they may be polled on multiple passes through the select loop without consuming them. + // Note that the futures for the reader/writer tasks must be pinned to + // the stack, so that they may be polled on multiple passes through the + // select loop without consuming them. tokio::pin!(writer_task); tokio::pin!(reader_task); let mut poll_writer = true; diff --git a/tools/ci_download_clickhouse b/tools/ci_download_clickhouse index 675566fad7..93201a772b 100755 --- a/tools/ci_download_clickhouse +++ b/tools/ci_download_clickhouse @@ -19,7 +19,8 @@ DOWNLOAD_DIR="$TARGET_DIR/downloads" # Location where the final clickhouse directory should end up. 
DEST_DIR="./$TARGET_DIR/clickhouse" -# If you change this, you must also update the md5sums below +# If you change the version in clickhouse_version, you must also update the +# md5sums in clickhouse_checksums CIDL_VERSION="$(cat "$SOURCE_DIR/clickhouse_version")" source "$SOURCE_DIR/clickhouse_checksums" @@ -113,7 +114,6 @@ function configure_os CIDL_PLATFORM="illumos" CIDL_MD5="$CIDL_MD5_ILLUMOS" CIDL_MD5FUNC="do_md5sum" - CIDL_DASHREV=-1 ;; *) fail "unsupported OS: $1" diff --git a/tools/clickhouse_checksums b/tools/clickhouse_checksums index 92a5237301..afddb15cab 100644 --- a/tools/clickhouse_checksums +++ b/tools/clickhouse_checksums @@ -1,3 +1,3 @@ -CIDL_MD5_DARWIN="20603974a929926591fca70ff1df0e45" -CIDL_MD5_LINUX="ea909519bd9d989fd5d090fd9bdd42f1" -CIDL_MD5_ILLUMOS="7702939ce5b4b51846a1ba39f1392306" +CIDL_MD5_DARWIN="3e20c3284b7e6b0cfcfedf622ecf547a" +CIDL_MD5_LINUX="f6c30a25a86deac3bad6c50dcf758fd5" +CIDL_MD5_ILLUMOS="409222de8ecb59e5dd97dcc942ccdffe" diff --git a/tools/clickhouse_version b/tools/clickhouse_version index 93b98bf738..0a988071cd 100644 --- a/tools/clickhouse_version +++ b/tools/clickhouse_version @@ -1 +1 @@ -v22.8.9.24 \ No newline at end of file +v23.8.7.24 diff --git a/tools/install_builder_prerequisites.sh b/tools/install_builder_prerequisites.sh index 0427629960..5fa8ec11ba 100755 --- a/tools/install_builder_prerequisites.sh +++ b/tools/install_builder_prerequisites.sh @@ -128,6 +128,7 @@ function install_packages { fi elif [[ "${HOST_OS}" == "SunOS" ]]; then CLANGVER=15 + RTVER=13 PGVER=13 packages=( "pkg:/package/pkg" @@ -135,6 +136,8 @@ function install_packages { "library/postgresql-$PGVER" "pkg-config" "library/libxmlsec1" + "system/library/gcc-runtime@$RTVER" + "system/library/g++-runtime@$RTVER" # "bindgen leverages libclang to preprocess, parse, and type check C and C++ header files." "pkg:/ooce/developer/clang-$CLANGVER" "system/library/gcc-runtime" @@ -159,7 +162,8 @@ function install_packages { } pkg mediator -a - pkg list -v "${packages[@]}" + pkg publisher + pkg list -afv "${packages[@]}" elif [[ "${HOST_OS}" == "Darwin" ]]; then packages=( 'coreutils' From 55cc240236a41fa0288aaa0c7a1ea3b3b5e66c42 Mon Sep 17 00:00:00 2001 From: Alan Hanson Date: Mon, 4 Mar 2024 10:39:18 -0800 Subject: [PATCH 067/157] Update Propolis and Crucible to latest (#5187) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Crucible changes: Per client, queue-based backpressure (#1186) A builder for the Downstairs Downstairs struct. (#1152) Update Rust to v1.76.0 (#1153) Deactivate the read only parent after a scrub (#1180) Start byte-based backpressure earlier (#1179) Tweak CI scripts to fix warnings (#1178) Make `gw_ds_complete` less public (#1175) Verify extent under repair is valid after copying files (#1159) Remove individual panic setup, use global panic settings (#1174) [smf] Use new zone network config service (#1096) Move a few methods into downstairs (#1160) Remove extra clone in upstairs read (#1163) Make `crucible-downstairs` not depend on upstairs (#1165) Update Rust crate rusqlite to 0.31 (#1171) Update Rust crate reedline to 0.29.0 (#1170) Update Rust crate clap to 4.5 (#1169) Update Rust crate indicatif to 0.17.8 (#1168) Update progenitor to bc0bb4b (#1164) Do not 500 on snapshot delete for deleted region (#1162) Drop jobs from Offline downstairs. 
(#1157) `Mutex` → `Work` (#1156) Added a contributing.md (#1158) Remove ExtentFlushClose::source_downstairs (#1154) Remove unnecessary mutexes from Downstairs (#1132) Propolis changes: PHD: improve Windows reliability (#651) Update progenitor and omicron deps Clean up VMM resource on server shutdown Remove Inventory mechanism Update vergen dependency Properly handle pre/post illumos#16183 fixups PHD: add `pfexec` to xtask phd-runner invocation (#647) PHD: add Windows Server 2016 adapter & improve WS2016/2019 reliability (#646) PHD: use `clap` for more `cargo xtask phd` args (#645) PHD: several `cargo xtask phd` CLI fixes (#642) PHD: Use ZFS clones for file-backed disks (#640) PHD: improve ctrl-c handling (#634) Co-authored-by: Alan Hanson --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 12 ++++++------ package-manifest.toml | 12 ++++++------ 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f3ea50279..7a491135b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -463,7 +463,7 @@ dependencies = [ [[package]] name = "bhyve_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=488a70c644e332dae98447292a95e45074c8107c#488a70c644e332dae98447292a95e45074c8107c" +source = "git+https://github.com/oxidecomputer/propolis?rev=a9fdbc464db4cbd4ac0f43ffa0d045b4b02e0c1c#a9fdbc464db4cbd4ac0f43ffa0d045b4b02e0c1c" dependencies = [ "bhyve_api_sys", "libc", @@ -473,7 +473,7 @@ dependencies = [ [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=488a70c644e332dae98447292a95e45074c8107c#488a70c644e332dae98447292a95e45074c8107c" +source = "git+https://github.com/oxidecomputer/propolis?rev=a9fdbc464db4cbd4ac0f43ffa0d045b4b02e0c1c#a9fdbc464db4cbd4ac0f43ffa0d045b4b02e0c1c" dependencies = [ "libc", "strum 0.26.1", @@ -1315,7 +1315,7 @@ dependencies = [ [[package]] name = "crucible-agent-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=796dce526dd7ed7b52a0429a486ccba4a9da1ce5#796dce526dd7ed7b52a0429a486ccba4a9da1ce5" +source = "git+https://github.com/oxidecomputer/crucible?rev=1c1574fb721f98f2df1b23e3fd27d83be018882e#1c1574fb721f98f2df1b23e3fd27d83be018882e" dependencies = [ "anyhow", "chrono", @@ -1331,7 +1331,7 @@ dependencies = [ [[package]] name = "crucible-pantry-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=796dce526dd7ed7b52a0429a486ccba4a9da1ce5#796dce526dd7ed7b52a0429a486ccba4a9da1ce5" +source = "git+https://github.com/oxidecomputer/crucible?rev=1c1574fb721f98f2df1b23e3fd27d83be018882e#1c1574fb721f98f2df1b23e3fd27d83be018882e" dependencies = [ "anyhow", "chrono", @@ -1348,7 +1348,7 @@ dependencies = [ [[package]] name = "crucible-smf" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=796dce526dd7ed7b52a0429a486ccba4a9da1ce5#796dce526dd7ed7b52a0429a486ccba4a9da1ce5" +source = "git+https://github.com/oxidecomputer/crucible?rev=1c1574fb721f98f2df1b23e3fd27d83be018882e#1c1574fb721f98f2df1b23e3fd27d83be018882e" dependencies = [ "crucible-workspace-hack", "libc", @@ -6623,7 +6623,7 @@ dependencies = [ [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=488a70c644e332dae98447292a95e45074c8107c#488a70c644e332dae98447292a95e45074c8107c" +source = "git+https://github.com/oxidecomputer/propolis?rev=a9fdbc464db4cbd4ac0f43ffa0d045b4b02e0c1c#a9fdbc464db4cbd4ac0f43ffa0d045b4b02e0c1c" dependencies = [ "async-trait", "base64", 
@@ -6644,7 +6644,7 @@ dependencies = [ [[package]] name = "propolis-mock-server" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=488a70c644e332dae98447292a95e45074c8107c#488a70c644e332dae98447292a95e45074c8107c" +source = "git+https://github.com/oxidecomputer/propolis?rev=a9fdbc464db4cbd4ac0f43ffa0d045b4b02e0c1c#a9fdbc464db4cbd4ac0f43ffa0d045b4b02e0c1c" dependencies = [ "anyhow", "atty", @@ -6674,7 +6674,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=488a70c644e332dae98447292a95e45074c8107c#488a70c644e332dae98447292a95e45074c8107c" +source = "git+https://github.com/oxidecomputer/propolis?rev=a9fdbc464db4cbd4ac0f43ffa0d045b4b02e0c1c#a9fdbc464db4cbd4ac0f43ffa0d045b4b02e0c1c" dependencies = [ "schemars", "serde", diff --git a/Cargo.toml b/Cargo.toml index 6f23ab929c..1d701c550e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -182,9 +182,9 @@ cookie = "0.18" criterion = { version = "0.5.1", features = [ "async_tokio" ] } crossbeam = "0.8" crossterm = { version = "0.27.0", features = ["event-stream"] } -crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "796dce526dd7ed7b52a0429a486ccba4a9da1ce5" } -crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "796dce526dd7ed7b52a0429a486ccba4a9da1ce5" } -crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "796dce526dd7ed7b52a0429a486ccba4a9da1ce5" } +crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "1c1574fb721f98f2df1b23e3fd27d83be018882e" } +crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "1c1574fb721f98f2df1b23e3fd27d83be018882e" } +crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "1c1574fb721f98f2df1b23e3fd27d83be018882e" } csv = "1.3.0" curve25519-dalek = "4" datatest-stable = "0.2.3" @@ -314,9 +314,9 @@ prettyplease = { version = "0.2.16", features = ["verbatim"] } proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } -bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "488a70c644e332dae98447292a95e45074c8107c" } -propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "488a70c644e332dae98447292a95e45074c8107c" } -propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "488a70c644e332dae98447292a95e45074c8107c" } +bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "a9fdbc464db4cbd4ac0f43ffa0d045b4b02e0c1c" } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "a9fdbc464db4cbd4ac0f43ffa0d045b4b02e0c1c" } +propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "a9fdbc464db4cbd4ac0f43ffa0d045b4b02e0c1c" } proptest = "1.4.0" quote = "1.0" rand = "0.8.5" diff --git a/package-manifest.toml b/package-manifest.toml index c474a52736..065da4bf42 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -441,10 +441,10 @@ only_for_targets.image = "standard" # 3. 
Use source.type = "manual" instead of "prebuilt" source.type = "prebuilt" source.repo = "crucible" -source.commit = "fe0c5c7909707a0f826025be4fe2bbf5f6e0206f" +source.commit = "1c1574fb721f98f2df1b23e3fd27d83be018882e" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible.sha256.txt -source.sha256 = "5da4f93b16fc7c0f3cc3a67919dbaa3f143cc07b703183a236f5c98b61504d15" +source.sha256 = "a14ce9d6b17c6c8427d526e044bb1196ee81316674936873ac9dc1d7ca51459d" output.type = "zone" output.intermediate_only = true @@ -453,10 +453,10 @@ service_name = "crucible_pantry_prebuilt" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "crucible" -source.commit = "fe0c5c7909707a0f826025be4fe2bbf5f6e0206f" +source.commit = "1c1574fb721f98f2df1b23e3fd27d83be018882e" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-pantry.sha256.txt -source.sha256 = "ed5027cc37c5ba4f2b9a568528f5bb49deedccaaa60bd770311c7bface6aa02b" +source.sha256 = "8839a3748cd2926f61244c99344d0802e59ea4673a5f31a2aaad5d759176aa80" output.type = "zone" output.intermediate_only = true @@ -468,10 +468,10 @@ service_name = "propolis-server" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "propolis" -source.commit = "c7cdaf1875d259e29ca50a14b77b0bfd9dfe443d" +source.commit = "a9fdbc464db4cbd4ac0f43ffa0d045b4b02e0c1c" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/propolis/image//propolis-server.sha256.txt -source.sha256 = "0203b7f702377c877c4132851ca102d68cd8fd2c20e4fd5b59d950cbb07fd9ff" +source.sha256 = "7998a71c4e905a4bf9ef2d2863b7695d0bb4d1b34322aa3ce20d173d4a7ee77a" output.type = "zone" [package.mg-ddm-gz] From 7f43a7e3f1d8290fd721faa0d13136a2c6c0c8e1 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Mon, 4 Mar 2024 15:44:12 -0500 Subject: [PATCH 068/157] Bump PSC/Sidecar SP to v1.0.10 (#5188) This syncs them with the gimlet --- tools/hubris_checksums | 8 ++++---- tools/hubris_version | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/hubris_checksums b/tools/hubris_checksums index 65a2739fcd..83a22f9a43 100644 --- a/tools/hubris_checksums +++ b/tools/hubris_checksums @@ -2,7 +2,7 @@ 9cba42b8d6c4dbd1bcc805b44e14e6bac7fa590f1faaaa47664f7aa254e91602 build-gimlet-d-image-default-v1.0.10.zip 3ace1b1d692e3c23815665b932dadb42b58abc1942fdd4564c1884a13fa0f0cb build-gimlet-e-image-default-v1.0.10.zip b3b05e1926bb0f66be8587a67084c76cd760da5db0f1e288c265a03cf80637e9 build-gimlet-f-image-default-v1.0.10.zip -0fa33a057dec7301280338cf5a80a5198b5d95475dba350a807979f1a53ce5c7 build-psc-b-image-default-v1.0.9.zip -88e7aa91fe4f976753ccfc8ba9a57a0521c54d9ef35504cf16e7b1ce78c87465 build-psc-c-image-default-v1.0.9.zip -cfbf21fef84b38c2a5ca3c0b45703c20c2605c6bf5d599b53b26e3a2d09d756d build-sidecar-b-image-default-v1.0.9.zip -c98b89993f1a9b5219bbabf06897db2f92d60f6a3afb4374cb8e6e79d57fdfe4 build-sidecar-c-image-default-v1.0.9.zip +2d1313c4f282c49a411c32c643347dcb63bb063ae47539b2a1e4003da5a20402 build-psc-b-image-default-v1.0.10.zip +bb16ee95be863fe5bb7a155d07732a42fc56178ce08bcb9323f46315e9616e62 build-psc-c-image-default-v1.0.10.zip +97763288ca52a779334422cd7f3aff892341f38f79d1022a1d7ff49b85fb8356 build-sidecar-b-image-default-v1.0.10.zip +814098557e67f5f8720e95fb76b4f694e7edbd26cd3aa120266b4238641d7029 build-sidecar-c-image-default-v1.0.10.zip diff --git 
a/tools/hubris_version b/tools/hubris_version index cb9e30d980..8098ff8759 100644 --- a/tools/hubris_version +++ b/tools/hubris_version @@ -1 +1 @@ -TAGS=(gimlet-v1.0.10 psc-v1.0.9 sidecar-v1.0.9) +TAGS=(gimlet-v1.0.10 psc-v1.0.10 sidecar-v1.0.10) From 5cf0a29e5630ebb37df54d04bbdacd2e9a6a9a9d Mon Sep 17 00:00:00 2001 From: David Crespo Date: Mon, 4 Mar 2024 15:37:12 -0600 Subject: [PATCH 069/157] Bump web console (React Canary) (#5193) https://github.com/oxidecomputer/console/compare/25fb506d...269c2c82 * [269c2c82](https://github.com/oxidecomputer/console/commit/269c2c82) standardize on ~/ instead of app/ * [e7ff4efb](https://github.com/oxidecomputer/console/commit/e7ff4efb) eliminate @oxide/ui alias and barrel file * [cd8df25a](https://github.com/oxidecomputer/console/commit/cd8df25a) oxidecomputer/console#2028 * [a72768f8](https://github.com/oxidecomputer/console/commit/a72768f8) oxidecomputer/console#2027 * [67f6d908](https://github.com/oxidecomputer/console/commit/67f6d908) oxidecomputer/console#2022 * [8f1133ca](https://github.com/oxidecomputer/console/commit/8f1133ca) oxidecomputer/console#2020 * [555c2f65](https://github.com/oxidecomputer/console/commit/555c2f65) oxidecomputer/console#2017 * [8690be44](https://github.com/oxidecomputer/console/commit/8690be44) oxidecomputer/console#2018 * [fa0bcf7a](https://github.com/oxidecomputer/console/commit/fa0bcf7a) oxidecomputer/console#2016 --- tools/console_version | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/console_version b/tools/console_version index f5534b5c50..24d16b17eb 100644 --- a/tools/console_version +++ b/tools/console_version @@ -1,2 +1,2 @@ -COMMIT="25fb506d0527f5508dc90b118672c0bf8ce14dd3" -SHA2="1083f693c66c275d0cf76e120aa923a51c3188dd9a7996eb51dc0fe4f611507a" +COMMIT="269c2c82018d09d442a932474d84366a54837068" +SHA2="7a6a04f43c6189384065675ebb7c429de2b8369ffbb7c1989a13aafe779be2d6" From d9111d3ccc92830a39a7f1aa08bc2920d4e0c9b3 Mon Sep 17 00:00:00 2001 From: Rain Date: Mon, 4 Mar 2024 19:37:00 -0800 Subject: [PATCH 070/157] [meta] update whoami to 1.5.0, remove patch (#5194) whoami 1.5.0 contains the fix for https://github.com/oxidecomputer/security-operations/issues/43. 
--- Cargo.lock | 26 ++++---------------------- Cargo.toml | 5 ----- 2 files changed, 4 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7a491135b5..2e822122c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -883,12 +883,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" -[[package]] -name = "cfg_aliases" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" - [[package]] name = "chacha20" version = "0.9.1" @@ -4629,18 +4623,6 @@ dependencies = [ "libc", ] -[[package]] -name = "nix" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" -dependencies = [ - "bitflags 2.4.0", - "cfg-if", - "cfg_aliases", - "libc", -] - [[package]] name = "nodrop" version = "0.1.14" @@ -10270,12 +10252,12 @@ dependencies = [ [[package]] name = "whoami" -version = "1.4.1" -source = "git+https://github.com/oxidecomputer/whoami?branch=use-nix#da2a26e695849f1ffc394627ba27a6a473650705" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fec781d48b41f8163426ed18e8fc2864c12937df9ce54c88ede7bd47270893e" dependencies = [ - "nix 0.28.0", + "redox_syscall 0.4.1", "wasite", - "wasm-bindgen", "web-sys", ] diff --git a/Cargo.toml b/Cargo.toml index 1d701c550e..9d556c7b68 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -621,11 +621,6 @@ opt-level = 3 git = 'https://github.com/oxidecomputer/pq-sys' branch = "oxide/omicron" -# See https://github.com/oxidecomputer/security-operations/issues/43. -[patch.crates-io.whoami] -git = "https://github.com/oxidecomputer/whoami" -branch = "use-nix" - # Using the workspace-hack via this patch directive means that it only applies # while building within this workspace. 
If another workspace imports a crate # from here via a git dependency, it will not have the workspace-hack applied From 25e905b63dba5783aa4fc962f5f2582d30906950 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 23:41:52 -0800 Subject: [PATCH 071/157] Bump mio from 0.8.9 to 0.8.11 (#5195) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- workspace-hack/Cargo.toml | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2e822122c2..962f515ebd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4130,9 +4130,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.9" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "log", diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 01b7e440ae..aa2523d6ff 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -229,45 +229,45 @@ zip = { version = "0.6.6", default-features = false, features = ["bzip2", "defla [target.x86_64-unknown-linux-gnu.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } -mio = { version = "0.8.9", features = ["net", "os-ext"] } +mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.x86_64-unknown-linux-gnu.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } -mio = { version = "0.8.9", features = ["net", "os-ext"] } +mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.x86_64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -mio = { version = "0.8.9", features = ["net", "os-ext"] } +mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.x86_64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -mio = { version = "0.8.9", features = ["net", "os-ext"] } +mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.aarch64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -mio = { version = "0.8.9", features = ["net", "os-ext"] } +mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.aarch64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -mio = { version = "0.8.9", 
features = ["net", "os-ext"] } +mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.x86_64-unknown-illumos.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } -mio = { version = "0.8.9", features = ["net", "os-ext"] } +mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } @@ -276,7 +276,7 @@ toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", featu [target.x86_64-unknown-illumos.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } -mio = { version = "0.8.9", features = ["net", "os-ext"] } +mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } From a505c234948df7eff042d9a0dcd458dc520ede14 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 5 Mar 2024 08:54:14 +0000 Subject: [PATCH 072/157] chore(deps): update rust crate indexmap to 2.2.4 (#5167) --- Cargo.lock | 34 +++++++++++++++++----------------- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 962f515ebd..1ed8bbdfef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1978,7 +1978,7 @@ dependencies = [ "hostname", "http 0.2.11", "hyper 0.14.27", - "indexmap 2.2.3", + "indexmap 2.2.5", "multer", "openapiv3", "paste", @@ -2781,7 +2781,7 @@ dependencies = [ "debug-ignore", "fixedbitset", "guppy-workspace-hack", - "indexmap 2.2.3", + "indexmap 2.2.5", "itertools 0.12.1", "nested", "once_cell", @@ -2813,7 +2813,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.11", - "indexmap 2.2.3", + "indexmap 2.2.5", "slab", "tokio", "tokio-util", @@ -3408,9 +3408,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -5397,7 +5397,7 @@ dependencies = [ "hex", "hmac", "hyper 0.14.27", - "indexmap 2.2.3", + "indexmap 2.2.5", "inout", "ipnetwork", "itertools 0.10.5", @@ -5521,7 +5521,7 @@ version = "0.4.0" source = "git+https://github.com/oxidecomputer/openapi-lint?branch=main#ef442ee4343e97b6d9c217d3e7533962fe7d7236" dependencies = [ "heck 0.4.1", - "indexmap 2.2.3", + "indexmap 2.2.5", "lazy_static", "openapiv3", "regex", @@ -5533,7 +5533,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc02deea53ffe807708244e5914f6b099ad7015a207ee24317c22112e17d9c5c" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "serde", "serde_json", ] @@ -5790,7 +5790,7 @@ dependencies = [ "expectorate", "futures", "highway", - "indexmap 
2.2.3", + "indexmap 2.2.5", "itertools 0.12.1", "omicron-common", "omicron-test-utils", @@ -6179,7 +6179,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.2.3", + "indexmap 2.2.5", "serde", "serde_derive", ] @@ -6571,7 +6571,7 @@ dependencies = [ "getopts", "heck 0.4.1", "http 0.2.11", - "indexmap 2.2.3", + "indexmap 2.2.5", "openapiv3", "proc-macro2", "quote", @@ -7906,7 +7906,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.3", + "indexmap 2.2.5", "serde", "serde_derive", "serde_json", @@ -7932,7 +7932,7 @@ version = "0.9.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "itoa", "ryu", "serde", @@ -9322,7 +9322,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "serde", "serde_spanned", "toml_datetime", @@ -9335,7 +9335,7 @@ version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c1b5fd4128cc8d3e0cb74d4ed9a9cc7c7284becd4df68f5f940e1ad123606f6" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "serde", "serde_spanned", "toml_datetime", @@ -9854,7 +9854,7 @@ dependencies = [ "derive-where", "either", "futures", - "indexmap 2.2.3", + "indexmap 2.2.5", "indicatif", "libsw", "linear-map", @@ -10274,7 +10274,7 @@ dependencies = [ "crossterm", "futures", "humantime", - "indexmap 2.2.3", + "indexmap 2.2.5", "indicatif", "itertools 0.12.1", "omicron-common", diff --git a/Cargo.toml b/Cargo.toml index 9d556c7b68..5a477da0a2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -231,7 +231,7 @@ hyper = "0.14" hyper-rustls = "0.26.0" hyper-staticfile = "0.9.5" illumos-utils = { path = "illumos-utils" } -indexmap = "2.2.3" +indexmap = "2.2.5" indicatif = { version = "0.17.8", features = ["rayon"] } installinator = { path = "installinator" } installinator-artifactd = { path = "installinator-artifactd" } diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index aa2523d6ff..e70c7c329e 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -58,7 +58,7 @@ hashbrown = { version = "0.14.3", features = ["raw"] } hex = { version = "0.4.3", features = ["serde"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } hyper = { version = "0.14.27", features = ["full"] } -indexmap = { version = "2.2.3", features = ["serde"] } +indexmap = { version = "2.2.5", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } ipnetwork = { version = "0.20.0", features = ["schemars"] } itertools = { version = "0.10.5" } @@ -164,7 +164,7 @@ hashbrown = { version = "0.14.3", features = ["raw"] } hex = { version = "0.4.3", features = ["serde"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } hyper = { version = "0.14.27", features = ["full"] } -indexmap = { version = "2.2.3", features = ["serde"] } +indexmap = { version = "2.2.5", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } ipnetwork = { version = "0.20.0", features = ["schemars"] } itertools = { version = "0.10.5" } From 1f26c66921b9215bfe11d750514939bcdc11ae12 Mon Sep 17 
00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 5 Mar 2024 10:06:38 +0000 Subject: [PATCH 073/157] chore(deps): update taiki-e/install-action digest to dabb9c1 (#5199) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`c6ffb58` -> `dabb9c1`](https://togithub.com/taiki-e/install-action/compare/c6ffb58...dabb9c1) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index e3e032806c..cac987b277 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@c6ffb5827ec9fecafc811f72ec5d7854680dc6ca # v2 + uses: taiki-e/install-action@dabb9c1ee51c21c545764a0d517f069ff52e6477 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From b664ded41efee55bf49942604bf7f36fa0abd6f7 Mon Sep 17 00:00:00 2001 From: Alan Hanson Date: Tue, 5 Mar 2024 10:43:07 -0800 Subject: [PATCH 074/157] Add a show-empty option to oxlog (#5186) This adds an option flag (off by default) that will show matching log files even if they are zero length. Co-authored-by: Alan Hanson --- dev-tools/oxlog/src/bin/oxlog.rs | 7 ++++++- dev-tools/oxlog/src/lib.rs | 24 ++++++++++++++++++------ 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/dev-tools/oxlog/src/bin/oxlog.rs b/dev-tools/oxlog/src/bin/oxlog.rs index ef79605dda..88e067c382 100644 --- a/dev-tools/oxlog/src/bin/oxlog.rs +++ b/dev-tools/oxlog/src/bin/oxlog.rs @@ -4,7 +4,7 @@ //! Tool for discovering oxide related logfiles on sleds -use clap::{Args, Parser, Subcommand}; +use clap::{ArgAction, Args, Parser, Subcommand}; use oxlog::{Filter, LogFile, Zones}; #[derive(Debug, Parser)] @@ -50,6 +50,10 @@ struct FilterArgs { // Print only the extra log files #[arg(short, long)] extra: bool, + + /// Show log files even if they are empty + #[arg(short, long, action=ArgAction::SetTrue)] + show_empty: bool, } fn main() -> Result<(), anyhow::Error> { @@ -68,6 +72,7 @@ fn main() -> Result<(), anyhow::Error> { current: filter.current, archived: filter.archived, extra: filter.extra, + show_empty: filter.show_empty, }; let print_metadata = |f: &LogFile| { println!( diff --git a/dev-tools/oxlog/src/lib.rs b/dev-tools/oxlog/src/lib.rs index a4774fabbc..625d360368 100644 --- a/dev-tools/oxlog/src/lib.rs +++ b/dev-tools/oxlog/src/lib.rs @@ -75,6 +75,9 @@ pub struct Filter { /// standard paths or don't follow the naming conventions of SMF service /// files. e.g.
`/pool/ext/e12f29b8-1ab8-431e-bc96-1c1298947980/crypt/zone/oxz_cockroachdb_8bbea076-ff60-4330-8302-383e18140ef3/root/data/logs/cockroach.log` pub extra: bool, + + /// Show a log file even if it has zero size. + pub show_empty: bool, } /// Path and metadata about a logfile @@ -264,17 +267,21 @@ impl Zones { // 'archived'. These files have not yet been migrated into the debug // directory. if filter.current || filter.archived { - load_svc_logs(paths.primary.clone(), &mut output); + load_svc_logs( + paths.primary.clone(), + &mut output, + filter.show_empty, + ); } if filter.archived { for dir in paths.debug.clone() { - load_svc_logs(dir, &mut output); + load_svc_logs(dir, &mut output, filter.show_empty); } } if filter.extra { for (svc_name, dir) in paths.extra.clone() { - load_extra_logs(dir, svc_name, &mut output); + load_extra_logs(dir, svc_name, &mut output, filter.show_empty); } } output @@ -320,7 +327,11 @@ pub fn oxide_smf_service_name_from_log_file_name( // Given a directory, find all oxide specific SMF service logs and return them // mapped to their inferred service name. -fn load_svc_logs(dir: Utf8PathBuf, logs: &mut BTreeMap) { +fn load_svc_logs( + dir: Utf8PathBuf, + logs: &mut BTreeMap, + show_empty: bool, +) { let Ok(entries) = dir.read_dir_utf8() else { return; }; @@ -344,7 +355,7 @@ fn load_svc_logs(dir: Utf8PathBuf, logs: &mut BTreeMap) { }; logfile.read_metadata(&entry); - if logfile.size == Some(0) { + if logfile.size == Some(0) && !show_empty { // skip 0 size files continue; } @@ -368,6 +379,7 @@ fn load_extra_logs( dir: Utf8PathBuf, svc_name: &str, logs: &mut BTreeMap, + show_empty: bool, ) { let Ok(entries) = dir.read_dir_utf8() else { return; }; @@ -384,7 +396,7 @@ fn load_extra_logs( path.push(filename); let mut logfile = LogFile::new(path); logfile.read_metadata(&entry); - if logfile.size == Some(0) { + if logfile.size == Some(0) && !show_empty { // skip 0 size files continue; } From 94bc6f542efc54a06bf6e17159e3b896a61959e5 Mon Sep 17 00:00:00 2001 From: Rain Date: Tue, 5 Mar 2024 13:56:44 -0800 Subject: [PATCH 075/157] [common] move config types to a new nexus-config crate (#5198) Part of https://github.com/oxidecomputer/security-operations/issues/43 -- move these types into their own crate so omicron-common no longer depends on tokio-postgres. There are also other things that can be moved into their own crates, but nothing immediately security-critical.
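For downstream code this is almost entirely import churn: the types keep their shapes, and the root `Config` type is renamed to `NexusConfig` so it reads sensibly at its new path. A minimal sketch of a consumer after the move (the `load` helper is hypothetical; `NexusConfig::from_file` and the `anyhow!(e)` error handling are taken from the diff below):

```rust
use anyhow::anyhow;
use camino::Utf8Path;
// Previously: use omicron_common::nexus_config::Config;
use nexus_config::NexusConfig;

// Hypothetical helper: load a Nexus config from a TOML file.
fn load(path: &Utf8Path) -> anyhow::Result<NexusConfig> {
    // `from_file` reads the file and parses it as TOML.
    NexusConfig::from_file(path).map_err(|e| anyhow!(e))
}
```
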
--- Cargo.lock | 33 ++++++- Cargo.toml | 3 + common/Cargo.toml | 8 +- common/src/lib.rs | 2 - dev-tools/omdb/Cargo.toml | 1 + dev-tools/omdb/src/bin/omdb/db.rs | 2 +- dev-tools/omicron-dev/Cargo.toml | 1 + dev-tools/omicron-dev/src/bin/omicron-dev.rs | 3 +- nexus-config/Cargo.toml | 22 +++++ nexus-config/README.adoc | 4 + nexus-config/src/lib.rs | 13 +++ {common => nexus-config}/src/nexus_config.rs | 95 +++++++++---------- .../src/postgres_config.rs | 0 nexus/Cargo.toml | 1 + nexus/db-model/Cargo.toml | 1 + nexus/db-model/src/ipv4net.rs | 2 +- nexus/db-model/src/ipv6net.rs | 2 +- nexus/db-model/src/vpc_subnet.rs | 2 +- nexus/db-queries/Cargo.toml | 1 + nexus/db-queries/src/db/config.rs | 2 +- .../src/db/datastore/db_metadata.rs | 2 +- nexus/db-queries/src/db/datastore/mod.rs | 4 +- nexus/db-queries/src/db/datastore/rack.rs | 2 +- nexus/db-queries/src/db/datastore/region.rs | 2 +- .../src/db/queries/network_interface.rs | 2 +- .../src/db/queries/region_allocation.rs | 2 +- nexus/reconfigurator/execution/Cargo.toml | 1 + .../execution/src/resource_allocation.rs | 2 +- nexus/reconfigurator/planning/Cargo.toml | 1 + .../planning/src/blueprint_builder.rs | 2 +- nexus/src/app/background/init.rs | 4 +- nexus/src/app/mod.rs | 14 +-- nexus/src/app/vpc_subnet.rs | 2 +- nexus/src/bin/nexus.rs | 4 +- nexus/src/bin/schema-updater.rs | 4 +- nexus/src/config.rs | 13 --- nexus/src/context.rs | 14 +-- nexus/src/lib.rs | 13 ++- nexus/test-interface/Cargo.toml | 1 + nexus/test-interface/src/lib.rs | 6 +- nexus/test-utils/Cargo.toml | 1 + nexus/test-utils/src/lib.rs | 53 +++++------ .../tests/integration_tests/initialization.rs | 56 ++++++----- nexus/tests/integration_tests/schema.rs | 6 +- .../integration_tests/subnet_allocation.rs | 2 +- nexus/tests/integration_tests/updates.rs | 2 +- sled-agent/Cargo.toml | 1 + sled-agent/src/rack_setup/plan/service.rs | 2 +- sled-agent/src/services.rs | 6 +- sled-agent/src/sim/server.rs | 2 +- test-utils/Cargo.toml | 1 + test-utils/src/dev/db.rs | 2 +- 52 files changed, 244 insertions(+), 183 deletions(-) create mode 100644 nexus-config/Cargo.toml create mode 100644 nexus-config/README.adoc create mode 100644 nexus-config/src/lib.rs rename {common => nexus-config}/src/nexus_config.rs (96%) rename {common => nexus-config}/src/postgres_config.rs (100%) delete mode 100644 nexus/src/config.rs diff --git a/Cargo.lock b/Cargo.lock index 1ed8bbdfef..18c783037f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4283,6 +4283,26 @@ dependencies = [ "uuid", ] +[[package]] +name = "nexus-config" +version = "0.1.0" +dependencies = [ + "anyhow", + "camino", + "dropshot", + "expectorate", + "libc", + "omicron-common", + "omicron-workspace-hack", + "schemars", + "serde", + "serde_json", + "serde_with", + "tokio-postgres", + "toml 0.8.10", + "uuid", +] + [[package]] name = "nexus-db-model" version = "0.1.0" @@ -4297,6 +4317,7 @@ dependencies = [ "ipnetwork", "macaddr", "newtype_derive", + "nexus-config", "nexus-defaults", "nexus-types", "omicron-certificates", @@ -4352,6 +4373,7 @@ dependencies = [ "itertools 0.12.1", "macaddr", "newtype_derive", + "nexus-config", "nexus-db-model", "nexus-inventory", "nexus-reconfigurator-planning", @@ -4460,6 +4482,7 @@ dependencies = [ "illumos-utils", "internal-dns", "ipnet", + "nexus-config", "nexus-db-model", "nexus-db-queries", "nexus-inventory", @@ -4490,6 +4513,7 @@ dependencies = [ "internal-dns", "ipnet", "ipnetwork", + "nexus-config", "nexus-inventory", "nexus-types", "omicron-common", @@ -4506,6 +4530,7 @@ name = "nexus-test-interface" version = 
"0.1.0" dependencies = [ "async-trait", + "nexus-config", "nexus-types", "omicron-common", "omicron-workspace-hack", @@ -4533,6 +4558,7 @@ dependencies = [ "http 0.2.11", "hyper 0.14.27", "internal-dns", + "nexus-config", "nexus-db-queries", "nexus-test-interface", "nexus-types", @@ -4906,8 +4932,6 @@ dependencies = [ "test-strategy", "thiserror", "tokio", - "tokio-postgres", - "toml 0.8.10", "uuid", ] @@ -4925,6 +4949,7 @@ dependencies = [ "gateway-messages", "gateway-test-utils", "libc", + "nexus-config", "nexus-test-interface", "nexus-test-utils", "omicron-common", @@ -5031,6 +5056,7 @@ dependencies = [ "macaddr", "mg-admin-client", "mime_guess", + "nexus-config", "nexus-db-model", "nexus-db-queries", "nexus-defaults", @@ -5129,6 +5155,7 @@ dependencies = [ "ipnetwork", "multimap", "nexus-client", + "nexus-config", "nexus-db-model", "nexus-db-queries", "nexus-test-utils", @@ -5259,6 +5286,7 @@ dependencies = [ "macaddr", "mg-admin-client", "nexus-client", + "nexus-config", "omicron-common", "omicron-test-utils", "omicron-workspace-hack", @@ -5321,6 +5349,7 @@ dependencies = [ "hex", "http 0.2.11", "libc", + "nexus-config", "omicron-common", "omicron-workspace-hack", "pem", diff --git a/Cargo.toml b/Cargo.toml index 5a477da0a2..474739a932 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,7 @@ members = [ "ipcc", "key-manager", "nexus", + "nexus-config", "nexus/authz-macros", "nexus/db-macros", "nexus/db-model", @@ -113,6 +114,7 @@ default-members = [ "ipcc", "key-manager", "nexus", + "nexus-config", "nexus/authz-macros", "nexus/macros-common", "nexus/db-macros", @@ -254,6 +256,7 @@ newtype_derive = "0.1.6" mg-admin-client = { path = "clients/mg-admin-client" } multimap = "0.10.0" nexus-client = { path = "clients/nexus-client" } +nexus-config = { path = "nexus-config" } nexus-db-model = { path = "nexus/db-model" } nexus-db-queries = { path = "nexus/db-queries" } nexus-defaults = { path = "nexus/defaults" } diff --git a/common/Cargo.toml b/common/Cargo.toml index 5628a93397..4451d92bdb 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -4,6 +4,12 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +# NOTE: +# +# This crate is depended on by several other workspaces! Be careful of adding +# new regular or build dependencies here, as they will be added to those crates +# as well. (Dev-dependencies are fine.) 
+ [dependencies] anyhow.workspace = true api_identity.workspace = true @@ -32,8 +38,6 @@ strum.workspace = true test-strategy = { workspace = true, optional = true } thiserror.workspace = true tokio = { workspace = true, features = ["full"] } -tokio-postgres.workspace = true -toml.workspace = true uuid.workspace = true parse-display.workspace = true progenitor.workspace = true diff --git a/common/src/lib.rs b/common/src/lib.rs index 0d63de90fb..411bc3e426 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -26,8 +26,6 @@ pub mod backoff; pub mod cmd; pub mod disk; pub mod ledger; -pub mod nexus_config; -pub mod postgres_config; pub mod update; pub mod vlan; diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index 3f566f55ee..df56fda571 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -25,6 +25,7 @@ gateway-test-utils.workspace = true humantime.workspace = true internal-dns.workspace = true nexus-client.workspace = true +nexus-config.workspace = true nexus-db-model.workspace = true nexus-db-queries.workspace = true nexus-types.workspace = true diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index a51deacaf4..c7e9022fc2 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -36,6 +36,7 @@ use diesel::OptionalExtension; use diesel::TextExpressionMethods; use gateway_client::types::SpType; use ipnetwork::IpNetwork; +use nexus_config::PostgresConfigWithUrl; use nexus_db_model::Dataset; use nexus_db_model::Disk; use nexus_db_model::DnsGroup; @@ -82,7 +83,6 @@ use nexus_types::inventory::RotPageWhich; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Generation; use omicron_common::api::external::MacAddr; -use omicron_common::postgres_config::PostgresConfigWithUrl; use sled_agent_client::types::VolumeConstructionRequest; use std::borrow::Cow; use std::cmp::Ordering; diff --git a/dev-tools/omicron-dev/Cargo.toml b/dev-tools/omicron-dev/Cargo.toml index ce9a6ac32d..6aa480b2c6 100644 --- a/dev-tools/omicron-dev/Cargo.toml +++ b/dev-tools/omicron-dev/Cargo.toml @@ -16,6 +16,7 @@ futures.workspace = true gateway-messages.workspace = true gateway-test-utils.workspace = true libc.workspace = true +nexus-config.workspace = true nexus-test-utils = { workspace = true, features = ["omicron-dev"] } nexus-test-interface.workspace = true omicron-common.workspace = true diff --git a/dev-tools/omicron-dev/src/bin/omicron-dev.rs b/dev-tools/omicron-dev/src/bin/omicron-dev.rs index 0eb421478c..5e0c6486d6 100644 --- a/dev-tools/omicron-dev/src/bin/omicron-dev.rs +++ b/dev-tools/omicron-dev/src/bin/omicron-dev.rs @@ -11,6 +11,7 @@ use clap::Args; use clap::Parser; use dropshot::test_util::LogContext; use futures::stream::StreamExt; +use nexus_config::NexusConfig; use nexus_test_interface::NexusServer; use omicron_common::cmd::fatal; use omicron_common::cmd::CmdError; @@ -473,7 +474,7 @@ async fn cmd_run_all(args: &RunAllArgs) -> Result<(), anyhow::Error> { // Read configuration. 
let config_str = include_str!("../../../../nexus/examples/config.toml"); - let mut config: omicron_common::nexus_config::Config = + let mut config: NexusConfig = toml::from_str(config_str).context("parsing example config")?; config.pkg.log = dropshot::ConfigLogging::File { // See LogContext::new(), diff --git a/nexus-config/Cargo.toml b/nexus-config/Cargo.toml new file mode 100644 index 0000000000..af7eb70f62 --- /dev/null +++ b/nexus-config/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "nexus-config" +version = "0.1.0" +edition = "2021" + +[dependencies] +anyhow.workspace = true +camino.workspace = true +dropshot.workspace = true +omicron-common.workspace = true +omicron-workspace-hack.workspace = true +schemars.workspace = true +serde.workspace = true +serde_with.workspace = true +tokio-postgres.workspace = true +toml.workspace = true +uuid.workspace = true + +[dev-dependencies] +expectorate.workspace = true +libc.workspace = true +serde_json.workspace = true diff --git a/nexus-config/README.adoc b/nexus-config/README.adoc new file mode 100644 index 0000000000..6f42e2db0a --- /dev/null +++ b/nexus-config/README.adoc @@ -0,0 +1,4 @@ += Nexus config + +This crate contains type definitions for Nexus configuration, as shared by +Nexus and sled-agent. diff --git a/nexus-config/src/lib.rs b/nexus-config/src/lib.rs new file mode 100644 index 0000000000..c3411147e3 --- /dev/null +++ b/nexus-config/src/lib.rs @@ -0,0 +1,13 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Common types used for Nexus configuration. +//! +//! The root structure is [`NexusConfig`]. + +mod nexus_config; +mod postgres_config; + +pub use nexus_config::*; +pub use postgres_config::*; diff --git a/common/src/nexus_config.rs b/nexus-config/src/nexus_config.rs similarity index 96% rename from common/src/nexus_config.rs rename to nexus-config/src/nexus_config.rs index 2545d4cb91..0c40326001 100644 --- a/common/src/nexus_config.rs +++ b/nexus-config/src/nexus_config.rs @@ -5,11 +5,13 @@ //! Configuration parameters to Nexus that are usually only known //! at deployment time. -use crate::address::NEXUS_TECHPORT_EXTERNAL_PORT; -use crate::api::internal::shared::SwitchLocation; +use crate::PostgresConfigWithUrl; + +use omicron_common::address::Ipv6Subnet; +use omicron_common::address::NEXUS_TECHPORT_EXTERNAL_PORT; +use omicron_common::address::RACK_PREFIX; +use omicron_common::api::internal::shared::SwitchLocation; -use super::address::{Ipv6Subnet, RACK_PREFIX}; -use super::postgres_config::PostgresConfigWithUrl; use anyhow::anyhow; use camino::{Utf8Path, Utf8PathBuf}; use dropshot::ConfigDropshot; @@ -28,6 +30,31 @@ use std::net::SocketAddr; use std::time::Duration; use uuid::Uuid; +#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] +pub struct NexusConfig { + /// Configuration parameters known at compile-time. + #[serde(flatten)] + pub pkg: PackageConfig, + + /// A variety of configuration parameters only known at deployment time. + pub deployment: DeploymentConfig, +} + +impl NexusConfig { + /// Load a `Config` from the given TOML file + /// + /// This config object can then be used to create a new `Nexus`. + /// The format is described in the README. 
+ pub fn from_file<P: AsRef<Utf8Path>>(path: P) -> Result<Self, LoadError> { + let path = path.as_ref(); + let file_contents = std::fs::read_to_string(path) + .map_err(|e| (path.to_path_buf(), e))?; + let config_parsed: Self = toml::from_str(&file_contents) + .map_err(|e| (path.to_path_buf(), e))?; + Ok(config_parsed) + } +} + #[derive(Debug)] pub struct LoadError { pub path: Utf8PathBuf, @@ -488,31 +515,6 @@ pub struct PackageConfig { pub default_region_allocation_strategy: RegionAllocationStrategy, } -#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] -pub struct Config { - /// Configuration parameters known at compile-time. - #[serde(flatten)] - pub pkg: PackageConfig, - - /// A variety of configuration parameters only known at deployment time. - pub deployment: DeploymentConfig, -} - -impl Config { - /// Load a `Config` from the given TOML file - /// - /// This config object can then be used to create a new `Nexus`. - /// The format is described in the README. - pub fn from_file<P: AsRef<Utf8Path>>(path: P) -> Result<Self, LoadError> { - let path = path.as_ref(); - let file_contents = std::fs::read_to_string(path) - .map_err(|e| (path.to_path_buf(), e))?; - let config_parsed: Self = toml::from_str(&file_contents) - .map_err(|e| (path.to_path_buf(), e))?; - Ok(config_parsed) - } -} - /// List of supported external authn schemes /// /// Note that the authn subsystem doesn't know about this type. It allows @@ -552,24 +554,16 @@ impl std::fmt::Display for SchemeName { #[cfg(test)] mod test { - use super::{ - default_techport_external_server_port, AuthnConfig, - BackgroundTaskConfig, BlueprintTasksConfig, Config, - ConfigDropshotWithTls, ConsoleConfig, Database, DeploymentConfig, - DnsTasksConfig, DpdConfig, ExternalEndpointsConfig, InternalDns, - InventoryConfig, LoadError, LoadErrorKind, MgdConfig, NatCleanupConfig, - PackageConfig, PhantomDiskConfig, RegionReplacementConfig, SchemeName, - TimeseriesDbConfig, Tunables, UpdatesConfig, - }; - use crate::address::{Ipv6Subnet, RACK_PREFIX}; - use crate::api::internal::shared::SwitchLocation; - use crate::nexus_config::{BfdManagerConfig, SyncServiceZoneNatConfig}; + use super::*; + + use omicron_common::address::{Ipv6Subnet, RACK_PREFIX}; + use omicron_common::api::internal::shared::SwitchLocation; + + use camino::{Utf8Path, Utf8PathBuf}; use dropshot::ConfigDropshot; use dropshot::ConfigLogging; use dropshot::ConfigLoggingIfExists; use dropshot::ConfigLoggingLevel; - use libc; use std::collections::HashMap; use std::fs; use std::net::{Ipv6Addr, SocketAddr}; @@ -594,13 +588,16 @@ mod test { /// loads the config from that file, then removes the file. `label` is used /// as a unique string for the filename and error messages. It should be /// unique for each test.
- fn read_config(label: &str, contents: &str) -> Result { + fn read_config( + label: &str, + contents: &str, + ) -> Result { let pathbuf = temp_path(label); let path = pathbuf.as_path(); eprintln!("writing test config {}", path); fs::write(path, contents).expect("write to tempfile failed"); - let result = Config::from_file(path); + let result = NexusConfig::from_file(path); fs::remove_file(path).expect("failed to remove temporary file"); eprintln!("{:?}", result); result @@ -610,7 +607,7 @@ mod test { #[test] fn test_config_nonexistent() { - let error = Config::from_file(Utf8Path::new("/nonexistent")) + let error = NexusConfig::from_file(Utf8Path::new("/nonexistent")) .expect_err("expected config to fail from /nonexistent"); let expected = std::io::Error::from_raw_os_error(libc::ENOENT); assert_eq!(error, expected); @@ -726,7 +723,7 @@ mod test { assert_eq!( config, - Config { + NexusConfig { deployment: DeploymentConfig { id: "28b90dc4-c22a-65ba-f49a-f051fe01208f".parse().unwrap(), rack_id: "38b90dc4-c22a-65ba-f49a-f051fe01208f" @@ -1020,7 +1017,7 @@ mod test { // The example config file should be valid. let config_path = "../nexus/examples/config.toml"; println!("checking {:?}", config_path); - let example_config = Config::from_file(config_path) + let example_config = NexusConfig::from_file(config_path) .expect("example config file is not valid"); // The config file used for the tests should also be valid. The tests @@ -1028,7 +1025,7 @@ mod test { // helpful to verify this here explicitly as well. let config_path = "../nexus/examples/config.toml"; println!("checking {:?}", config_path); - let _ = Config::from_file(config_path) + let _ = NexusConfig::from_file(config_path) .expect("test config file is not valid"); // The partial config file that's used to deploy Nexus must also be @@ -1062,7 +1059,7 @@ mod test { \n\n\n", ); contents.push_str(&example_deployment); - let _: Config = toml::from_str(&contents) + let _: NexusConfig = toml::from_str(&contents) .expect("Nexus SMF config file is not valid"); } } diff --git a/common/src/postgres_config.rs b/nexus-config/src/postgres_config.rs similarity index 100% rename from common/src/postgres_config.rs rename to nexus-config/src/postgres_config.rs diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 581e16e84d..de79f3429d 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -39,6 +39,7 @@ macaddr.workspace = true mime_guess.workspace = true # Not under "dev-dependencies"; these also need to be implemented for # integration tests. 
+nexus-config.workspace = true nexus-test-interface.workspace = true num-integer.workspace = true once_cell.workspace = true diff --git a/nexus/db-model/Cargo.toml b/nexus/db-model/Cargo.toml index 43b6f6a4b0..b1bb68b8f0 100644 --- a/nexus/db-model/Cargo.toml +++ b/nexus/db-model/Cargo.toml @@ -34,6 +34,7 @@ uuid.workspace = true db-macros.workspace = true omicron-certificates.workspace = true omicron-common.workspace = true +nexus-config.workspace = true nexus-defaults.workspace = true nexus-types.workspace = true omicron-passwords.workspace = true diff --git a/nexus/db-model/src/ipv4net.rs b/nexus/db-model/src/ipv4net.rs index fdd9cd3293..eaf8a6eed8 100644 --- a/nexus/db-model/src/ipv4net.rs +++ b/nexus/db-model/src/ipv4net.rs @@ -9,8 +9,8 @@ use diesel::pg::Pg; use diesel::serialize::{self, ToSql}; use diesel::sql_types; use ipnetwork::IpNetwork; +use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use omicron_common::api::external; -use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use serde::Deserialize; use serde::Serialize; use std::net::Ipv4Addr; diff --git a/nexus/db-model/src/ipv6net.rs b/nexus/db-model/src/ipv6net.rs index e47bbc4d33..d516b67ed9 100644 --- a/nexus/db-model/src/ipv6net.rs +++ b/nexus/db-model/src/ipv6net.rs @@ -8,8 +8,8 @@ use diesel::pg::Pg; use diesel::serialize::{self, ToSql}; use diesel::sql_types; use ipnetwork::IpNetwork; +use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use omicron_common::api::external; -use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use rand::{rngs::StdRng, SeedableRng}; use serde::Deserialize; use serde::Serialize; diff --git a/nexus/db-model/src/vpc_subnet.rs b/nexus/db-model/src/vpc_subnet.rs index 99f2c5e3ac..407c933ef2 100644 --- a/nexus/db-model/src/vpc_subnet.rs +++ b/nexus/db-model/src/vpc_subnet.rs @@ -10,11 +10,11 @@ use crate::schema::vpc_subnet; use crate::NetworkInterface; use chrono::{DateTime, Utc}; use db_macros::Resource; +use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_types::external_api::params; use nexus_types::external_api::views; use nexus_types::identity::Resource; use omicron_common::api::external; -use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use serde::Deserialize; use serde::Serialize; use std::net::IpAddr; diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml index 9d0c22c373..e673003036 100644 --- a/nexus/db-queries/Cargo.toml +++ b/nexus/db-queries/Cargo.toml @@ -52,6 +52,7 @@ usdt.workspace = true authz-macros.workspace = true db-macros.workspace = true +nexus-config.workspace = true nexus-db-model.workspace = true nexus-types.workspace = true omicron-common.workspace = true diff --git a/nexus/db-queries/src/db/config.rs b/nexus/db-queries/src/db/config.rs index afe51bca66..dc938b07ac 100644 --- a/nexus/db-queries/src/db/config.rs +++ b/nexus/db-queries/src/db/config.rs @@ -4,7 +4,7 @@ //! 
Nexus database configuration -use omicron_common::postgres_config::PostgresConfigWithUrl; +use nexus_config::PostgresConfigWithUrl; use serde::Deserialize; use serde::Serialize; use serde_with::serde_as; diff --git a/nexus/db-queries/src/db/datastore/db_metadata.rs b/nexus/db-queries/src/db/datastore/db_metadata.rs index 1f9ec2c028..cbbeaf2aa4 100644 --- a/nexus/db-queries/src/db/datastore/db_metadata.rs +++ b/nexus/db-queries/src/db/datastore/db_metadata.rs @@ -12,9 +12,9 @@ use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use camino::{Utf8Path, Utf8PathBuf}; use chrono::Utc; use diesel::prelude::*; +use nexus_config::SchemaConfig; use omicron_common::api::external::Error; use omicron_common::api::external::SemverVersion; -use omicron_common::nexus_config::SchemaConfig; use slog::Logger; use std::collections::BTreeSet; use std::ops::Bound; diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index b5eff6cb85..dea6fcb997 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -33,6 +33,7 @@ use diesel::prelude::*; use diesel::query_builder::{QueryFragment, QueryId}; use diesel::query_dsl::methods::LoadQuery; use diesel::{ExpressionMethods, QueryDsl}; +use nexus_config::SchemaConfig; use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::LookupType; @@ -41,7 +42,6 @@ use omicron_common::api::external::SemverVersion; use omicron_common::backoff::{ retry_notify, retry_policy_internal_service, BackoffError, }; -use omicron_common::nexus_config::SchemaConfig; use slog::Logger; use std::net::Ipv6Addr; use std::num::NonZeroU32; @@ -392,6 +392,7 @@ mod test { use chrono::{Duration, Utc}; use futures::stream; use futures::StreamExt; + use nexus_config::RegionAllocationStrategy; use nexus_db_model::IpAttachState; use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; @@ -399,7 +400,6 @@ mod test { use omicron_common::api::external::{ ByteCount, Error, IdentityMetadataCreateParams, LookupType, Name, }; - use omicron_common::nexus_config::RegionAllocationStrategy; use omicron_test_utils::dev; use std::collections::HashMap; use std::collections::HashSet; diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 46a3e0af2d..32e059bf81 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -867,6 +867,7 @@ mod test { use crate::db::model::Sled; use async_bb8_diesel::AsyncSimpleConnection; use internal_params::DnsRecord; + use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_db_model::{DnsGroup, InitialDnsGroup, SledUpdate}; use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::shared::SiloIdentityMode; @@ -880,7 +881,6 @@ mod test { IdentityMetadataCreateParams, MacAddr, }; use omicron_common::api::internal::shared::SourceNatConfig; - use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use omicron_test_utils::dev; use std::collections::HashMap; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV6}; diff --git a/nexus/db-queries/src/db/datastore/region.rs b/nexus/db-queries/src/db/datastore/region.rs index b055a3e85c..912565cb2c 100644 --- a/nexus/db-queries/src/db/datastore/region.rs +++ b/nexus/db-queries/src/db/datastore/region.rs @@ -16,11 +16,11 @@ use crate::db::model::Region; use crate::transaction_retry::OptionalError; use 
async_bb8_diesel::AsyncRunQueryDsl; use diesel::prelude::*; +use nexus_config::RegionAllocationStrategy; use nexus_types::external_api::params; use omicron_common::api::external; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; -use omicron_common::nexus_config::RegionAllocationStrategy; use slog::Logger; use uuid::Uuid; diff --git a/nexus/db-queries/src/db/queries/network_interface.rs b/nexus/db-queries/src/db/queries/network_interface.rs index d96b26c9b3..f6ce3e31e3 100644 --- a/nexus/db-queries/src/db/queries/network_interface.rs +++ b/nexus/db-queries/src/db/queries/network_interface.rs @@ -25,11 +25,11 @@ use diesel::QueryResult; use diesel::RunQueryDsl; use ipnetwork::IpNetwork; use ipnetwork::Ipv4Network; +use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_db_model::{NetworkInterfaceKind, MAX_NICS_PER_INSTANCE}; use nexus_db_model::{NetworkInterfaceKindEnum, SqlU8}; use omicron_common::api::external; use omicron_common::api::external::MacAddr; -use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use once_cell::sync::Lazy; use std::net::IpAddr; use uuid::Uuid; diff --git a/nexus/db-queries/src/db/queries/region_allocation.rs b/nexus/db-queries/src/db/queries/region_allocation.rs index 43e1750812..96fa2058af 100644 --- a/nexus/db-queries/src/db/queries/region_allocation.rs +++ b/nexus/db-queries/src/db/queries/region_allocation.rs @@ -21,6 +21,7 @@ use diesel::{ Insertable, IntoSql, JoinOnDsl, NullableExpressionMethods, QueryDsl, RunQueryDsl, }; +use nexus_config::RegionAllocationStrategy; use nexus_db_model::queries::region_allocation::{ candidate_datasets, candidate_regions, candidate_zpools, cockroach_md5, do_insert, inserted_regions, old_regions, old_zpool_usage, @@ -31,7 +32,6 @@ use nexus_db_model::to_db_sled_policy; use nexus_db_model::SledState; use nexus_types::external_api::views::SledPolicy; use omicron_common::api::external; -use omicron_common::nexus_config::RegionAllocationStrategy; const NOT_ENOUGH_DATASETS_SENTINEL: &'static str = "Not enough datasets"; const NOT_ENOUGH_ZPOOL_SPACE_SENTINEL: &'static str = "Not enough space"; diff --git a/nexus/reconfigurator/execution/Cargo.toml b/nexus/reconfigurator/execution/Cargo.toml index 2f4807d38d..62155d9783 100644 --- a/nexus/reconfigurator/execution/Cargo.toml +++ b/nexus/reconfigurator/execution/Cargo.toml @@ -12,6 +12,7 @@ dns-service-client.workspace = true futures.workspace = true illumos-utils.workspace = true internal-dns.workspace = true +nexus-config.workspace = true nexus-db-model.workspace = true nexus-db-queries.workspace = true nexus-types.workspace = true diff --git a/nexus/reconfigurator/execution/src/resource_allocation.rs b/nexus/reconfigurator/execution/src/resource_allocation.rs index 7f3ebb9876..8ca44df39e 100644 --- a/nexus/reconfigurator/execution/src/resource_allocation.rs +++ b/nexus/reconfigurator/execution/src/resource_allocation.rs @@ -486,6 +486,7 @@ impl<'a> ResourceAllocator<'a> { #[cfg(test)] mod tests { use super::*; + use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_test_utils_macros::nexus_test; use nexus_types::deployment::OmicronZoneConfig; use nexus_types::deployment::OmicronZoneDataset; @@ -499,7 +500,6 @@ mod tests { use omicron_common::api::external::IpNet; use omicron_common::api::external::MacAddr; use omicron_common::api::external::Vni; - use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use std::net::IpAddr; use std::net::Ipv6Addr; use std::net::SocketAddrV6; diff --git 
a/nexus/reconfigurator/planning/Cargo.toml b/nexus/reconfigurator/planning/Cargo.toml index 35ead9494e..3ec616168c 100644 --- a/nexus/reconfigurator/planning/Cargo.toml +++ b/nexus/reconfigurator/planning/Cargo.toml @@ -9,6 +9,7 @@ chrono.workspace = true internal-dns.workspace = true ipnet.workspace = true ipnetwork.workspace = true +nexus-config.workspace = true nexus-inventory.workspace = true nexus-types.workspace = true omicron-common.workspace = true diff --git a/nexus/reconfigurator/planning/src/blueprint_builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder.rs index 2263a99ce0..d58d798770 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder.rs @@ -10,6 +10,7 @@ use anyhow::bail; use internal_dns::config::Host; use internal_dns::config::ZoneVariant; use ipnet::IpAdd; +use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_inventory::now_db_precision; use nexus_types::deployment::Blueprint; use nexus_types::deployment::NetworkInterface; @@ -34,7 +35,6 @@ use omicron_common::api::external::Generation; use omicron_common::api::external::IpNet; use omicron_common::api::external::MacAddr; use omicron_common::api::external::Vni; -use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::collections::HashSet; diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index 9ba30bab64..a213f7da72 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -18,12 +18,12 @@ use super::phantom_disks; use super::region_replacement; use super::sync_service_zone_nat::ServiceZoneNatTracker; use crate::app::sagas::SagaRequest; +use nexus_config::BackgroundTaskConfig; +use nexus_config::DnsTasksConfig; use nexus_db_model::DnsGroup; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; use omicron_common::api::internal::shared::SwitchLocation; -use omicron_common::nexus_config::BackgroundTaskConfig; -use omicron_common::nexus_config::DnsTasksConfig; use std::collections::BTreeMap; use std::collections::HashMap; use std::sync::Arc; diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 969320617f..781e39ac83 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -7,7 +7,6 @@ use self::external_endpoints::NexusCertResolver; use crate::app::oximeter::LazyTimeseriesClient; use crate::app::sagas::SagaRequest; -use crate::config; use crate::populate::populate_start; use crate::populate::PopulateArgs; use crate::populate::PopulateStatus; @@ -16,6 +15,10 @@ use crate::DropshotServer; use ::oximeter::types::ProducerRegistry; use anyhow::anyhow; use internal_dns::ServiceName; +use nexus_config::NexusConfig; +use nexus_config::RegionAllocationStrategy; +use nexus_config::Tunables; +use nexus_config::UpdatesConfig; use nexus_db_queries::authn; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; @@ -25,7 +28,6 @@ use omicron_common::address::MGD_PORT; use omicron_common::address::MGS_PORT; use omicron_common::api::external::Error; use omicron_common::api::internal::shared::SwitchLocation; -use omicron_common::nexus_config::RegionAllocationStrategy; use slog::Logger; use std::collections::HashMap; use std::net::{IpAddr, Ipv6Addr}; @@ -147,10 +149,10 @@ pub struct Nexus { /// Contents of the trusted root role for the TUF repository. 
#[allow(dead_code)] - updates_config: Option, + updates_config: Option, /// The tunable parameters from a configuration file - tunables: config::Tunables, + tunables: Tunables, /// Operational context used for Instance allocation opctx_alloc: OpContext, @@ -200,7 +202,7 @@ impl Nexus { resolver: internal_dns::resolver::Resolver, pool: db::Pool, producer_registry: &ProducerRegistry, - config: &config::Config, + config: &NexusConfig, authz: Arc, ) -> Result, String> { let pool = Arc::new(pool); @@ -515,7 +517,7 @@ impl Nexus { } /// Return the tunable configuration parameters, e.g. for use in tests. - pub fn tunables(&self) -> &config::Tunables { + pub fn tunables(&self) -> &Tunables { &self.tunables } diff --git a/nexus/src/app/vpc_subnet.rs b/nexus/src/app/vpc_subnet.rs index b93b90e716..4c5a569201 100644 --- a/nexus/src/app/vpc_subnet.rs +++ b/nexus/src/app/vpc_subnet.rs @@ -5,6 +5,7 @@ //! VPC Subnets and their network interfaces use crate::external_api::params; +use nexus_config::MIN_VPC_IPV4_SUBNET_PREFIX; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; @@ -22,7 +23,6 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::NameOrId; use omicron_common::api::external::UpdateResult; -use omicron_common::nexus_config::MIN_VPC_IPV4_SUBNET_PREFIX; use uuid::Uuid; impl super::Nexus { diff --git a/nexus/src/bin/nexus.rs b/nexus/src/bin/nexus.rs index 59a77e7ca4..452e033ce6 100644 --- a/nexus/src/bin/nexus.rs +++ b/nexus/src/bin/nexus.rs @@ -13,12 +13,12 @@ use anyhow::anyhow; use camino::Utf8PathBuf; use clap::Parser; +use nexus_config::NexusConfig; use omicron_common::cmd::fatal; use omicron_common::cmd::CmdError; use omicron_nexus::run_openapi_external; use omicron_nexus::run_openapi_internal; use omicron_nexus::run_server; -use omicron_nexus::Config; #[derive(Debug, Parser)] #[clap(name = "nexus", about = "See README.adoc for more information")] @@ -70,7 +70,7 @@ async fn do_run() -> Result<(), CmdError> { )); } }; - let config = Config::from_file(config_path) + let config = NexusConfig::from_file(config_path) .map_err(|e| CmdError::Failure(anyhow!(e)))?; run_server(&config).await.map_err(|err| CmdError::Failure(anyhow!(err))) diff --git a/nexus/src/bin/schema-updater.rs b/nexus/src/bin/schema-updater.rs index d016bd0421..6c7fe5adf3 100644 --- a/nexus/src/bin/schema-updater.rs +++ b/nexus/src/bin/schema-updater.rs @@ -8,12 +8,12 @@ use anyhow::{anyhow, bail}; use camino::Utf8PathBuf; use clap::Parser; use clap::Subcommand; +use nexus_config::PostgresConfigWithUrl; +use nexus_config::SchemaConfig; use nexus_db_model::schema::SCHEMA_VERSION; use nexus_db_queries::db; use nexus_db_queries::db::DataStore; use omicron_common::api::external::SemverVersion; -use omicron_common::nexus_config::SchemaConfig; -use omicron_common::postgres_config::PostgresConfigWithUrl; use slog::Drain; use slog::Level; use slog::LevelFilter; diff --git a/nexus/src/config.rs b/nexus/src/config.rs deleted file mode 100644 index 51d745c959..0000000000 --- a/nexus/src/config.rs +++ /dev/null @@ -1,13 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Interfaces for parsing configuration files and working with a nexus server -//! configuration - -// TODO: Use them directly? 
No need for this file - -pub use omicron_common::nexus_config::Config; -pub(crate) use omicron_common::nexus_config::SchemeName; -pub(crate) use omicron_common::nexus_config::Tunables; -pub(crate) use omicron_common::nexus_config::UpdatesConfig; diff --git a/nexus/src/context.rs b/nexus/src/context.rs index d34a04437f..cf2b9d6f17 100644 --- a/nexus/src/context.rs +++ b/nexus/src/context.rs @@ -2,7 +2,6 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. //! Shared state used by API request handlers -use super::config; use super::Nexus; use crate::saga_interface::SagaContext; use async_trait::async_trait; @@ -13,14 +12,15 @@ use authn::external::HttpAuthnScheme; use camino::Utf8PathBuf; use chrono::Duration; use internal_dns::ServiceName; +use nexus_config::NexusConfig; +use nexus_config::PostgresConfigWithUrl; +use nexus_config::SchemeName; use nexus_db_queries::authn::external::session_cookie::SessionStore; use nexus_db_queries::authn::ConsoleSessionWithSiloId; use nexus_db_queries::context::{OpContext, OpKind}; use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::{authn, authz, db}; use omicron_common::address::{Ipv6Subnet, AZ_PREFIX}; -use omicron_common::nexus_config; -use omicron_common::postgres_config::PostgresConfigWithUrl; use oximeter::types::ProducerRegistry; use oximeter_instruments::http::{HttpService, LatencyTracker}; use slog::Logger; @@ -69,7 +69,7 @@ impl ServerContext { pub async fn new( rack_id: Uuid, log: Logger, - config: &config::Config, + config: &NexusConfig, ) -> Result, String> { let nexus_schemes = config .pkg @@ -78,11 +78,11 @@ impl ServerContext { .iter() .map::>, _>( |name| match name { - config::SchemeName::Spoof => Box::new(HttpAuthnSpoof), - config::SchemeName::SessionCookie => { + SchemeName::Spoof => Box::new(HttpAuthnSpoof), + SchemeName::SessionCookie => { Box::new(HttpAuthnSessionCookie) } - config::SchemeName::AccessToken => Box::new(HttpAuthnToken), + SchemeName::AccessToken => Box::new(HttpAuthnToken), }, ) .collect(); diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index 1b80bc4c3c..771a78f0b1 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -14,7 +14,6 @@ pub mod app; // Public for documentation examples mod cidata; -mod config; mod context; // Public for documentation examples pub mod external_api; // Public for testing mod internal_api; @@ -23,11 +22,11 @@ mod saga_interface; pub use app::test_interfaces::TestInterfaces; pub use app::Nexus; -pub use config::Config; use context::ServerContext; use dropshot::ConfigDropshot; use external_api::http_entrypoints::external_api; use internal_api::http_entrypoints::internal_api; +use nexus_config::NexusConfig; use nexus_types::internal_api::params::ServiceKind; use omicron_common::address::IpRange; use omicron_common::api::internal::shared::{ @@ -73,14 +72,14 @@ pub struct InternalServer { /// dropshot server for internal API http_server_internal: dropshot::HttpServer>, - config: Config, + config: NexusConfig, log: Logger, } impl InternalServer { /// Start a nexus server. 
pub async fn start( - config: &Config, + config: &NexusConfig, log: &Logger, ) -> Result { let log = log.new(o!("name" => config.deployment.id.to_string())); @@ -219,7 +218,7 @@ impl nexus_test_interface::NexusServer for Server { type InternalServer = InternalServer; async fn start_internal( - config: &Config, + config: &NexusConfig, log: &Logger, ) -> (InternalServer, SocketAddr) { let internal_server = @@ -231,7 +230,7 @@ impl nexus_test_interface::NexusServer for Server { async fn start( internal_server: InternalServer, - config: &Config, + config: &NexusConfig, services: Vec, datasets: Vec, internal_dns_zone_config: nexus_types::internal_api::params::DnsConfigParams, @@ -343,7 +342,7 @@ impl nexus_test_interface::NexusServer for Server { } /// Run an instance of the Nexus server. -pub async fn run_server(config: &Config) -> Result<(), String> { +pub async fn run_server(config: &NexusConfig) -> Result<(), String> { use slog::Drain; let (drain, registration) = slog_dtrace::with_drain( diff --git a/nexus/test-interface/Cargo.toml b/nexus/test-interface/Cargo.toml index b96afa6dbf..004ce28545 100644 --- a/nexus/test-interface/Cargo.toml +++ b/nexus/test-interface/Cargo.toml @@ -6,6 +6,7 @@ license = "MPL-2.0" [dependencies] async-trait.workspace = true +nexus-config.workspace = true nexus-types.workspace = true omicron-common.workspace = true slog.workspace = true diff --git a/nexus/test-interface/src/lib.rs b/nexus/test-interface/src/lib.rs index 23326a5ecb..0f53ac6445 100644 --- a/nexus/test-interface/src/lib.rs +++ b/nexus/test-interface/src/lib.rs @@ -32,7 +32,7 @@ //! - integration tests -> nexus-test-utils use async_trait::async_trait; -use omicron_common::nexus_config::Config; +use nexus_config::NexusConfig; use slog::Logger; use std::net::{SocketAddr, SocketAddrV6}; use uuid::Uuid; @@ -42,14 +42,14 @@ pub trait NexusServer: Send + Sync + 'static { type InternalServer: Send + Sync + 'static; async fn start_internal( - config: &Config, + config: &NexusConfig, log: &Logger, ) -> (Self::InternalServer, SocketAddr); #[allow(clippy::too_many_arguments)] async fn start( internal_server: Self::InternalServer, - config: &Config, + config: &NexusConfig, services: Vec, datasets: Vec, internal_dns_config: nexus_types::internal_api::params::DnsConfigParams, diff --git a/nexus/test-utils/Cargo.toml b/nexus/test-utils/Cargo.toml index 5605f33f75..e612547fa8 100644 --- a/nexus/test-utils/Cargo.toml +++ b/nexus/test-utils/Cargo.toml @@ -21,6 +21,7 @@ headers.workspace = true http.workspace = true hyper.workspace = true internal-dns.workspace = true +nexus-config.workspace = true nexus-db-queries.workspace = true nexus-test-interface.workspace = true nexus-types.workspace = true diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 271025f7a7..4ef77b3352 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -17,6 +17,12 @@ use dropshot::HandlerTaskMode; use futures::future::BoxFuture; use futures::FutureExt; use gateway_test_utils::setup::GatewayTestContext; +use nexus_config::Database; +use nexus_config::DpdConfig; +use nexus_config::InternalDns; +use nexus_config::MgdConfig; +use nexus_config::NexusConfig; +use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_test_interface::NexusServer; use nexus_types::external_api::params::UserId; use nexus_types::internal_api::params::Certificate; @@ -34,8 +40,6 @@ use omicron_common::api::external::{IdentityMetadata, Name}; use omicron_common::api::internal::nexus::ProducerEndpoint; use 
omicron_common::api::internal::nexus::ProducerKind; use omicron_common::api::internal::shared::SwitchLocation; -use omicron_common::nexus_config; -use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use omicron_sled_agent::sim; use omicron_test_utils::dev; use oximeter_collector::Oximeter; @@ -119,7 +123,7 @@ impl ControlPlaneTestContext { } } -pub fn load_test_config() -> omicron_common::nexus_config::Config { +pub fn load_test_config() -> NexusConfig { // We load as much configuration as we can from the test suite configuration // file. In practice, TestContext requires that: // @@ -136,9 +140,8 @@ pub fn load_test_config() -> omicron_common::nexus_config::Config { // configuration options, we expect many of those can be usefully configured // (and reconfigured) for the test suite. let config_file_path = Utf8Path::new("tests/config.test.toml"); - let mut config = - omicron_common::nexus_config::Config::from_file(config_file_path) - .expect("failed to load config.test.toml"); + let mut config = NexusConfig::from_file(config_file_path) + .expect("failed to load config.test.toml"); config.deployment.id = Uuid::new_v4(); config } @@ -227,7 +230,7 @@ impl RackInitRequestBuilder { } pub struct ControlPlaneTestContextBuilder<'a, N: NexusServer> { - pub config: &'a mut omicron_common::nexus_config::Config, + pub config: &'a mut NexusConfig, test_name: &'a str, rack_init_builder: RackInitRequestBuilder, @@ -269,10 +272,7 @@ type StepInitFn<'a, N> = Box< >; impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { - pub fn new( - test_name: &'a str, - config: &'a mut omicron_common::nexus_config::Config, - ) -> Self { + pub fn new(test_name: &'a str, config: &'a mut NexusConfig) -> Self { let start_time = chrono::Utc::now(); let logctx = LogContext::new(test_name, &config.pkg.log); @@ -463,9 +463,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { // Update the configuration options for Nexus, if it's launched later. // // NOTE: If dendrite is started after Nexus, this is ignored. 
- let config = omicron_common::nexus_config::DpdConfig { - address: std::net::SocketAddr::V6(address), - }; + let config = DpdConfig { address: std::net::SocketAddr::V6(address) }; self.config.pkg.dendrite.insert(switch_location, config); let sled_id = Uuid::parse_str(SLED_AGENT_UUID).unwrap(); @@ -489,9 +487,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { debug!(log, "mgd port is {port}"); - let config = omicron_common::nexus_config::MgdConfig { - address: std::net::SocketAddr::V6(address), - }; + let config = MgdConfig { address: std::net::SocketAddr::V6(address) }; self.config.pkg.mgd.insert(switch_location, config); let sled_id = Uuid::parse_str(SLED_AGENT_UUID).unwrap(); @@ -553,16 +549,15 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { let log = &self.logctx.log; debug!(log, "Starting Nexus (internal API)"); - self.config.deployment.internal_dns = - nexus_config::InternalDns::FromAddress { - address: self - .internal_dns - .as_ref() - .expect("Must initialize internal DNS server first") - .dns_server - .local_address(), - }; - self.config.deployment.database = nexus_config::Database::FromUrl { + self.config.deployment.internal_dns = InternalDns::FromAddress { + address: self + .internal_dns + .as_ref() + .expect("Must initialize internal DNS server first") + .dns_server + .local_address(), + }; + self.config.deployment.database = Database::FromUrl { url: self .database .as_ref() @@ -926,7 +921,7 @@ enum PopulateCrdb { /// should be done in the `crdb-seed` setup script. #[cfg(feature = "omicron-dev")] pub async fn omicron_dev_setup_with_config( - config: &mut omicron_common::nexus_config::Config, + config: &mut NexusConfig, ) -> Result> { let builder = ControlPlaneTestContextBuilder::::new("omicron-dev", config); @@ -959,7 +954,7 @@ pub async fn omicron_dev_setup_with_config( /// Setup routine to use for tests. pub async fn test_setup_with_config( test_name: &str, - config: &mut omicron_common::nexus_config::Config, + config: &mut NexusConfig, sim_mode: sim::SimMode, initial_cert: Option, ) -> ControlPlaneTestContext { diff --git a/nexus/tests/integration_tests/initialization.rs b/nexus/tests/integration_tests/initialization.rs index 43a4ac8f2e..b77a121080 100644 --- a/nexus/tests/integration_tests/initialization.rs +++ b/nexus/tests/integration_tests/initialization.rs @@ -7,6 +7,8 @@ use std::net::{Ipv6Addr, SocketAddrV6}; use gateway_messages::SpPort; use gateway_test_utils::setup as mgs_setup; +use nexus_config::Database; +use nexus_config::InternalDns; use nexus_test_interface::NexusServer; use nexus_test_utils::{load_test_config, ControlPlaneTestContextBuilder}; use omicron_common::address::MGS_PORT; @@ -39,17 +41,15 @@ async fn test_nexus_boots_before_cockroach() { // This call won't return successfully until we can... // 1. Contact the internal DNS system to find Cockroach // 2. 
Contact Cockroach to ensure the database has been populated - builder.config.deployment.database = - omicron_common::nexus_config::Database::FromDns; - builder.config.deployment.internal_dns = - omicron_common::nexus_config::InternalDns::FromAddress { - address: builder - .internal_dns - .as_ref() - .expect("Must start Internal DNS before acquiring an address") - .dns_server - .local_address(), - }; + builder.config.deployment.database = Database::FromDns; + builder.config.deployment.internal_dns = InternalDns::FromAddress { + address: builder + .internal_dns + .as_ref() + .expect("Must start Internal DNS before acquiring an address") + .dns_server + .local_address(), + }; let nexus_config = builder.config.clone(); let nexus_log = log.clone(); let nexus_handle = tokio::task::spawn(async move { @@ -111,25 +111,23 @@ async fn test_nexus_boots_before_dendrite() { // This call won't return successfully until we can... // 1. Contact the internal DNS system to find Dendrite // 2. Contact Dendrite - builder.config.deployment.database = - omicron_common::nexus_config::Database::FromUrl { - url: builder - .database - .as_ref() - .expect("Must start CRDB first") - .pg_config() - .clone(), - }; + builder.config.deployment.database = Database::FromUrl { + url: builder + .database + .as_ref() + .expect("Must start CRDB first") + .pg_config() + .clone(), + }; builder.config.pkg.dendrite = HashMap::new(); - builder.config.deployment.internal_dns = - omicron_common::nexus_config::InternalDns::FromAddress { - address: builder - .internal_dns - .as_ref() - .expect("Must start Internal DNS before acquiring an address") - .dns_server - .local_address(), - }; + builder.config.deployment.internal_dns = InternalDns::FromAddress { + address: builder + .internal_dns + .as_ref() + .expect("Must start Internal DNS before acquiring an address") + .dns_server + .local_address(), + }; let nexus_config = builder.config.clone(); let nexus_log = log.clone(); let nexus_handle = tokio::task::spawn(async move { diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs index 380ec1c975..f9bc4e1da7 100644 --- a/nexus/tests/integration_tests/schema.rs +++ b/nexus/tests/integration_tests/schema.rs @@ -6,6 +6,8 @@ use camino::Utf8PathBuf; use chrono::{DateTime, Utc}; use dropshot::test_util::LogContext; use futures::future::BoxFuture; +use nexus_config::NexusConfig; +use nexus_config::SchemaConfig; use nexus_db_model::schema::SCHEMA_VERSION as LATEST_SCHEMA_VERSION; use nexus_db_queries::db::datastore::{ all_sql_for_version_migration, EARLIEST_SUPPORTED_VERSION, @@ -14,8 +16,6 @@ use nexus_db_queries::db::DISALLOW_FULL_TABLE_SCAN_SQL; use nexus_test_utils::{db, load_test_config, ControlPlaneTestContextBuilder}; use omicron_common::api::external::SemverVersion; use omicron_common::api::internal::shared::SwitchLocation; -use omicron_common::nexus_config::Config; -use omicron_common::nexus_config::SchemaConfig; use omicron_test_utils::dev::db::{Client, CockroachInstance}; use pretty_assertions::{assert_eq, assert_ne}; use similar_asserts; @@ -45,7 +45,7 @@ async fn test_setup_just_crdb<'a>( // Helper to ensure we perform the same setup for the positive and negative test // cases. 
async fn test_setup<'a>( - config: &'a mut Config, + config: &'a mut NexusConfig, name: &'static str, ) -> ControlPlaneTestContextBuilder<'a, omicron_nexus::Server> { let mut builder = diff --git a/nexus/tests/integration_tests/subnet_allocation.rs b/nexus/tests/integration_tests/subnet_allocation.rs index 3362d5a4ac..d9d015bf26 100644 --- a/nexus/tests/integration_tests/subnet_allocation.rs +++ b/nexus/tests/integration_tests/subnet_allocation.rs @@ -10,6 +10,7 @@ use dropshot::HttpErrorResponseBody; use http::method::Method; use http::StatusCode; use ipnetwork::Ipv4Network; +use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; @@ -23,7 +24,6 @@ use omicron_common::api::external::{ ByteCount, IdentityMetadataCreateParams, InstanceCpuCount, InstanceNetworkInterface, Ipv4Net, }; -use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use std::net::Ipv4Addr; type ControlPlaneTestContext = diff --git a/nexus/tests/integration_tests/updates.rs b/nexus/tests/integration_tests/updates.rs index e830348103..9a50f9b4d5 100644 --- a/nexus/tests/integration_tests/updates.rs +++ b/nexus/tests/integration_tests/updates.rs @@ -13,6 +13,7 @@ use camino_tempfile::{Builder, Utf8TempDir, Utf8TempPath}; use clap::Parser; use dropshot::test_util::LogContext; use http::{Method, StatusCode}; +use nexus_config::UpdatesConfig; use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils::{load_test_config, test_setup, test_setup_with_config}; use omicron_common::api::external::{ @@ -20,7 +21,6 @@ use omicron_common::api::external::{ TufRepoInsertStatus, }; use omicron_common::api::internal::nexus::KnownArtifactKind; -use omicron_common::nexus_config::UpdatesConfig; use omicron_sled_agent::sim; use pretty_assertions::assert_eq; use serde::Deserialize; diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index a70b3b4638..d61f2d09e4 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -44,6 +44,7 @@ libc.workspace = true macaddr.workspace = true mg-admin-client.workspace = true nexus-client.workspace = true +nexus-config.workspace = true omicron-common.workspace = true once_cell.workspace = true oximeter.workspace = true diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index 77fd8a39de..144388a2de 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -857,11 +857,11 @@ struct ServicePortBuilder { impl ServicePortBuilder { fn new(config: &Config) -> Self { + use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use omicron_common::address::{ DNS_OPTE_IPV4_SUBNET, DNS_OPTE_IPV6_SUBNET, NEXUS_OPTE_IPV4_SUBNET, NEXUS_OPTE_IPV6_SUBNET, NTP_OPTE_IPV4_SUBNET, NTP_OPTE_IPV6_SUBNET, }; - use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; let external_dns_ips_set = config .external_dns_ips diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index f3ddfdbf89..67e4a48901 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -62,6 +62,7 @@ use illumos_utils::zpool::ZpoolName; use illumos_utils::{execute, PFEXEC}; use internal_dns::resolver::Resolver; use itertools::Itertools; +use nexus_config::{ConfigDropshotWithTls, DeploymentConfig}; use omicron_common::address::BOOTSTRAP_ARTIFACT_PORT; use omicron_common::address::CLICKHOUSE_KEEPER_PORT; use 
omicron_common::address::CLICKHOUSE_PORT;
@@ -86,9 +87,6 @@ use omicron_common::backoff::{
     retry_notify, retry_policy_internal_service_aggressive, BackoffError,
 };
 use omicron_common::ledger::{self, Ledger, Ledgerable};
-use omicron_common::nexus_config::{
-    self, ConfigDropshotWithTls, DeploymentConfig as NexusDeploymentConfig,
-};
 use once_cell::sync::OnceCell;
 use rand::prelude::SliceRandom;
 use sled_hardware::is_gimlet;
@@ -2090,7 +2088,7 @@ impl ServiceManager {
                     // Nexus takes a separate config file for parameters
                     // which cannot be known at packaging time.
                     let nexus_port = if *external_tls { 443 } else { 80 };
-                    let deployment_config = NexusDeploymentConfig {
+                    let deployment_config = DeploymentConfig {
                         id: zone_config.zone.id,
                         rack_id: sled_info.rack_id,
                         techport_external_server_port:
diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs
index 8854aee05c..f9db89135a 100644
--- a/sled-agent/src/sim/server.rs
+++ b/sled-agent/src/sim/server.rs
@@ -15,13 +15,13 @@ use crucible_agent_client::types::State as RegionState;
 use internal_dns::ServiceName;
 use nexus_client::types as NexusTypes;
 use nexus_client::types::{IpRange, Ipv4Range, Ipv6Range};
+use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES;
 use omicron_common::address::DNS_OPTE_IPV4_SUBNET;
 use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET;
 use omicron_common::api::external::MacAddr;
 use omicron_common::backoff::{
     retry_notify, retry_policy_internal_service_aggressive, BackoffError,
 };
-use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES;
 use omicron_common::FileKv;
 use slog::{info, Drain, Logger};
 use std::collections::HashMap;
diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml
index 48223e0291..0a607d5115 100644
--- a/test-utils/Cargo.toml
+++ b/test-utils/Cargo.toml
@@ -15,6 +15,7 @@ headers.workspace = true
 hex.workspace = true
 http.workspace = true
 libc.workspace = true
+nexus-config.workspace = true
 omicron-common.workspace = true
 pem.workspace = true
 ring.workspace = true
diff --git a/test-utils/src/dev/db.rs b/test-utils/src/dev/db.rs
index 3c1b46b4c2..c148a60e1c 100644
--- a/test-utils/src/dev/db.rs
+++ b/test-utils/src/dev/db.rs
@@ -8,7 +8,7 @@ use crate::dev::poll;
 use anyhow::anyhow;
 use anyhow::bail;
 use anyhow::Context;
-use omicron_common::postgres_config::PostgresConfigWithUrl;
+use nexus_config::PostgresConfigWithUrl;
 use std::collections::BTreeMap;
 use std::ffi::{OsStr, OsString};
 use std::fmt;

From c3f385ebe105ad7d2b281bfedfa20bf71ba5b434 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karen=20C=C3=A1rcamo?=
Date: Thu, 7 Mar 2024 09:36:19 +1300
Subject: [PATCH 076/157] NTP self assembling zone (#5168)

---
 illumos-utils/src/running_zone.rs |  12 +-
 package-manifest.toml             |  13 +
 sled-agent/src/services.rs        | 462 ++++++++++++++++++------------
 sled-agent/src/sled_agent.rs      |   2 +-
 sled-agent/src/smf_helper.rs      |  15 -
 smf/ntp/manifest/manifest.xml     | 180 ++++++------
 6 files changed, 385 insertions(+), 299 deletions(-)

diff --git a/illumos-utils/src/running_zone.rs b/illumos-utils/src/running_zone.rs
index 1c1df01980..d86a27e3f7 100644
--- a/illumos-utils/src/running_zone.rs
+++ b/illumos-utils/src/running_zone.rs
@@ -1139,7 +1139,7 @@ impl InstalledZone {

 #[derive(Clone)]
 pub struct FakeZoneBuilderConfig {
-    temp_dir: Arc<Utf8TempDir>,
+    temp_dir: Arc<Utf8PathBuf>,
 }

 #[derive(Clone, Default)]
@@ -1157,10 +1157,14 @@ pub struct ZoneBuilderFactory {
 impl ZoneBuilderFactory {
     /// For use in unit tests that don't require actual zone creation to occur.
- pub fn fake() -> Self { + pub fn fake(temp_dir: Option<&String>) -> Self { + let temp_dir = match temp_dir { + Some(dir) => Utf8PathBuf::from(dir), + None => Utf8TempDir::new().unwrap().into_path(), + }; Self { fake_cfg: Some(FakeZoneBuilderConfig { - temp_dir: Arc::new(Utf8TempDir::new().unwrap()), + temp_dir: Arc::new(temp_dir), }), } } @@ -1280,7 +1284,7 @@ impl<'a> ZoneBuilder<'a> { .new_control(None) .map_err(move |err| InstallZoneError::CreateVnic { zone, err })?; let fake_cfg = self.fake_cfg.unwrap(); - let temp_dir = fake_cfg.temp_dir.path().to_path_buf(); + let temp_dir = fake_cfg.temp_dir; (|| { let full_zone_name = InstalledZone::get_zone_name( self.zone_type?, diff --git a/package-manifest.toml b/package-manifest.toml index 065da4bf42..13123df9d0 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -273,12 +273,25 @@ output.type = "zone" [package.ntp] service_name = "ntp" only_for_targets.image = "standard" +source.type = "composite" +source.packages = [ + "ntp-svc.tar.gz", + "opte-interface-setup.tar.gz", + "zone-network-setup.tar.gz", + "zone-network-install.tar.gz" +] +output.type = "zone" + +[package.ntp-svc] +service_name = "ntp-svc" +only_for_targets.image = "standard" source.type = "local" source.paths = [ { from = "smf/ntp/manifest", to = "/var/svc/manifest/site/ntp" }, { from = "smf/ntp/method", to = "/var/svc/method" }, { from = "smf/ntp/etc", to = "/etc" }, ] +output.intermediate_only = true output.type = "zone" [package.omicron-gateway] diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index 67e4a48901..41acf6c079 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -36,7 +36,6 @@ use crate::params::{ }; use crate::profile::*; use crate::services_migration::{AllZoneRequests, SERVICES_LEDGER_FILENAME}; -use crate::smf_helper::Service; use crate::smf_helper::SmfHelper; use crate::zone_bundle::BundleError; use crate::zone_bundle::ZoneBundler; @@ -960,6 +959,7 @@ impl ServiceManager { self.ensure_all_omicron_zones( &mut existing_zones, omicron_zones_config, + None, ) .await?; Ok(()) @@ -1335,64 +1335,10 @@ impl ServiceManager { needed } - async fn configure_dns_client( - &self, - running_zone: &RunningZone, - dns_servers: &[IpAddr], - domain: &Option, - ) -> Result<(), Error> { - struct DnsClient {} - - impl crate::smf_helper::Service for DnsClient { - fn service_name(&self) -> String { - "dns_client".to_string() - } - fn smf_name(&self) -> String { - "svc:/network/dns/client".to_string() - } - fn should_import(&self) -> bool { - false - } - } - - let service = DnsClient {}; - let smfh = SmfHelper::new(&running_zone, &service); - - let etc = running_zone.root().join("etc"); - let resolv_conf = etc.join("resolv.conf"); - let nsswitch_conf = etc.join("nsswitch.conf"); - let nsswitch_dns = etc.join("nsswitch.dns"); - - if dns_servers.is_empty() { - // Disable the dns/client service - smfh.disable()?; - } else { - debug!(self.inner.log, "enabling {:?}", service.service_name()); - let mut config = String::new(); - if let Some(d) = domain { - config.push_str(&format!("domain {d}\n")); - } - for s in dns_servers { - config.push_str(&format!("nameserver {s}\n")); - } - - debug!(self.inner.log, "creating {resolv_conf}"); - tokio::fs::write(&resolv_conf, config) - .await - .map_err(|err| Error::io_path(&resolv_conf, err))?; - - tokio::fs::copy(&nsswitch_dns, &nsswitch_conf) - .await - .map_err(|err| Error::io_path(&nsswitch_dns, err))?; - - smfh.refresh()?; - smfh.enable()?; - } - Ok(()) - } - async fn dns_install( info: 
&SledAgentInfo,
+        ip_addrs: Option<Vec<IpAddr>>,
+        domain: &Option<String>,
     ) -> Result<ServiceBuilder, Error> {
         // We want to configure the dns/install SMF service inside the
         // zone with the list of DNS nameservers. This will cause
         // /etc/resolv.conf to be populated inside the zone. It works by
         // supplying values for an existing property group on the SMF
         // *service*. We're not creating a new property group, nor are
         // we configuring a property group on the instance.
-        let all_nameservers = info
-            .resolver
-            .lookup_all_ipv6(internal_dns::ServiceName::InternalDns)
-            .await?;
+
+        // Users may decide to provide specific addresses to set as
+        // nameservers, or this information can be retrieved from
+        // SledAgentInfo.
+        let nameservers = match ip_addrs {
+            None => {
+                let addrs = info
+                    .resolver
+                    .lookup_all_ipv6(internal_dns::ServiceName::InternalDns)
+                    .await?;
+
+                let mut servers: Vec<IpAddr> = vec![];
+                for a in addrs {
+                    let ip = IpAddr::V6(a);
+                    servers.push(ip);
+                }
+
+                servers
+            }
+            Some(servers) => servers,
+        };
+
         let mut dns_config_builder = PropertyGroupBuilder::new("install_props");
-        for ns_addr in &all_nameservers {
+        for ns_addr in &nameservers {
             dns_config_builder = dns_config_builder.add_property(
                 "nameserver",
                 "net_address",
                 &ns_addr.to_string(),
             );
         }
+
+        match domain {
+            Some(d) => {
+                dns_config_builder =
+                    dns_config_builder.add_property("domain", "astring", &d)
+            }
+            None => (),
+        }
+
         Ok(ServiceBuilder::new("network/dns/install")
             .add_property_group(dns_config_builder)
             // We do need to enable the default instance of the
@@ -1476,6 +1449,7 @@ impl ServiceManager {
         request: ZoneArgs<'_>,
         filesystems: &[zone::Fs],
         data_links: &[String],
+        fake_install_dir: Option<&String>,
     ) -> Result<RunningZone, Error> {
         let device_names = Self::devices_needed(&request)?;
         let (bootstrap_vnic, bootstrap_name_and_address) =
@@ -1541,7 +1515,11 @@ impl ServiceManager {
             ZoneArgs::Switch(_) => "switch".to_string(),
         };

-        let mut zone_builder = ZoneBuilderFactory::default().builder();
+        // We use the fake initialiser for testing
+        let mut zone_builder = match fake_install_dir {
+            None => ZoneBuilderFactory::default().builder(),
+            Some(dir) => ZoneBuilderFactory::fake(Some(dir)).builder(),
+        };
         if let Some(uuid) = unique_name {
             zone_builder = zone_builder.with_unique_name(uuid);
         }
@@ -1564,12 +1542,21 @@ impl ServiceManager {
             .install()
             .await?;

+        let disabled_ssh_service = ServiceBuilder::new("network/ssh")
+            .add_instance(ServiceInstanceBuilder::new("default").disable());
+
+        let disabled_dns_client_service =
+            ServiceBuilder::new("network/dns/client")
+                .add_instance(ServiceInstanceBuilder::new("default").disable());
+
+        let enabled_dns_client_service =
+            ServiceBuilder::new("network/dns/client")
+                .add_instance(ServiceInstanceBuilder::new("default"));
+
         // TODO(https://github.com/oxidecomputer/omicron/issues/1898):
         //
         // These zones are self-assembling -- after they boot, there should
         // be no "zlogin" necessary to initialize.
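        // As part of that self-assembly, name resolution is wired up through
        // `dns_install` above. A quick sketch of the two call shapes used by
        // the match arms below; the address and domain here are illustrative
        // values, not taken from this patch:
        //
        //   // NTP zones pass their configured nameservers through:
        //   let dns = Self::dns_install(
        //       info,
        //       Some(vec!["fd00:1122:3344::1".parse().unwrap()]),
        //       &Some("example.oxide.internal".to_string()),
        //   )
        //   .await?;
        //
        //   // Other zones fall back to looking up internal DNS:
        //   let dns = Self::dns_install(info, None, &None).await?;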
- let disabled_ssh_service = ServiceBuilder::new("network/ssh") - .add_instance(ServiceInstanceBuilder::new("default").disable()); match &request { ZoneArgs::Omicron(OmicronZoneConfigLocal { zone: @@ -1593,7 +1580,7 @@ impl ServiceManager { listen_addr, )?; - let dns_service = Self::dns_install(info).await?; + let dns_service = Self::dns_install(info, None, &None).await?; let config = PropertyGroupBuilder::new("config") .add_property("listen_addr", "astring", listen_addr) @@ -1609,7 +1596,8 @@ impl ServiceManager { .add_service(nw_setup_service) .add_service(disabled_ssh_service) .add_service(clickhouse_service) - .add_service(dns_service); + .add_service(dns_service) + .add_service(enabled_dns_client_service); profile .add_to_zone(&self.inner.log, &installed_zone) .await @@ -1641,7 +1629,7 @@ impl ServiceManager { listen_addr, )?; - let dns_service = Self::dns_install(info).await?; + let dns_service = Self::dns_install(info, None, &None).await?; let config = PropertyGroupBuilder::new("config") .add_property("listen_addr", "astring", listen_addr) @@ -1657,7 +1645,8 @@ impl ServiceManager { .add_service(nw_setup_service) .add_service(disabled_ssh_service) .add_service(clickhouse_keeper_service) - .add_service(dns_service); + .add_service(dns_service) + .add_service(enabled_dns_client_service); profile .add_to_zone(&self.inner.log, &installed_zone) .await @@ -1696,7 +1685,7 @@ impl ServiceManager { listen_addr, )?; - let dns_service = Self::dns_install(info).await?; + let dns_service = Self::dns_install(info, None, &None).await?; // Configure the CockroachDB service. let cockroachdb_config = PropertyGroupBuilder::new("config") @@ -1713,7 +1702,8 @@ impl ServiceManager { .add_service(nw_setup_service) .add_service(disabled_ssh_service) .add_service(cockroachdb_service) - .add_service(dns_service); + .add_service(dns_service) + .add_service(enabled_dns_client_service); profile .add_to_zone(&self.inner.log, &installed_zone) .await @@ -1760,6 +1750,7 @@ impl ServiceManager { let profile = ProfileBuilder::new("omicron") .add_service(nw_setup_service) .add_service(disabled_ssh_service) + .add_service(disabled_dns_client_service) .add_service( ServiceBuilder::new("oxide/crucible/agent") .add_instance( @@ -1805,6 +1796,7 @@ impl ServiceManager { let profile = ProfileBuilder::new("omicron") .add_service(nw_setup_service) .add_service(disabled_ssh_service) + .add_service(disabled_dns_client_service) .add_service( ServiceBuilder::new("oxide/crucible/pantry") .add_instance( @@ -1859,7 +1851,8 @@ impl ServiceManager { let profile = ProfileBuilder::new("omicron") .add_service(nw_setup_service) .add_service(disabled_ssh_service) - .add_service(oximeter_service); + .add_service(oximeter_service) + .add_service(disabled_dns_client_service); profile .add_to_zone(&self.inner.log, &installed_zone) .await @@ -1926,7 +1919,8 @@ impl ServiceManager { .add_service(nw_setup_service) .add_service(opte_interface_setup) .add_service(disabled_ssh_service) - .add_service(external_dns_service); + .add_service(external_dns_service) + .add_service(disabled_dns_client_service); profile .add_to_zone(&self.inner.log, &installed_zone) .await @@ -1935,6 +1929,115 @@ impl ServiceManager { })?; return Ok(RunningZone::boot(installed_zone).await?); } + ZoneArgs::Omicron(OmicronZoneConfigLocal { + zone: + OmicronZoneConfig { + zone_type: + OmicronZoneType::BoundaryNtp { + dns_servers, + ntp_servers, + domain, + .. + }, + underlay_address, + .. + }, + .. 
+            })
+            | ZoneArgs::Omicron(OmicronZoneConfigLocal {
+                zone:
+                    OmicronZoneConfig {
+                        zone_type:
+                            OmicronZoneType::InternalNtp {
+                                dns_servers,
+                                ntp_servers,
+                                domain,
+                                ..
+                            },
+                        underlay_address,
+                        ..
+                    },
+                ..
+            }) => {
+                let Some(info) = self.inner.sled_info.get() else {
+                    return Err(Error::SledAgentNotReady);
+                };
+
+                let static_addr = underlay_address.to_string();
+
+                let nw_setup_service = Self::zone_network_setup_install(
+                    info,
+                    &installed_zone,
+                    &static_addr.clone(),
+                )?;
+
+                let is_boundary = matches!(
+                    // It's safe to unwrap here as we already know it's one of
+                    // InternalNtp or BoundaryNtp.
+                    request.omicron_type().unwrap(),
+                    OmicronZoneType::BoundaryNtp { .. }
+                );
+
+                let rack_net =
+                    Ipv6Subnet::<RACK_PREFIX>::new(info.underlay_address)
+                        .net()
+                        .to_string();
+
+                let dns_install_service =
+                    Self::dns_install(info, Some(dns_servers.to_vec()), domain)
+                        .await?;
+
+                let mut ntp_config = PropertyGroupBuilder::new("config")
+                    .add_property("allow", "astring", &rack_net)
+                    .add_property(
+                        "boundary",
+                        "boolean",
+                        &is_boundary.to_string(),
+                    );
+
+                for s in ntp_servers {
+                    ntp_config = ntp_config.add_property(
+                        "server",
+                        "astring",
+                        &s.to_string(),
+                    );
+                }
+
+                let dns_client_service;
+                if dns_servers.is_empty() {
+                    dns_client_service = disabled_dns_client_service;
+                } else {
+                    dns_client_service = enabled_dns_client_service;
+                }
+
+                let ntp_service = ServiceBuilder::new("oxide/ntp")
+                    .add_instance(
+                        ServiceInstanceBuilder::new("default")
+                            .add_property_group(ntp_config),
+                    );
+
+                let mut profile = ProfileBuilder::new("omicron")
+                    .add_service(nw_setup_service)
+                    .add_service(disabled_ssh_service)
+                    .add_service(dns_install_service)
+                    .add_service(dns_client_service)
+                    .add_service(ntp_service);
+
+                // Only Boundary NTP needs an OPTE interface and port configured.
+                if is_boundary {
+                    let opte_interface_setup =
+                        Self::opte_interface_set_up_install(&installed_zone)?;
+                    profile = profile.add_service(opte_interface_setup)
+                }
+
+                profile
+                    .add_to_zone(&self.inner.log, &installed_zone)
+                    .await
+                    .map_err(|err| {
+                        Error::io("Failed to set up NTP profile", err)
+                    })?;
+
+                return Ok(RunningZone::boot(installed_zone).await?);
+            }
             _ => {}
         }
@@ -2249,73 +2352,14 @@ impl ServiceManager {
                     // service is enabled.
                     smfh.refresh()?;
                 }
-                OmicronZoneType::BoundaryNtp {
-                    ntp_servers,
-                    dns_servers,
-                    domain,
-                    ..
-                }
-                | OmicronZoneType::InternalNtp {
-                    ntp_servers,
-                    dns_servers,
-                    domain,
-                    ..
-                } => {
-                    let boundary = matches!(
-                        &zone_config.zone.zone_type,
-                        OmicronZoneType::BoundaryNtp { .. }
-                    );
-                    info!(
-                        self.inner.log,
-                        "Set up NTP service boundary={}, Servers={:?}",
-                        boundary,
-                        ntp_servers
-                    );
-
-                    let sled_info =
-                        if let Some(info) = self.inner.sled_info.get() {
-                            info
-                        } else {
-                            return Err(Error::SledAgentNotReady);
-                        };
-
-                    let rack_net = Ipv6Subnet::<RACK_PREFIX>::new(
-                        sled_info.underlay_address,
-                    )
-                    .net();
-
-                    smfh.setprop("config/allow", &format!("{}", rack_net))?;
-                    smfh.setprop(
-                        "config/boundary",
-                        if boundary { "true" } else { "false" },
-                    )?;
-
-                    if boundary {
-                        // Configure OPTE port for boundary NTP
-                        running_zone
-                            .ensure_address_for_port("public", 0)
-                            .await?;
-                    }
-
-                    smfh.delpropvalue("config/server", "*")?;
-                    for server in ntp_servers {
-                        smfh.addpropvalue("config/server", server)?;
-                    }
-                    self.configure_dns_client(
-                        &running_zone,
-                        &dns_servers,
-                        &domain,
-                    )
-                    .await?;
-
-                    smfh.refresh()?;
-                }
-                OmicronZoneType::Clickhouse { .. }
+                OmicronZoneType::BoundaryNtp { .. }
+                | OmicronZoneType::Clickhouse { .. }
                 | OmicronZoneType::ClickhouseKeeper { ..
} | OmicronZoneType::CockroachDb { .. } | OmicronZoneType::Crucible { .. } | OmicronZoneType::CruciblePantry { .. } | OmicronZoneType::ExternalDns { .. } + | OmicronZoneType::InternalNtp { .. } | OmicronZoneType::Oximeter { .. } => { panic!( "{} is a service which exists as part of a \ @@ -2755,6 +2799,7 @@ impl ServiceManager { zone: &OmicronZoneConfig, time_is_synchronized: bool, all_u2_pools: &Vec, + fake_install_dir: Option<&String>, ) -> Result { // Ensure the zone has been fully removed before we try to boot it. // @@ -2781,6 +2826,7 @@ impl ServiceManager { &[], // data_links= &[], + fake_install_dir, ) .await?; @@ -2800,6 +2846,7 @@ impl ServiceManager { requests: impl Iterator + Clone, time_is_synchronized: bool, all_u2_pools: &Vec, + fake_install_dir: Option<&String>, ) -> Result { if let Some(name) = requests.clone().map(|zone| zone.zone_name()).duplicates().next() @@ -2811,9 +2858,14 @@ impl ServiceManager { } let futures = requests.map(|zone| async move { - self.start_omicron_zone(&zone, time_is_synchronized, all_u2_pools) - .await - .map_err(|err| (zone.zone_name().to_string(), err)) + self.start_omicron_zone( + &zone, + time_is_synchronized, + all_u2_pools, + fake_install_dir, + ) + .await + .map_err(|err| (zone.zone_name().to_string(), err)) }); let results = futures::future::join_all(futures).await; @@ -2895,6 +2947,7 @@ impl ServiceManager { pub async fn ensure_all_omicron_zones_persistent( &self, mut request: OmicronZonesConfig, + fake_install_dir: Option<&String>, ) -> Result<(), Error> { let log = &self.inner.log; @@ -2957,7 +3010,12 @@ impl ServiceManager { let omicron_generation = request.generation; let ledger_generation = ledger_zone_config.ledger_generation; - self.ensure_all_omicron_zones(&mut existing_zones, request).await?; + self.ensure_all_omicron_zones( + &mut existing_zones, + request, + fake_install_dir, + ) + .await?; let zones = existing_zones .values() .map(|omicron_zone| omicron_zone.config.clone()) @@ -3003,6 +3061,7 @@ impl ServiceManager { // lock held when calling this function. existing_zones: &mut MutexGuard<'_, ZoneMap>, new_request: OmicronZonesConfig, + fake_install_dir: Option<&String>, ) -> Result<(), Error> { // Do some data-normalization to ensure we can compare the "requested // set" vs the "existing set" as HashSets. 
@@ -3017,7 +3076,6 @@ impl ServiceManager { old_zone_configs.difference(&requested_zones_set); let zones_to_be_added = requested_zones_set.difference(&old_zone_configs); - // Destroy zones that should not be running for zone in zones_to_be_removed { self.zone_bundle_and_try_remove(existing_zones, &zone).await; @@ -3040,6 +3098,7 @@ impl ServiceManager { zones_to_be_added, time_is_synchronized, &all_u2_pools, + fake_install_dir, ) .await?; @@ -3854,8 +3913,9 @@ impl ServiceManager { let zone_request = SwitchZoneConfigLocal { root, zone: request.clone() }; let zone_args = ZoneArgs::Switch(&zone_request); - let zone = - self.initialize_zone(zone_args, filesystems, data_links).await?; + let zone = self + .initialize_zone(zone_args, filesystems, data_links, None) + .await?; *sled_zone = SledLocalZone::Running { request: request.clone(), zone }; Ok(()) } @@ -4097,6 +4157,7 @@ mod test { mgr: &ServiceManager, id: Uuid, generation: Generation, + tmp_dir: String, ) { let address = SocketAddrV6::new(Ipv6Addr::LOCALHOST, EXPECTED_PORT, 0, 0); @@ -4110,6 +4171,7 @@ mod test { dns_servers: vec![], domain: None, }, + tmp_dir, ) .await .expect("Could not create service"); @@ -4120,17 +4182,21 @@ mod test { id: Uuid, generation: Generation, zone_type: OmicronZoneType, + tmp_dir: String, ) -> Result<(), Error> { let zone_prefix = format!("oxz_{}", zone_type.zone_type_str()); let _expectations = expect_new_service(&zone_prefix); - mgr.ensure_all_omicron_zones_persistent(OmicronZonesConfig { - generation, - zones: vec![OmicronZoneConfig { - id, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type, - }], - }) + mgr.ensure_all_omicron_zones_persistent( + OmicronZonesConfig { + generation, + zones: vec![OmicronZoneConfig { + id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type, + }], + }, + Some(&tmp_dir), + ) .await } @@ -4140,22 +4206,26 @@ mod test { mgr: &ServiceManager, id: Uuid, generation: Generation, + tmp_dir: String, ) { let address = SocketAddrV6::new(Ipv6Addr::LOCALHOST, EXPECTED_PORT, 0, 0); - mgr.ensure_all_omicron_zones_persistent(OmicronZonesConfig { - generation, - zones: vec![OmicronZoneConfig { - id, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type: OmicronZoneType::InternalNtp { - address, - ntp_servers: vec![], - dns_servers: vec![], - domain: None, - }, - }], - }) + mgr.ensure_all_omicron_zones_persistent( + OmicronZonesConfig { + generation, + zones: vec![OmicronZoneConfig { + id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: OmicronZoneType::InternalNtp { + address, + ntp_servers: vec![], + dns_servers: vec![], + domain: None, + }, + }], + }, + Some(&tmp_dir), + ) .await .unwrap(); } @@ -4326,7 +4396,13 @@ mod test { let v2 = v1.next(); let id = Uuid::new_v4(); - ensure_new_service(&mgr, id, v2).await; + ensure_new_service( + &mgr, + id, + v2, + String::from(test_config.config_dir.path().as_str()), + ) + .await; let found = mgr.omicron_zones_list().await.expect("failed to list zones"); @@ -4369,6 +4445,7 @@ mod test { id, v2, OmicronZoneType::Oximeter { address }, + String::from(test_config.config_dir.path().as_str()), ) .await; @@ -4403,6 +4480,7 @@ mod test { dns_servers: vec![], domain: None, }, + String::from(test_config.config_dir.path().as_str()), ) .await .unwrap(); @@ -4424,9 +4502,10 @@ mod test { let v2 = Generation::new().next(); let id = Uuid::new_v4(); - ensure_new_service(&mgr, id, v2).await; + let dir = String::from(test_config.config_dir.path().as_str()); + ensure_new_service(&mgr, id, v2, dir.clone()).await; let v3 = v2.next(); - 
ensure_existing_service(&mgr, id, v3).await; + ensure_existing_service(&mgr, id, v3, dir).await; let found = mgr.omicron_zones_list().await.expect("failed to list zones"); assert_eq!(found.generation, v3); @@ -4454,7 +4533,13 @@ mod test { let v2 = Generation::new().next(); let id = Uuid::new_v4(); - ensure_new_service(&mgr, id, v2).await; + ensure_new_service( + &mgr, + id, + v2, + String::from(test_config.config_dir.path().as_str()), + ) + .await; drop_service_manager(mgr); // Before we re-create the service manager - notably, using the same @@ -4491,7 +4576,13 @@ mod test { let v1 = Generation::new(); let v2 = v1.next(); let id = Uuid::new_v4(); - ensure_new_service(&mgr, id, v2).await; + ensure_new_service( + &mgr, + id, + v2, + String::from(test_config.config_dir.path().as_str()), + ) + .await; drop_service_manager(mgr); // Next, delete the ledger. This means the zone we just created will not @@ -4544,10 +4635,12 @@ mod test { domain: None, }, }]; - mgr.ensure_all_omicron_zones_persistent(OmicronZonesConfig { - generation: v2, - zones: zones.clone(), - }) + + let tmp_dir = String::from(test_config.config_dir.path().as_str()); + mgr.ensure_all_omicron_zones_persistent( + OmicronZonesConfig { generation: v2, zones: zones.clone() }, + Some(&tmp_dir), + ) .await .unwrap(); @@ -4573,11 +4666,12 @@ mod test { // Now try to apply that list with an older generation number. This // shouldn't work and the reported state should be unchanged. + let tmp_dir = String::from(test_config.config_dir.path().as_str()); let error = mgr - .ensure_all_omicron_zones_persistent(OmicronZonesConfig { - generation: v1, - zones: zones.clone(), - }) + .ensure_all_omicron_zones_persistent( + OmicronZonesConfig { generation: v1, zones: zones.clone() }, + Some(&tmp_dir), + ) .await .expect_err("unexpectedly went backwards in zones generation"); assert!(matches!( @@ -4592,10 +4686,10 @@ mod test { // Now try to apply that list with the same generation number that we // used before. This shouldn't work either. let error = mgr - .ensure_all_omicron_zones_persistent(OmicronZonesConfig { - generation: v2, - zones: zones.clone(), - }) + .ensure_all_omicron_zones_persistent( + OmicronZonesConfig { generation: v2, zones: zones.clone() }, + Some(&tmp_dir), + ) .await .expect_err("unexpectedly changed a single zone generation"); assert!(matches!( @@ -4609,10 +4703,10 @@ mod test { // But we should be able to apply this new list of zones as long as we // advance the generation number. 
let v3 = v2.next();
-        mgr.ensure_all_omicron_zones_persistent(OmicronZonesConfig {
-            generation: v3,
-            zones: zones.clone(),
-        })
+        mgr.ensure_all_omicron_zones_persistent(
+            OmicronZonesConfig { generation: v3, zones: zones.clone() },
+            Some(&tmp_dir),
+        )
         .await
         .expect("failed to remove all zones in a new generation");
         let found4 =
diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs
index 07c4dbbf4e..39ba1045f7 100644
--- a/sled-agent/src/sled_agent.rs
+++ b/sled-agent/src/sled_agent.rs
@@ -908,7 +908,7 @@ impl SledAgent {

         self.inner
             .services
-            .ensure_all_omicron_zones_persistent(requested_zones)
+            .ensure_all_omicron_zones_persistent(requested_zones, None)
             .await?;
         Ok(())
     }
diff --git a/sled-agent/src/smf_helper.rs b/sled-agent/src/smf_helper.rs
index d9ec4f02d6..837aa59157 100644
--- a/sled-agent/src/smf_helper.rs
+++ b/sled-agent/src/smf_helper.rs
@@ -236,19 +236,4 @@ impl<'t> SmfHelper<'t> {
             })?;
         Ok(())
     }
-
-    pub fn disable(&self) -> Result<(), Error> {
-        self.running_zone
-            .run_cmd(&[
-                illumos_utils::zone::SVCADM,
-                "disable",
-                "-t",
-                &self.default_smf_name,
-            ])
-            .map_err(|err| Error::ZoneCommand {
-                intent: format!("Disable {} service", self.default_smf_name),
-                err,
-            })?;
-        Ok(())
-    }
 }
diff --git a/smf/ntp/manifest/manifest.xml b/smf/ntp/manifest/manifest.xml
index 4b3827e0e6..7783bbe76c 100644
--- a/smf/ntp/manifest/manifest.xml
+++ b/smf/ntp/manifest/manifest.xml
@@ -1,105 +1,95 @@
 [manifest.xml hunk: XML markup lost in extraction; the NTP service manifest
 is rewritten here for the self-assembling zone]

From e103a5e433bf3cae5664e08db6aa7fbfc6c76a36 Mon Sep 17 00:00:00 2001
From: "oxide-reflector-bot[bot]" <130185838+oxide-reflector-bot[bot]@users.noreply.github.com>
Date: Wed, 6 Mar 2024 21:40:38 +0000
Subject: [PATCH 077/157] Update maghemite to ee0896f (#5209)

Updated maghemite to commit ee0896f.

Co-authored-by: reflector[bot] <130185838+reflector[bot]@users.noreply.github.com>
---
 package-manifest.toml               | 8 ++++----
 tools/maghemite_ddm_openapi_version | 2 +-
 tools/maghemite_mg_openapi_version  | 2 +-
 tools/maghemite_mgd_checksums       | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/package-manifest.toml b/package-manifest.toml
index 13123df9d0..5575efd285 100644
--- a/package-manifest.toml
+++ b/package-manifest.toml
@@ -497,7 +497,7 @@ source.repo = "maghemite"
 # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when
 # building `ddm-admin-client` (which will instruct you to update
 # `tools/maghemite_openapi_version`).
-source.commit = "4b0e584eec455a43c36af08ae207086965cef833"
+source.commit = "ee0896f5cf9ba0799f155dd8c95f6e1dabaf80ba"
 # The SHA256 digest is automatically posted to:
 # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image/<commit>/maghemite.sha256.txt
 source.sha256 = "f1407cb9aac188d6493d2b0f948c75aad2c36668ddf4ae2a1ed80e9dd395b35d"
@@ -513,7 +513,7 @@ source.repo = "maghemite"
 # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when
 # building `ddm-admin-client` (which will instruct you to update
 # `tools/maghemite_openapi_version`).
-source.commit = "4b0e584eec455a43c36af08ae207086965cef833" +source.commit = "ee0896f5cf9ba0799f155dd8c95f6e1dabaf80ba" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt source.sha256 = "fae53cb39536dc92d97cb9610de65b0acbce285e685d7167b719ea6311844fec" @@ -528,10 +528,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "4b0e584eec455a43c36af08ae207086965cef833" +source.commit = "ee0896f5cf9ba0799f155dd8c95f6e1dabaf80ba" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt -source.sha256 = "22996a6f3353296b848be729f14e78a42e7d3d6e62a4a918a5c2358ae011c8eb" +source.sha256 = "151aeb26414989cad571b3886786efbeeafd91c41a93a747c784cdc654d5876d" output.type = "zone" output.intermediate_only = true diff --git a/tools/maghemite_ddm_openapi_version b/tools/maghemite_ddm_openapi_version index d161091fa8..14ff749506 100644 --- a/tools/maghemite_ddm_openapi_version +++ b/tools/maghemite_ddm_openapi_version @@ -1,2 +1,2 @@ -COMMIT="4b0e584eec455a43c36af08ae207086965cef833" +COMMIT="ee0896f5cf9ba0799f155dd8c95f6e1dabaf80ba" SHA2="0b0dbc2f8bbc5d2d9be92d64c4865f8f9335355aae62f7de9f67f81dfb3f1803" diff --git a/tools/maghemite_mg_openapi_version b/tools/maghemite_mg_openapi_version index 475d273f4a..a2becb4efc 100644 --- a/tools/maghemite_mg_openapi_version +++ b/tools/maghemite_mg_openapi_version @@ -1,2 +1,2 @@ -COMMIT="4b0e584eec455a43c36af08ae207086965cef833" +COMMIT="ee0896f5cf9ba0799f155dd8c95f6e1dabaf80ba" SHA2="0ac038bbaa54d0ae0ac5ccaeff48f03070618372cca26c9d09b716b909bf9355" diff --git a/tools/maghemite_mgd_checksums b/tools/maghemite_mgd_checksums index ab84fafc01..2f58a88808 100644 --- a/tools/maghemite_mgd_checksums +++ b/tools/maghemite_mgd_checksums @@ -1,2 +1,2 @@ -CIDL_SHA256="22996a6f3353296b848be729f14e78a42e7d3d6e62a4a918a5c2358ae011c8eb" +CIDL_SHA256="151aeb26414989cad571b3886786efbeeafd91c41a93a747c784cdc654d5876d" MGD_LINUX_SHA256="943b0a52d279bde55a419e2cdb24873acc32703bc97bd599376117ee0edc1511" \ No newline at end of file From 390b51376ac592cc4f68bb421a7f304c1f2c144a Mon Sep 17 00:00:00 2001 From: Rain Date: Wed, 6 Mar 2024 16:39:53 -0800 Subject: [PATCH 078/157] [reconfigurator] add logger to BlueprintBuilder (#5208) I wanted to log a few debug statements and maybe errors, and this is pretty straightforward. --- .../db-queries/src/db/datastore/deployment.rs | 4 ++ .../planning/src/blueprint_builder.rs | 56 +++++++++++++++++++ nexus/reconfigurator/planning/src/planner.rs | 1 + 3 files changed, 61 insertions(+) diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 020916928d..a00318c9dc 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -1349,6 +1349,7 @@ mod tests { // different DNS version to test that that works. 
let new_dns_version = blueprint1.internal_dns_version.next(); let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, &blueprint1, new_dns_version, &policy, @@ -1500,6 +1501,7 @@ mod tests { ) .unwrap(); let blueprint2 = BlueprintBuilder::new_based_on( + &logctx.log, &blueprint1, Generation::new(), &EMPTY_POLICY, @@ -1508,6 +1510,7 @@ mod tests { .expect("failed to create builder") .build(); let blueprint3 = BlueprintBuilder::new_based_on( + &logctx.log, &blueprint1, Generation::new(), &EMPTY_POLICY, @@ -1604,6 +1607,7 @@ mod tests { // Create a child of blueprint3, and ensure when we set it as the target // with enabled=false, that status is serialized. let blueprint4 = BlueprintBuilder::new_based_on( + &logctx.log, &blueprint3, Generation::new(), &EMPTY_POLICY, diff --git a/nexus/reconfigurator/planning/src/blueprint_builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder.rs index d58d798770..ebb64c36f9 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder.rs @@ -35,6 +35,8 @@ use omicron_common::api::external::Generation; use omicron_common::api::external::IpNet; use omicron_common::api::external::MacAddr; use omicron_common::api::external::Vni; +use slog::o; +use slog::Logger; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::collections::HashSet; @@ -98,6 +100,9 @@ pub enum EnsureMultiple { /// However, the new blueprint can only be made the system's target if its /// parent is the current target. pub struct BlueprintBuilder<'a> { + #[allow(dead_code)] + log: Logger, + /// previous blueprint, on which this one will be based parent_blueprint: &'a Blueprint, internal_dns_version: Generation, @@ -186,11 +191,17 @@ impl<'a> BlueprintBuilder<'a> { /// Construct a new `BlueprintBuilder` based on a previous blueprint, /// starting with no changes from that state pub fn new_based_on( + log: &Logger, parent_blueprint: &'a Blueprint, internal_dns_version: Generation, policy: &'a Policy, creator: &str, ) -> anyhow::Result> { + let log = log.new(o!( + "component" => "BlueprintBuilder", + "parent_id" => parent_blueprint.id.to_string(), + )); + // Scan through the parent blueprint and build several sets of "used // resources". When adding new control plane zones to a sled, we may // need to allocate new resources to that zone. However, allocation at @@ -287,6 +298,7 @@ impl<'a> BlueprintBuilder<'a> { ); Ok(BlueprintBuilder { + log, parent_blueprint, internal_dns_version, policy, @@ -731,6 +743,7 @@ pub mod test { use omicron_common::address::Ipv6Subnet; use omicron_common::address::SLED_PREFIX; use omicron_common::api::external::ByteCount; + use omicron_test_utils::dev::test_setup_log; use sled_agent_client::types::{ Baseboard, Inventory, OmicronZoneConfig, OmicronZoneDataset, OmicronZoneType, OmicronZonesConfig, SledRole, @@ -945,6 +958,7 @@ pub mod test { fn test_initial() { // Test creating a blueprint from a collection and verifying that it // describes no changes. + let logctx = test_setup_log("blueprint_builder_test_initial"); let (collection, policy) = example(DEFAULT_N_SLEDS); let blueprint_initial = BlueprintBuilder::build_initial_from_collection( @@ -971,6 +985,7 @@ pub mod test { // Test a no-op blueprint. 
let builder = BlueprintBuilder::new_based_on( + &logctx.log, &blueprint_initial, Generation::new(), &policy, @@ -987,10 +1002,13 @@ pub mod test { assert_eq!(diff.sleds_added().count(), 0); assert_eq!(diff.sleds_removed().count(), 0); assert_eq!(diff.sleds_changed().count(), 0); + + logctx.cleanup_successful(); } #[test] fn test_basic() { + let logctx = test_setup_log("blueprint_builder_test_basic"); let (collection, mut policy) = example(DEFAULT_N_SLEDS); let blueprint1 = BlueprintBuilder::build_initial_from_collection( &collection, @@ -1002,6 +1020,7 @@ pub mod test { verify_blueprint(&blueprint1); let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, &blueprint1, Generation::new(), &policy, @@ -1036,6 +1055,7 @@ pub mod test { let new_sled_id = Uuid::new_v4(); let _ = policy_add_sled(&mut policy, new_sled_id); let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, &blueprint2, Generation::new(), &policy, @@ -1106,10 +1126,15 @@ pub mod test { }) .collect::>(); assert_eq!(crucible_pool_names, new_sled_resources.zpools); + + logctx.cleanup_successful(); } #[test] fn test_add_nexus_with_no_existing_nexus_zones() { + let logctx = test_setup_log( + "blueprint_builder_test_add_nexus_with_no_existing_nexus_zones", + ); let (mut collection, policy) = example(DEFAULT_N_SLEDS); // We don't care about the internal DNS version here. @@ -1134,6 +1159,7 @@ pub mod test { .expect("failed to create initial blueprint"); let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, &parent, internal_dns_version, &policy, @@ -1157,10 +1183,14 @@ pub mod test { matches!(err, Error::NoNexusZonesInParentBlueprint), "unexpected error {err}" ); + + logctx.cleanup_successful(); } #[test] fn test_add_nexus_error_cases() { + let logctx = + test_setup_log("blueprint_builder_test_add_nexus_error_cases"); let (mut collection, policy) = example(DEFAULT_N_SLEDS); // We don't care about the internal DNS version here. @@ -1196,6 +1226,7 @@ pub mod test { // Attempting to add Nexus to the sled we removed it from (with no // other changes to the environment) should succeed. let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, &parent, internal_dns_version, &policy, @@ -1214,6 +1245,7 @@ pub mod test { // from (with no other changes to the environment) should also // succeed. let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, &parent, internal_dns_version, &policy, @@ -1246,6 +1278,7 @@ pub mod test { policy.service_ip_pool_ranges = used_ip_ranges; let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, &parent, internal_dns_version, &policy, @@ -1269,10 +1302,16 @@ pub mod test { // `NEXUS_OPTE_*_SUBNET`. We could hack around that by creating the // `BlueprintBuilder` and mucking with its internals, but that doesn't // seem like a particularly useful test either. 
+ + logctx.cleanup_successful(); } #[test] fn test_invalid_parent_blueprint_two_zones_with_same_external_ip() { + let logctx = test_setup_log( + "blueprint_builder_test_invalid_parent_blueprint_\ + two_zones_with_same_external_ip", + ); let (mut collection, policy) = example(DEFAULT_N_SLEDS); // We should fail if the parent blueprint claims to contain two @@ -1309,6 +1348,7 @@ pub mod test { .unwrap(); match BlueprintBuilder::new_based_on( + &logctx.log, &parent, Generation::new(), &policy, @@ -1320,10 +1360,16 @@ pub mod test { "unexpected error: {err:#}" ), }; + + logctx.cleanup_successful(); } #[test] fn test_invalid_parent_blueprint_two_nexus_zones_with_same_nic_ip() { + let logctx = test_setup_log( + "blueprint_builder_test_invalid_parent_blueprint_\ + two_nexus_zones_with_same_nic_ip", + ); let (mut collection, policy) = example(DEFAULT_N_SLEDS); // We should fail if the parent blueprint claims to contain two @@ -1358,6 +1404,7 @@ pub mod test { .unwrap(); match BlueprintBuilder::new_based_on( + &logctx.log, &parent, Generation::new(), &policy, @@ -1369,10 +1416,16 @@ pub mod test { "unexpected error: {err:#}" ), }; + + logctx.cleanup_successful(); } #[test] fn test_invalid_parent_blueprint_two_zones_with_same_vnic_mac() { + let logctx = test_setup_log( + "blueprint_builder_test_invalid_parent_blueprint_\ + two_zones_with_same_vnic_mac", + ); let (mut collection, policy) = example(DEFAULT_N_SLEDS); // We should fail if the parent blueprint claims to contain two @@ -1407,6 +1460,7 @@ pub mod test { .unwrap(); match BlueprintBuilder::new_based_on( + &logctx.log, &parent, Generation::new(), &policy, @@ -1418,5 +1472,7 @@ pub mod test { "unexpected error: {err:#}" ), }; + + logctx.cleanup_successful(); } } diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 0773fec2bf..9f4b653507 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -48,6 +48,7 @@ impl<'a> Planner<'a> { inventory: &'a Collection, ) -> anyhow::Result> { let blueprint = BlueprintBuilder::new_based_on( + &log, parent_blueprint, internal_dns_version, policy, From 9791b34127ccbcb16a170d22706cccd972532d62 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 7 Mar 2024 00:15:05 -0800 Subject: [PATCH 079/157] chore(deps): update rust crate rustls-pemfile to 2.1.1 (#5185) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 10 +++++----- Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 18c783037f..461aef7c90 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1985,7 +1985,7 @@ dependencies = [ "percent-encoding", "proc-macro2", "rustls 0.22.2", - "rustls-pemfile 2.1.0", + "rustls-pemfile 2.1.1", "schemars", "serde", "serde_json", @@ -5099,7 +5099,7 @@ dependencies = [ "reqwest", "ring 0.17.8", "rustls 0.22.2", - "rustls-pemfile 2.1.0", + "rustls-pemfile 2.1.1", "samael", "schemars", "semver 1.0.22", @@ -7449,7 +7449,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", - "rustls-pemfile 2.1.0", + "rustls-pemfile 2.1.1", "rustls-pki-types", "schannel", "security-framework", @@ -7466,9 +7466,9 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.0" +version = "2.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c333bb734fcdedcea57de1602543590f545f127dc8b533324318fd492c5c70b" +checksum = "f48172685e6ff52a556baa527774f61fcaa884f59daf3375c62a3f1cd2549dab" dependencies = [ "base64", "rustls-pki-types", diff --git a/Cargo.toml b/Cargo.toml index 474739a932..9969ee68fb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -336,7 +336,7 @@ rpassword = "7.3.1" rstest = "0.18.2" rustfmt-wrapper = "0.2" rustls = "0.22.2" -rustls-pemfile = "2.1.0" +rustls-pemfile = "2.1.1" rustyline = "13.0.0" samael = { version = "0.0.14", features = ["xmlsec"] } schemars = "0.8.16" From 57ad1045a69fc260573a9418423a8dff6bfb729e Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 7 Mar 2024 00:18:17 -0800 Subject: [PATCH 080/157] chore(deps): update rust crate http to 0.2.12 (#5200) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 58 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 461aef7c90..00a8fb0074 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1856,7 +1856,7 @@ dependencies = [ "dns-service-client", "dropshot", "expectorate", - "http 0.2.11", + "http 0.2.12", "omicron-test-utils", "omicron-workspace-hack", "openapi-lint", @@ -1889,7 +1889,7 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", - "http 0.2.11", + "http 0.2.12", "omicron-workspace-hack", "progenitor", "reqwest", @@ -1941,7 +1941,7 @@ dependencies = [ "anyhow", "chrono", "futures", - "http 0.2.11", + "http 0.2.12", "ipnetwork", "omicron-workspace-hack", "omicron-zone-package", @@ -1976,7 +1976,7 @@ dependencies = [ "form_urlencoded", "futures", "hostname", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.27", "indexmap 2.2.5", "multer", @@ -2158,7 +2158,7 @@ dependencies = [ "async-trait", "base64", "chrono", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.27", "omicron-sled-agent", "omicron-test-utils", @@ -2812,7 +2812,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http 0.2.11", + "http 0.2.12", "indexmap 2.2.5", "slab", "tokio", @@ -2882,7 +2882,7 @@ dependencies = [ "base64", "bytes", "headers-core", - "http 0.2.11", + "http 0.2.12", "httpdate", "mime", "sha1", @@ -2894,7 +2894,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http 0.2.11", + "http 0.2.12", ] [[package]] @@ -3011,9 +3011,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -3038,7 +3038,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", - "http 0.2.11", + "http 0.2.12", "pin-project-lite", ] @@ -3081,7 +3081,7 @@ dependencies = [ "crossbeam-channel", "form_urlencoded", "futures", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.27", "log", "once_cell", @@ -3168,7 +3168,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.5", "httparse", "httpdate", @@ -3206,7 
+3206,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.27", "rustls 0.21.9", "tokio", @@ -3239,7 +3239,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "318ca89e4827e7fe4ddd2824f52337239796ae8ecc761a663324407dc3d8d7e7" dependencies = [ "futures-util", - "http 0.2.11", + "http 0.2.12", "http-range", "httpdate", "hyper 0.14.27", @@ -3469,7 +3469,7 @@ dependencies = [ "futures", "hex", "hex-literal", - "http 0.2.11", + "http 0.2.12", "illumos-utils", "installinator-artifact-client", "installinator-common", @@ -4364,7 +4364,7 @@ dependencies = [ "futures", "gateway-client", "headers", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.27", "hyper-rustls 0.26.0", "illumos-utils", @@ -4555,7 +4555,7 @@ dependencies = [ "gateway-messages", "gateway-test-utils", "headers", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.27", "internal-dns", "nexus-config", @@ -4907,7 +4907,7 @@ dependencies = [ "expectorate", "futures", "hex", - "http 0.2.11", + "http 0.2.12", "ipnetwork", "libc", "macaddr", @@ -4984,7 +4984,7 @@ dependencies = [ "gateway-sp-comms", "gateway-test-utils", "hex", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.27", "illumos-utils", "ipcc", @@ -5044,7 +5044,7 @@ dependencies = [ "gateway-test-utils", "headers", "hex", - "http 0.2.11", + "http 0.2.12", "httptest", "hubtools", "hyper 0.14.27", @@ -5273,7 +5273,7 @@ dependencies = [ "glob", "guppy", "hex", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.27", "hyper-staticfile", "illumos-utils", @@ -5347,7 +5347,7 @@ dependencies = [ "filetime", "headers", "hex", - "http 0.2.11", + "http 0.2.12", "libc", "nexus-config", "omicron-common", @@ -5698,7 +5698,7 @@ dependencies = [ "base64", "chrono", "futures", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.27", "omicron-workspace-hack", "progenitor", @@ -5855,7 +5855,7 @@ dependencies = [ "chrono", "dropshot", "futures", - "http 0.2.11", + "http 0.2.12", "kstat-rs", "omicron-workspace-hack", "oximeter", @@ -6599,7 +6599,7 @@ source = "git+https://github.com/oxidecomputer/progenitor?branch=main#08bbafc251 dependencies = [ "getopts", "heck 0.4.1", - "http 0.2.11", + "http 0.2.12", "indexmap 2.2.5", "openapiv3", "proc-macro2", @@ -7094,7 +7094,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.5", "hyper 0.14.27", "hyper-rustls 0.24.2", @@ -9675,7 +9675,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http 0.2.11", + "http 0.2.12", "httparse", "log", "rand 0.8.5", @@ -10401,7 +10401,7 @@ dependencies = [ "gateway-messages", "gateway-test-utils", "hex", - "http 0.2.11", + "http 0.2.12", "hubtools", "hyper 0.14.27", "illumos-utils", diff --git a/Cargo.toml b/Cargo.toml index 9969ee68fb..534c33e713 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -225,7 +225,7 @@ hex = "0.4.3" hex-literal = "0.4.1" highway = "1.1.0" hkdf = "0.12.4" -http = "0.2.11" +http = "0.2.12" httptest = "0.15.5" hubtools = { git = "https://github.com/oxidecomputer/hubtools.git", branch = "main" } humantime = "2.1.0" From 5620f9c900ebfd18dde3a9150c682f4ea3063132 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karen=20C=C3=A1rcamo?= Date: Thu, 7 Mar 2024 22:19:21 +1300 Subject: [PATCH 081/157] [sled-agent] Nexus self assembling zone (#5215) Related: https://github.com/oxidecomputer/omicron/issues/1898 Closes: https://github.com/oxidecomputer/omicron/issues/2882 --- 
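A condensed sketch of the config hand-off this patch implements: the Nexus
zone image ships `config-partial.toml`, and sled-agent completes it before
boot by appending a serialized `[deployment]` table. Error handling is
abbreviated here, and `zone_root` stands in for the installed zone's root
path:

    let config_dir = zone_root.join("var/svc/manifest/site/nexus");
    // config.toml starts life as a copy of config-partial.toml.
    let config_path = config_dir.join("config.toml");
    let mut map = toml::map::Map::new();
    map.insert(
        "deployment".to_string(),
        toml::Value::try_from(&deployment_config)
            .expect("Cannot serialize config"),
    );
    // Appended, rather than merged: the partial config must not already
    // contain a [deployment] table.
    let appended = toml::to_string(&map).unwrap();
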
.github/buildomat/jobs/deploy.sh | 2 +- .github/buildomat/jobs/package.sh | 6 +- .github/buildomat/jobs/tuf-repo.sh | 2 +- illumos-utils/src/running_zone.rs | 10 +- package-manifest.toml | 15 +- sled-agent/src/services.rs | 261 ++++++++++-------- smf/nexus/manifest.xml | 17 +- tufaceous/README.adoc | 4 +- .../tests/integration-tests/command_tests.rs | 6 +- 9 files changed, 196 insertions(+), 127 deletions(-) diff --git a/.github/buildomat/jobs/deploy.sh b/.github/buildomat/jobs/deploy.sh index 5e43ff7f7c..c16d523b03 100755 --- a/.github/buildomat/jobs/deploy.sh +++ b/.github/buildomat/jobs/deploy.sh @@ -161,7 +161,7 @@ cd /opt/oxide/work ptime -m tar xvzf /input/package/work/package.tar.gz cp /input/package/work/zones/* out/ -mv out/omicron-nexus-single-sled.tar.gz out/omicron-nexus.tar.gz +mv out/nexus-single-sled.tar.gz out/nexus.tar.gz mkdir tests for p in /input/ci-tools/work/end-to-end-tests/*.gz; do ptime -m gunzip < "$p" > "tests/$(basename "${p%.gz}")" diff --git a/.github/buildomat/jobs/package.sh b/.github/buildomat/jobs/package.sh index 13f374779c..dc89bc787b 100755 --- a/.github/buildomat/jobs/package.sh +++ b/.github/buildomat/jobs/package.sh @@ -84,7 +84,7 @@ stamp_packages() { # Keep the single-sled Nexus zone around for the deploy job. (The global zone # build below overwrites the file.) -mv out/omicron-nexus.tar.gz out/omicron-nexus-single-sled.tar.gz +mv out/nexus.tar.gz out/nexus-single-sled.tar.gz # Build necessary for the global zone ptime -m cargo run --locked --release --bin omicron-package -- \ @@ -115,8 +115,8 @@ zones=( out/crucible-zone.tar.gz out/external-dns.tar.gz out/internal-dns.tar.gz - out/omicron-nexus.tar.gz - out/omicron-nexus-single-sled.tar.gz + out/nexus.tar.gz + out/nexus-single-sled.tar.gz out/oximeter.tar.gz out/propolis-server.tar.gz out/switch-*.tar.gz diff --git a/.github/buildomat/jobs/tuf-repo.sh b/.github/buildomat/jobs/tuf-repo.sh index 14c2293f5b..aca43422d9 100644 --- a/.github/buildomat/jobs/tuf-repo.sh +++ b/.github/buildomat/jobs/tuf-repo.sh @@ -93,7 +93,7 @@ target/release/omicron-package -t default target create -i standard -m gimlet -s ln -s /input/package/work/zones/* out/ rm out/switch-softnpu.tar.gz # not used when target switch=asic rm out/omicron-gateway-softnpu.tar.gz # not used when target switch=asic -rm out/omicron-nexus-single-sled.tar.gz # only used for deploy tests +rm out/nexus-single-sled.tar.gz # only used for deploy tests for zone in out/*.tar.gz; do target/release/omicron-package stamp "$(basename "${zone%.tar.gz}")" "$VERSION" done diff --git a/illumos-utils/src/running_zone.rs b/illumos-utils/src/running_zone.rs index d86a27e3f7..02302347cd 100644 --- a/illumos-utils/src/running_zone.rs +++ b/illumos-utils/src/running_zone.rs @@ -404,7 +404,7 @@ impl RunningZone { /// Returns the filesystem path to the zone's root in the GZ. pub fn root(&self) -> Utf8PathBuf { - self.inner.zonepath.join(Self::ROOT_FS_PATH) + self.inner.root() } pub fn control_interface(&self) -> AddrObject { @@ -1094,6 +1094,9 @@ pub struct InstalledZone { } impl InstalledZone { + /// The path to the zone's root filesystem (i.e., `/`), within zonepath. + pub const ROOT_FS_PATH: &'static str = "root"; + /// Returns the name of a zone, based on the base zone name plus any unique /// identifying info. /// @@ -1135,6 +1138,11 @@ impl InstalledZone { pub fn opte_ports(&self) -> impl Iterator { self.opte_ports.iter().map(|(port, _)| port) } + + /// Returns the filesystem path to the zone's root in the GZ. 
+ pub fn root(&self) -> Utf8PathBuf { + self.zonepath.join(Self::ROOT_FS_PATH) + } } #[derive(Clone)] diff --git a/package-manifest.toml b/package-manifest.toml index 5575efd285..76f52ebc35 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -97,9 +97,21 @@ source.paths = [ output.type = "zone" output.intermediate_only = true -[package.omicron-nexus] +[package.nexus] service_name = "nexus" only_for_targets.image = "standard" +source.type = "composite" +source.packages = [ + "omicron-nexus.tar.gz", + "zone-network-setup.tar.gz", + "zone-network-install.tar.gz", + "opte-interface-setup.tar.gz" +] +output.type = "zone" + +[package.omicron-nexus] +service_name = "omicron-nexus" +only_for_targets.image = "standard" source.type = "local" source.rust.binary_names = ["nexus", "schema-updater"] source.rust.release = true @@ -115,6 +127,7 @@ setup_hint = """ - Run `./tools/ci_download_console` to download the web console assets - Run `pkg install library/postgresql-13` to download Postgres libraries """ +output.intermediate_only = true [package.oximeter] service_name = "oximeter" diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index 41acf6c079..fed0114bc6 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -2038,6 +2038,153 @@ impl ServiceManager { return Ok(RunningZone::boot(installed_zone).await?); } + ZoneArgs::Omicron(OmicronZoneConfigLocal { + zone: + OmicronZoneConfig { + zone_type: + OmicronZoneType::Nexus { + internal_address, + external_tls, + external_dns_servers, + .. + }, + underlay_address, + id, + }, + .. + }) => { + let Some(info) = self.inner.sled_info.get() else { + return Err(Error::SledAgentNotReady); + }; + + let static_addr = underlay_address.to_string(); + + let nw_setup_service = Self::zone_network_setup_install( + &info, + &installed_zone, + &static_addr.clone(), + )?; + + // While Nexus will be reachable via `external_ip`, it + // communicates atop an OPTE port which operates on a + // VPC private IP. OPTE will map the private IP to the + // external IP automatically. + let opte_interface_setup = + Self::opte_interface_set_up_install(&installed_zone)?; + + let port_idx = 0; + let port = installed_zone + .opte_ports() + .nth(port_idx) + .ok_or_else(|| { + Error::ZoneEnsureAddress( + EnsureAddressError::MissingOptePort { + zone: String::from(installed_zone.name()), + port_idx, + }, + ) + })?; + let opte_ip = port.ip(); + + // Nexus takes a separate config file for parameters + // which cannot be known at packaging time. + let nexus_port = if *external_tls { 443 } else { 80 }; + let deployment_config = DeploymentConfig { + id: *id, + rack_id: info.rack_id, + techport_external_server_port: NEXUS_TECHPORT_EXTERNAL_PORT, + + dropshot_external: ConfigDropshotWithTls { + tls: *external_tls, + dropshot: dropshot::ConfigDropshot { + bind_address: SocketAddr::new(*opte_ip, nexus_port), + // This has to be large enough to support: + // - bulk writes to disks + request_body_max_bytes: 8192 * 1024, + default_handler_task_mode: + HandlerTaskMode::Detached, + }, + }, + dropshot_internal: dropshot::ConfigDropshot { + bind_address: (*internal_address).into(), + // This has to be large enough to support, among + // other things, the initial list of TLS + // certificates provided by the customer during + // rack setup. 
+ request_body_max_bytes: 10 * 1024 * 1024, + default_handler_task_mode: HandlerTaskMode::Detached, + }, + internal_dns: nexus_config::InternalDns::FromSubnet { + subnet: Ipv6Subnet::::new( + info.underlay_address, + ), + }, + database: nexus_config::Database::FromDns, + external_dns_servers: external_dns_servers.clone(), + }; + + // Copy the partial config file to the expected + // location. + let config_dir = Utf8PathBuf::from(format!( + "{}/var/svc/manifest/site/nexus", + installed_zone.root() + )); + // The filename of a half-completed config, in need of + // parameters supplied at runtime. + const PARTIAL_LEDGER_FILENAME: &str = "config-partial.toml"; + // The filename of a completed config, merging the + // partial config with additional appended parameters + // known at runtime. + const COMPLETE_LEDGER_FILENAME: &str = "config.toml"; + let partial_config_path = + config_dir.join(PARTIAL_LEDGER_FILENAME); + let config_path = config_dir.join(COMPLETE_LEDGER_FILENAME); + tokio::fs::copy(partial_config_path, &config_path) + .await + .map_err(|err| Error::io_path(&config_path, err))?; + + // Serialize the configuration and append it into the + // file. + let serialized_cfg = toml::Value::try_from(&deployment_config) + .expect("Cannot serialize config"); + let mut map = toml::map::Map::new(); + map.insert("deployment".to_string(), serialized_cfg); + let config_str = toml::to_string(&map).map_err(|err| { + Error::TomlSerialize { path: config_path.clone(), err } + })?; + let mut file = tokio::fs::OpenOptions::new() + .append(true) + .open(&config_path) + .await + .map_err(|err| Error::io_path(&config_path, err))?; + file.write_all(b"\n\n") + .await + .map_err(|err| Error::io_path(&config_path, err))?; + file.write_all(config_str.as_bytes()) + .await + .map_err(|err| Error::io_path(&config_path, err))?; + + let nexus_config = PropertyGroupBuilder::new("config"); + let nexus_service = ServiceBuilder::new("oxide/nexus") + .add_instance( + ServiceInstanceBuilder::new("default") + .add_property_group(nexus_config), + ); + + let profile = ProfileBuilder::new("omicron") + .add_service(nw_setup_service) + .add_service(opte_interface_setup) + .add_service(disabled_ssh_service) + .add_service(nexus_service) + .add_service(disabled_dns_client_service); + profile + .add_to_zone(&self.inner.log, &installed_zone) + .await + .map_err(|err| { + Error::io("Failed to setup Nexus profile", err) + })?; + return Ok(RunningZone::boot(installed_zone).await?); + } _ => {} } @@ -2165,119 +2312,6 @@ impl ServiceManager { smfh.import_manifest()?; match &zone_config.zone.zone_type { - OmicronZoneType::Nexus { - internal_address, - external_tls, - external_dns_servers, - .. - } => { - info!(self.inner.log, "Setting up Nexus service"); - - let sled_info = self - .inner - .sled_info - .get() - .ok_or(Error::SledAgentNotReady)?; - - // While Nexus will be reachable via `external_ip`, it - // communicates atop an OPTE port which operates on a - // VPC private IP. OPTE will map the private IP to the - // external IP automatically. - let port_ip = running_zone - .ensure_address_for_port("public", 0) - .await? - .ip(); - - // Nexus takes a separate config file for parameters - // which cannot be known at packaging time. 
- let nexus_port = if *external_tls { 443 } else { 80 }; - let deployment_config = DeploymentConfig { - id: zone_config.zone.id, - rack_id: sled_info.rack_id, - techport_external_server_port: - NEXUS_TECHPORT_EXTERNAL_PORT, - - dropshot_external: ConfigDropshotWithTls { - tls: *external_tls, - dropshot: dropshot::ConfigDropshot { - bind_address: SocketAddr::new( - port_ip, nexus_port, - ), - // This has to be large enough to support: - // - bulk writes to disks - request_body_max_bytes: 8192 * 1024, - default_handler_task_mode: - HandlerTaskMode::Detached, - }, - }, - dropshot_internal: dropshot::ConfigDropshot { - bind_address: (*internal_address).into(), - // This has to be large enough to support, among - // other things, the initial list of TLS - // certificates provided by the customer during - // rack setup. - request_body_max_bytes: 10 * 1024 * 1024, - default_handler_task_mode: - HandlerTaskMode::Detached, - }, - internal_dns: - nexus_config::InternalDns::FromSubnet { - subnet: Ipv6Subnet::::new( - sled_info.underlay_address, - ), - }, - database: nexus_config::Database::FromDns, - external_dns_servers: external_dns_servers.clone(), - }; - - // Copy the partial config file to the expected - // location. - let config_dir = Utf8PathBuf::from(format!( - "{}/var/svc/manifest/site/nexus", - running_zone.root() - )); - // The filename of a half-completed config, in need of - // parameters supplied at runtime. - const PARTIAL_LEDGER_FILENAME: &str = - "config-partial.toml"; - // The filename of a completed config, merging the - // partial config with additional appended parameters - // known at runtime. - const COMPLETE_LEDGER_FILENAME: &str = "config.toml"; - let partial_config_path = - config_dir.join(PARTIAL_LEDGER_FILENAME); - let config_path = - config_dir.join(COMPLETE_LEDGER_FILENAME); - tokio::fs::copy(partial_config_path, &config_path) - .await - .map_err(|err| Error::io_path(&config_path, err))?; - - // Serialize the configuration and append it into the - // file. - let serialized_cfg = - toml::Value::try_from(&deployment_config) - .expect("Cannot serialize config"); - let mut map = toml::map::Map::new(); - map.insert("deployment".to_string(), serialized_cfg); - let config_str = - toml::to_string(&map).map_err(|err| { - Error::TomlSerialize { - path: config_path.clone(), - err, - } - })?; - let mut file = tokio::fs::OpenOptions::new() - .append(true) - .open(&config_path) - .await - .map_err(|err| Error::io_path(&config_path, err))?; - file.write_all(b"\n\n") - .await - .map_err(|err| Error::io_path(&config_path, err))?; - file.write_all(config_str.as_bytes()) - .await - .map_err(|err| Error::io_path(&config_path, err))?; - } OmicronZoneType::InternalDns { http_address, dns_address, @@ -2360,6 +2394,7 @@ impl ServiceManager { | OmicronZoneType::CruciblePantry { .. } | OmicronZoneType::ExternalDns { .. } | OmicronZoneType::InternalNtp { .. } + | OmicronZoneType::Nexus { .. } | OmicronZoneType::Oximeter { .. 
} => { panic!( "{} is a service which exists as part of a \ diff --git a/smf/nexus/manifest.xml b/smf/nexus/manifest.xml index 5a72df8a22..7f1a3e76ff 100644 --- a/smf/nexus/manifest.xml +++ b/smf/nexus/manifest.xml @@ -4,28 +4,41 @@ - + + + + + + + + + + + +
diff --git a/tufaceous/README.adoc b/tufaceous/README.adoc index 973a37aa90..86e4cfc4e5 100644 --- a/tufaceous/README.adoc +++ b/tufaceous/README.adoc @@ -31,6 +31,6 @@ tuftool [-r PATH/TO/REPO] add-zone [--name NAME] ZONE_TAR_GZ VERSION Example: ---- -$ tuftool add-zone out/omicron-nexus.tar.gz 0.0.0 -added zone omicron-nexus, version 0.0.0 +$ tuftool add-zone out/nexus.tar.gz 0.0.0 +added zone nexus, version 0.0.0 ----
diff --git a/tufaceous/tests/integration-tests/command_tests.rs b/tufaceous/tests/integration-tests/command_tests.rs index 72c3a1a13a..20c6a06d66 100644 --- a/tufaceous/tests/integration-tests/command_tests.rs +++ b/tufaceous/tests/integration-tests/command_tests.rs @@ -25,7 +25,7 @@ async fn test_init_and_add() -> Result<()> { cmd.assert().success(); // Create a couple of stub files on disk. - let nexus_path = tempdir.path().join("omicron-nexus.tar.gz"); + let nexus_path = tempdir.path().join("nexus.tar.gz"); fs_err::write(&nexus_path, "test")?; let unknown_path = tempdir.path().join("my-unknown-kind.tar.gz"); fs_err::write(&unknown_path, "unknown test")?; @@ -65,7 +65,7 @@ async fn test_init_and_add() -> Result<()> { let mut artifacts_iter = artifacts.artifacts.into_iter(); let artifact = artifacts_iter.next().unwrap(); - assert_eq!(artifact.name, "omicron-nexus", "artifact name"); + assert_eq!(artifact.name, "nexus", "artifact name"); assert_eq!(artifact.version, "42.0.0".parse().unwrap(), "artifact version"); assert_eq!( artifact.kind, @@ -73,7 +73,7 @@ "artifact kind" ); assert_eq!( - artifact.target, "gimlet_sp-omicron-nexus-42.0.0.tar.gz", + artifact.target, "gimlet_sp-nexus-42.0.0.tar.gz", "artifact target" );

From bcad9a301d74d3bf1ad13130a58776bfdc708ba5 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 7 Mar 2024 09:57:16 +0000 Subject: [PATCH 082/157] chore(deps): update taiki-e/install-action digest to 4978b3c (#5216) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`dabb9c1` -> `4978b3c`](https://togithub.com/taiki-e/install-action/compare/dabb9c1...4978b3c) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate).
Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index cac987b277..557cfedbd3 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@dabb9c1ee51c21c545764a0d517f069ff52e6477 # v2 + uses: taiki-e/install-action@4978b3c8549425a60eb2d68446d0b58cef63ad0a # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From d91a7b2faf7be43609a1f9c81f895678a5cbf912 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Thu, 7 Mar 2024 04:06:36 -0600 Subject: [PATCH 083/157] Bump web console (floating IP edit, fix button styles) (#5206) https://github.com/oxidecomputer/console/compare/269c2c82...784e8aac * [784e8aac](https://github.com/oxidecomputer/console/commit/784e8aac) oxidecomputer/console#2035 * [da07ce01](https://github.com/oxidecomputer/console/commit/da07ce01) oxidecomputer/console#2039 * [04b56b02](https://github.com/oxidecomputer/console/commit/04b56b02) oxidecomputer/console#2037 * [55a72c86](https://github.com/oxidecomputer/console/commit/55a72c86) oxidecomputer/console#2036 * [f0cf2be5](https://github.com/oxidecomputer/console/commit/f0cf2be5) oxidecomputer/console#2033 * [3ad46af4](https://github.com/oxidecomputer/console/commit/3ad46af4) oxidecomputer/console#2032 * [cd1bff5c](https://github.com/oxidecomputer/console/commit/cd1bff5c) bump playwright and react query and stuff * [08bd3fe5](https://github.com/oxidecomputer/console/commit/08bd3fe5) oxidecomputer/console#2030 * [02701738](https://github.com/oxidecomputer/console/commit/02701738) forgot to delete app/ui/index.ts --- tools/console_version | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/console_version b/tools/console_version index 24d16b17eb..b3ccaf80b8 100644 --- a/tools/console_version +++ b/tools/console_version @@ -1,2 +1,2 @@ -COMMIT="269c2c82018d09d442a932474d84366a54837068" -SHA2="7a6a04f43c6189384065675ebb7c429de2b8369ffbb7c1989a13aafe779be2d6" +COMMIT="784e8aac273dc282b6218994d09042aa5928a198" +SHA2="b5afd1243e40e86d3a779f089fe61ed9cb3fbbd6d59e547e522a59c1b0e4b7d8" From e2e70722c23f5866b5516c94fa70b2fa4ccedc53 Mon Sep 17 00:00:00 2001 From: Kyle Simpson Date: Thu, 7 Mar 2024 10:08:50 +0000 Subject: [PATCH 084/157] Chore: Bump OPTE to `b85995f` (#5205) Includes fixes for panics on bad packets, and reduces the performance impact of handling non-OPTE underlay packets. --- .github/buildomat/jobs/deploy.sh | 13 ------------ Cargo.lock | 35 ++++++++++++++++++++++++++------ Cargo.toml | 4 ++-- tools/opte_version | 2 +- 4 files changed, 32 insertions(+), 22 deletions(-) diff --git a/.github/buildomat/jobs/deploy.sh b/.github/buildomat/jobs/deploy.sh index c16d523b03..a2957d35b4 100755 --- a/.github/buildomat/jobs/deploy.sh +++ b/.github/buildomat/jobs/deploy.sh @@ -102,19 +102,6 @@ z_swadm () { pfexec zlogin oxz_switch /opt/oxide/dendrite/bin/swadm $@ } -# XXX remove. This is just to test against a development branch of OPTE in CI. 
-set +x -OPTE_COMMIT="73d4669ea213d0b7aca35c4babb6fd09ed51d29e" -curl -sSfOL https://buildomat.eng.oxide.computer/public/file/oxidecomputer/opte/module/$OPTE_COMMIT/xde -pfexec rem_drv xde || true -pfexec mv xde /kernel/drv/amd64/xde -pfexec add_drv xde || true -curl -sSfOL https://buildomat.eng.oxide.computer/wg/0/artefact/01HM09S4M15WNXB2B2MX8R1GBT/yLalJU5vT4S4IEpwSeY4hPuspxw3JcINokZmlfNU14npHkzG/01HM09SJ2RQSFGW7MVKC9JKZ8D/01HM0A58D888AJ7YP6N1Q6T6ZD/opteadm -chmod +x opteadm -cp opteadm /tmp/opteadm -pfexec mv opteadm /opt/oxide/opte/bin/opteadm -set -x - # # XXX work around 14537 (UFS should not allow directories to be unlinked) which # is probably not yet fixed in xde branch? Once the xde branch merges from diff --git a/Cargo.lock b/Cargo.lock index 00a8fb0074..7471aa2dc4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1708,6 +1708,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derror-macro" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/opte?rev=b85995f92ae94cdc78b97b0a610c69e103e00423#b85995f92ae94cdc78b97b0a610c69e103e00423" +dependencies = [ + "darling 0.20.3", + "proc-macro2", + "quote", + "syn 2.0.51", +] + [[package]] name = "diesel" version = "2.1.4" @@ -3347,7 +3358,7 @@ dependencies = [ [[package]] name = "illumos-sys-hdrs" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=1d29ef60a18179babfb44f0f7a3c2fe71034a2c1#1d29ef60a18179babfb44f0f7a3c2fe71034a2c1" +source = "git+https://github.com/oxidecomputer/opte?rev=b85995f92ae94cdc78b97b0a610c69e103e00423#b85995f92ae94cdc78b97b0a610c69e103e00423" [[package]] name = "illumos-utils" @@ -3736,7 +3747,7 @@ dependencies = [ [[package]] name = "kstat-macro" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=1d29ef60a18179babfb44f0f7a3c2fe71034a2c1#1d29ef60a18179babfb44f0f7a3c2fe71034a2c1" +source = "git+https://github.com/oxidecomputer/opte?rev=b85995f92ae94cdc78b97b0a610c69e103e00423#b85995f92ae94cdc78b97b0a610c69e103e00423" dependencies = [ "quote", "syn 2.0.51", @@ -5614,9 +5625,10 @@ dependencies = [ [[package]] name = "opte" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=1d29ef60a18179babfb44f0f7a3c2fe71034a2c1#1d29ef60a18179babfb44f0f7a3c2fe71034a2c1" +source = "git+https://github.com/oxidecomputer/opte?rev=b85995f92ae94cdc78b97b0a610c69e103e00423#b85995f92ae94cdc78b97b0a610c69e103e00423" dependencies = [ "cfg-if", + "derror-macro", "dyn-clone", "illumos-sys-hdrs", "kstat-macro", @@ -5624,13 +5636,14 @@ dependencies = [ "postcard", "serde", "smoltcp 0.11.0", + "tabwriter", "version_check", ] [[package]] name = "opte-api" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=1d29ef60a18179babfb44f0f7a3c2fe71034a2c1#1d29ef60a18179babfb44f0f7a3c2fe71034a2c1" +source = "git+https://github.com/oxidecomputer/opte?rev=b85995f92ae94cdc78b97b0a610c69e103e00423#b85995f92ae94cdc78b97b0a610c69e103e00423" dependencies = [ "illumos-sys-hdrs", "ipnetwork", @@ -5642,7 +5655,7 @@ dependencies = [ [[package]] name = "opte-ioctl" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=1d29ef60a18179babfb44f0f7a3c2fe71034a2c1#1d29ef60a18179babfb44f0f7a3c2fe71034a2c1" +source = "git+https://github.com/oxidecomputer/opte?rev=b85995f92ae94cdc78b97b0a610c69e103e00423#b85995f92ae94cdc78b97b0a610c69e103e00423" dependencies = [ "libc", "libnet", @@ -5716,7 +5729,7 @@ dependencies = [ [[package]] name = "oxide-vpc" version = "0.1.0" -source = 
"git+https://github.com/oxidecomputer/opte?rev=1d29ef60a18179babfb44f0f7a3c2fe71034a2c1#1d29ef60a18179babfb44f0f7a3c2fe71034a2c1" +source = "git+https://github.com/oxidecomputer/opte?rev=b85995f92ae94cdc78b97b0a610c69e103e00423#b85995f92ae94cdc78b97b0a610c69e103e00423" dependencies = [ "cfg-if", "illumos-sys-hdrs", @@ -5724,6 +5737,7 @@ dependencies = [ "poptrie", "serde", "smoltcp 0.11.0", + "tabwriter", "zerocopy 0.7.32", ] @@ -8862,6 +8876,15 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "tabwriter" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a327282c4f64f6dc37e3bba4c2b6842cc3a992f204fa58d917696a89f691e5f6" +dependencies = [ + "unicode-width", +] + [[package]] name = "take_mut" version = "0.2.2" diff --git a/Cargo.toml b/Cargo.toml index 534c33e713..9cd22cbbcc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -283,7 +283,7 @@ omicron-sled-agent = { path = "sled-agent" } omicron-test-utils = { path = "test-utils" } omicron-zone-package = "0.11.0" oxide-client = { path = "clients/oxide-client" } -oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "1d29ef60a18179babfb44f0f7a3c2fe71034a2c1", features = [ "api", "std" ] } +oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "b85995f92ae94cdc78b97b0a610c69e103e00423", features = [ "api", "std" ] } once_cell = "1.19.0" openapi-lint = { git = "https://github.com/oxidecomputer/openapi-lint", branch = "main" } openapiv3 = "2.0.0" @@ -291,7 +291,7 @@ openapiv3 = "2.0.0" openssl = "0.10" openssl-sys = "0.9" openssl-probe = "0.1.5" -opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "1d29ef60a18179babfb44f0f7a3c2fe71034a2c1" } +opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "b85995f92ae94cdc78b97b0a610c69e103e00423" } oso = "0.27" owo-colors = "4.0.0" oximeter = { path = "oximeter/oximeter" } diff --git a/tools/opte_version b/tools/opte_version index 0a04873e11..23d9da8fa3 100644 --- a/tools/opte_version +++ b/tools/opte_version @@ -1 +1 @@ -0.28.215 +0.28.227 From dcdf9bb25112bd2740bfda31922bfe62db8b9f72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karen=20C=C3=A1rcamo?= Date: Fri, 8 Mar 2024 00:09:57 +1300 Subject: [PATCH 085/157] [sled-agent] Internal DNS self assembling zone (#5213) Closes: https://github.com/oxidecomputer/omicron/issues/2880 Related: https://github.com/oxidecomputer/omicron/issues/1898 --- package-manifest.toml | 7 +- sled-agent/src/services.rs | 189 +++++++++++++++++----------------- smf/internal-dns/manifest.xml | 7 +- smf/nexus/manifest.xml | 1 - 4 files changed, 104 insertions(+), 100 deletions(-) diff --git a/package-manifest.toml b/package-manifest.toml index 76f52ebc35..c6f39d2ecd 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -241,7 +241,12 @@ output.intermediate_only = true service_name = "internal_dns" only_for_targets.image = "standard" source.type = "composite" -source.packages = [ "dns-server.tar.gz", "internal-dns-customizations.tar.gz" ] +source.packages = [ + "dns-server.tar.gz", + "internal-dns-customizations.tar.gz", + "zone-network-setup.tar.gz", + "zone-network-install.tar.gz" +] output.type = "zone" [package.external-dns] diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index fed0114bc6..9273e0ef3b 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -1400,12 +1400,12 @@ impl ServiceManager { } fn zone_network_setup_install( - info: &SledAgentInfo, + gw_addr: &Ipv6Addr, zone: &InstalledZone, static_addr: &String, ) -> 
Result { let datalink = zone.get_control_vnic_name(); - let gateway = &info.underlay_address.to_string(); + let gateway = &gw_addr.to_string(); let mut config_builder = PropertyGroupBuilder::new("config"); config_builder = config_builder @@ -1575,7 +1575,7 @@ impl ServiceManager { let listen_port = &CLICKHOUSE_PORT.to_string(); let nw_setup_service = Self::zone_network_setup_install( - info, + &info.underlay_address, &installed_zone, listen_addr, )?; @@ -1624,7 +1624,7 @@ impl ServiceManager { let listen_port = &CLICKHOUSE_KEEPER_PORT.to_string(); let nw_setup_service = Self::zone_network_setup_install( - info, + &info.underlay_address, &installed_zone, listen_addr, )?; @@ -1680,7 +1680,7 @@ impl ServiceManager { let listen_port = &address.port().to_string(); let nw_setup_service = Self::zone_network_setup_install( - info, + &info.underlay_address, &installed_zone, listen_addr, )?; @@ -1729,7 +1729,7 @@ impl ServiceManager { let listen_port = &CRUCIBLE_PORT.to_string(); let nw_setup_service = Self::zone_network_setup_install( - info, + &info.underlay_address, &installed_zone, listen_addr, )?; @@ -1784,7 +1784,7 @@ impl ServiceManager { let listen_port = &CRUCIBLE_PANTRY_PORT.to_string(); let nw_setup_service = Self::zone_network_setup_install( - info, + &info.underlay_address, &installed_zone, listen_addr, )?; @@ -1834,7 +1834,7 @@ impl ServiceManager { let listen_addr = &address.ip().to_string(); let nw_setup_service = Self::zone_network_setup_install( - info, + &info.underlay_address, &installed_zone, listen_addr, )?; @@ -1877,7 +1877,7 @@ impl ServiceManager { let static_addr = underlay_address.to_string(); let nw_setup_service = Self::zone_network_setup_install( - info, + &info.underlay_address, &installed_zone, &static_addr.clone(), )?; @@ -1966,7 +1966,7 @@ impl ServiceManager { let static_addr = underlay_address.to_string(); let nw_setup_service = Self::zone_network_setup_install( - info, + &info.underlay_address, &installed_zone, &static_addr.clone(), )?; @@ -2038,6 +2038,86 @@ impl ServiceManager { return Ok(RunningZone::boot(installed_zone).await?); } + ZoneArgs::Omicron(OmicronZoneConfigLocal { + zone: + OmicronZoneConfig { + zone_type: + OmicronZoneType::InternalDns { + http_address, + dns_address, + gz_address, + gz_address_index, + .. + }, + underlay_address, + .. + }, + .. + }) => { + let nw_setup_service = Self::zone_network_setup_install( + gz_address, + &installed_zone, + &underlay_address.to_string(), + )?; + + // Internal DNS zones require a special route through + // the global zone, since they are not on the same part + // of the underlay as most other services on this sled + // (the sled's subnet). + // + // We create an IP address in the dedicated portion of + // the underlay used for internal DNS servers, but we + // *also* add a number ("which DNS server is this") to + // ensure these addresses are given unique names. In the + // unlikely case that two internal DNS servers end up on + // the same machine (which is effectively a + // developer-only environment -- we wouldn't want this + // in prod!), they need to be given distinct names. + let addr_name = format!("internaldns{gz_address_index}"); + Zones::ensure_has_global_zone_v6_address( + self.inner.underlay_vnic.clone(), + *gz_address, + &addr_name, + ) + .map_err(|err| Error::GzAddress { + message: format!( + "Failed to create address {} for Internal DNS zone", + addr_name + ), + err, + })?; + + // If this address is in a new ipv6 prefix, notify + // maghemite so it can advertise it to other sleds. 
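+ // (The gateway for this zone is the dedicated gz_address, which is also
+ // why zone_network_setup_install now takes the gateway address as a
+ // parameter instead of always using info.underlay_address.)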
+ self.advertise_prefix_of_address(*gz_address).await; + + let http_addr = + format!("[{}]:{}", http_address.ip(), http_address.port()); + let dns_addr = + format!("[{}]:{}", dns_address.ip(), dns_address.port()); + + let internal_dns_config = PropertyGroupBuilder::new("config") + .add_property("http_address", "astring", &http_addr) + .add_property("dns_address", "astring", &dns_addr); + let internal_dns_service = + ServiceBuilder::new("oxide/internal_dns").add_instance( + ServiceInstanceBuilder::new("default") + .add_property_group(internal_dns_config), + ); + + let profile = ProfileBuilder::new("omicron") + .add_service(nw_setup_service) + .add_service(disabled_ssh_service) + .add_service(internal_dns_service) + .add_service(disabled_dns_client_service); + profile + .add_to_zone(&self.inner.log, &installed_zone) + .await + .map_err(|err| { + Error::io("Failed to setup Internal DNS profile", err) + })?; + return Ok(RunningZone::boot(installed_zone).await?); + } ZoneArgs::Omicron(OmicronZoneConfigLocal { zone: OmicronZoneConfig { @@ -2060,7 +2140,7 @@ impl ServiceManager { let static_addr = underlay_address.to_string(); let nw_setup_service = Self::zone_network_setup_install( - &info, + &info.underlay_address, &installed_zone, &static_addr.clone(), )?; @@ -2302,90 +2382,7 @@ impl ServiceManager { match &request { ZoneArgs::Omicron(zone_config) => { - // TODO: Related to - // https://github.com/oxidecomputer/omicron/pull/1124 , should we - // avoid importing this manifest? - debug!(self.inner.log, "importing manifest"); - - let smfh = - SmfHelper::new(&running_zone, &zone_config.zone.zone_type); - smfh.import_manifest()?; - match &zone_config.zone.zone_type { - OmicronZoneType::InternalDns { - http_address, - dns_address, - gz_address, - gz_address_index, - .. - } => { - info!( - self.inner.log, - "Setting up internal-dns service" - ); - - // Internal DNS zones require a special route through - // the global zone, since they are not on the same part - // of the underlay as most other services on this sled - // (the sled's subnet). - // - // We create an IP address in the dedicated portion of - // the underlay used for internal DNS servers, but we - // *also* add a number ("which DNS server is this") to - // ensure these addresses are given unique names. In the - // unlikely case that two internal DNS servers end up on - // the same machine (which is effectively a - // developer-only environment -- we wouldn't want this - // in prod!), they need to be given distinct names. - let addr_name = - format!("internaldns{gz_address_index}"); - Zones::ensure_has_global_zone_v6_address( - self.inner.underlay_vnic.clone(), - *gz_address, - &addr_name, - ) - .map_err(|err| { - Error::GzAddress { - message: format!( - "Failed to create address {} for Internal DNS zone", - addr_name - ), - err, - } - })?; - // If this address is in a new ipv6 prefix, notify - // maghemite so it can advertise it to other sleds. - self.advertise_prefix_of_address(*gz_address).await; - - running_zone.add_default_route(*gz_address).map_err( - |err| Error::ZoneCommand { - intent: "Adding Route".to_string(), - err, - }, - )?; - - smfh.setprop( - "config/http_address", - format!( - "[{}]:{}", - http_address.ip(), - http_address.port(), - ), - )?; - smfh.setprop( - "config/dns_address", - &format!( - "[{}]:{}", - dns_address.ip(), - dns_address.port(), - ), - )?; - - // Refresh the manifest with the new properties we set, - // so they become "effective" properties when the - // service is enabled. 
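For contrast with the imperative `smfh.setprop(...)` / `smfh.refresh()` sequence being deleted just below, the self-assembled zone above renders its SMF configuration declaratively before the zone first boots. A sketch of that builder flow, assuming the same `PropertyGroupBuilder` / `ServiceBuilder` / `ProfileBuilder` types that are in scope in `services.rs`; the addresses are placeholders:

```rust
// Fragment only: assumes the SMF profile-builder types used above are in
// scope, along with `log` and `installed_zone`.
let config = PropertyGroupBuilder::new("config")
    .add_property("http_address", "astring", "[fd00:1122:3344:1::1]:5353")
    .add_property("dns_address", "astring", "[fd00:1122:3344:1::1]:53");
let internal_dns = ServiceBuilder::new("oxide/internal_dns").add_instance(
    ServiceInstanceBuilder::new("default").add_property_group(config),
);
// The profile is written into the zone's filesystem before boot, so there is
// no import/setprop/refresh/enable sequence against a live SMF repository.
let profile = ProfileBuilder::new("omicron").add_service(internal_dns);
profile.add_to_zone(&log, &installed_zone).await?;
```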
- smfh.refresh()?; - } OmicronZoneType::BoundaryNtp { .. } | OmicronZoneType::Clickhouse { .. } | OmicronZoneType::ClickhouseKeeper { .. } @@ -2393,6 +2390,7 @@ | OmicronZoneType::CockroachDb { .. } | OmicronZoneType::Crucible { .. } | OmicronZoneType::CruciblePantry { .. } | OmicronZoneType::ExternalDns { .. } + | OmicronZoneType::InternalDns { .. } | OmicronZoneType::InternalNtp { .. } | OmicronZoneType::Nexus { .. } | OmicronZoneType::Oximeter { .. } => { panic!( "{} is a service which exists as part of a \ ) } }; - - debug!(self.inner.log, "enabling service"); - smfh.enable()?; } ZoneArgs::Switch(request) => { for service in &request.zone.services {
diff --git a/smf/internal-dns/manifest.xml b/smf/internal-dns/manifest.xml index 213f861b43..07cedc4ad5 100644 --- a/smf/internal-dns/manifest.xml +++ b/smf/internal-dns/manifest.xml @@ -4,13 +4,18 @@ - + + + + +
diff --git a/smf/nexus/manifest.xml b/smf/nexus/manifest.xml index 7f1a3e76ff..0f43079cb9 100644 --- a/smf/nexus/manifest.xml +++ b/smf/nexus/manifest.xml @@ -14,7 +14,6 @@ -
Date: Thu, 7 Mar 2024 11:20:43 -0500 Subject: [PATCH 086/157] Expose uninitialized sleds and "add sled" via internal API and omdb (#5204)

This will let us drive the add sled process from a support context. (Prior to this change adding a sled required interaction with the external API.) I want to test this before merging, but it's small enough that if there's anything that needs tweaking it's unlikely to affect any review comments. One change is that we had two `Baseboard` structs that were identical except for a minor difference in field names (`{serial,part}` vs `{serial_number,part_number}`). I removed the internal one and replaced it with the external one to avoid unnecessary churn in the public API, but if I'm missing something intentional that led to having both, please say so. Closes #5133. --- dev-tools/omdb/src/bin/omdb/main.rs | 13 +++ dev-tools/omdb/src/bin/omdb/nexus.rs | 111 ++++++++++++++++++- dev-tools/omdb/tests/usage_errors.out | 7 +- nexus/src/app/sled.rs | 4 +- nexus/src/app/switch.rs | 4 +- nexus/src/internal_api/http_entrypoints.rs | 47 ++++++++ nexus/test-utils/src/resource_helpers.rs | 6 +- nexus/tests/integration_tests/rack.rs | 7 +- nexus/types/src/internal_api/params.rs | 10 +- openapi/nexus-internal.json | 121 ++++++++++++++++++++- sled-agent/src/nexus.rs | 4 +- sled-agent/src/sim/server.rs | 4 +- 12 files changed, 302 insertions(+), 36 deletions(-)

diff --git a/dev-tools/omdb/src/bin/omdb/main.rs b/dev-tools/omdb/src/bin/omdb/main.rs index 32141d2809..0fb9ba0121 100644 --- a/dev-tools/omdb/src/bin/omdb/main.rs +++ b/dev-tools/omdb/src/bin/omdb/main.rs @@ -84,11 +84,24 @@ struct Omdb { #[arg(env = "OMDB_DNS_SERVER", long)] dns_server: Option, + /// allow potentially-destructive subcommands + #[arg(short = 'w', long = "destructive")] + allow_destructive: bool, + #[command(subcommand)] command: OmdbCommands, } impl Omdb { + fn check_allow_destructive(&self) -> anyhow::Result<()> { + anyhow::ensure!( + self.allow_destructive, + "This command is potentially destructive. \ + Pass the `-w` / `--destructive` flag to allow it."
+ ); + Ok(()) + } + async fn dns_lookup_all( &self, log: slog::Logger, diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs index aed7d86ba0..03d6fd6e80 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus.rs @@ -16,6 +16,7 @@ use nexus_client::types::ActivationReason; use nexus_client::types::BackgroundTask; use nexus_client::types::CurrentStatus; use nexus_client::types::LastResult; +use nexus_client::types::UninitializedSledId; use serde::Deserialize; use slog_error_chain::InlineErrorChain; use std::collections::BTreeMap; @@ -38,8 +39,10 @@ pub struct NexusArgs { enum NexusCommands { /// print information about background tasks BackgroundTasks(BackgroundTasksArgs), - /// print information about blueprints + /// interact with blueprints Blueprints(BlueprintsArgs), + /// interact with sleds + Sleds(SledsArgs), } #[derive(Debug, Args)] @@ -116,6 +119,28 @@ enum BlueprintTargetCommands { Set(BlueprintIdArgs), } +#[derive(Debug, Args)] +struct SledsArgs { + #[command(subcommand)] + command: SledsCommands, +} + +#[derive(Debug, Subcommand)] +enum SledsCommands { + /// List all uninitialized sleds + ListUninitialized, + /// Add an uninitialized sled + Add(SledAddArgs), +} + +#[derive(Debug, Args)] +struct SledAddArgs { + /// sled's serial number + serial: String, + /// sled's part number + part: String, +} + impl NexusArgs { /// Run a `omdb nexus` subcommand. pub(crate) async fn run_cmd( @@ -167,7 +192,10 @@ impl NexusArgs { }) => cmd_nexus_blueprints_diff(&client, args).await, NexusCommands::Blueprints(BlueprintsArgs { command: BlueprintsCommands::Delete(args), - }) => cmd_nexus_blueprints_delete(&client, args).await, + }) => { + omdb.check_allow_destructive()?; + cmd_nexus_blueprints_delete(&client, args).await + } NexusCommands::Blueprints(BlueprintsArgs { command: BlueprintsCommands::Target(BlueprintsTargetArgs { @@ -179,16 +207,33 @@ impl NexusArgs { BlueprintsCommands::Target(BlueprintsTargetArgs { command: BlueprintTargetCommands::Set(args), }), - }) => cmd_nexus_blueprints_target_set(&client, args).await, + }) => { + omdb.check_allow_destructive()?; + cmd_nexus_blueprints_target_set(&client, args).await + } NexusCommands::Blueprints(BlueprintsArgs { command: BlueprintsCommands::Regenerate, - }) => cmd_nexus_blueprints_regenerate(&client).await, + }) => { + omdb.check_allow_destructive()?; + cmd_nexus_blueprints_regenerate(&client).await + } NexusCommands::Blueprints(BlueprintsArgs { command: BlueprintsCommands::GenerateFromCollection(args), }) => { + omdb.check_allow_destructive()?; cmd_nexus_blueprints_generate_from_collection(&client, args) .await } + + NexusCommands::Sleds(SledsArgs { + command: SledsCommands::ListUninitialized, + }) => cmd_nexus_sleds_list_uninitialized(&client).await, + NexusCommands::Sleds(SledsArgs { + command: SledsCommands::Add(args), + }) => { + omdb.check_allow_destructive()?; + cmd_nexus_sled_add(&client, args).await + } } } } @@ -946,3 +991,61 @@ async fn cmd_nexus_blueprints_regenerate( eprintln!("generated new blueprint {}", blueprint.id); Ok(()) } + +/// Runs `omdb nexus sleds list-uninitialized` +async fn cmd_nexus_sleds_list_uninitialized( + client: &nexus_client::Client, +) -> Result<(), anyhow::Error> { + let response = client + .sled_list_uninitialized() + .await + .context("listing uninitialized sleds")?; + let sleds = response.into_inner(); + if sleds.next_page.is_some() { + eprintln!( + "warning: response includes next_page token; \ + pagination not implemented" + ); + } + 
let mut sleds = sleds.items; + sleds.sort_by_key(|sled| sled.cubby); + + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct UninitializedSledRow { + rack_id: Uuid, + cubby: u16, + serial: String, + part: String, + revision: i64, + } + let rows = sleds.into_iter().map(|sled| UninitializedSledRow { + rack_id: sled.rack_id, + cubby: sled.cubby, + serial: sled.baseboard.serial, + part: sled.baseboard.part, + revision: sled.baseboard.revision, + }); + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + println!("{}", table); + Ok(()) +} + +/// Runs `omdb nexus sleds add` +async fn cmd_nexus_sled_add( + client: &nexus_client::Client, + args: &SledAddArgs, +) -> Result<(), anyhow::Error> { + client + .sled_add(&UninitializedSledId { + part: args.part.clone(), + serial: args.serial.clone(), + }) + .await + .context("adding sled")?; + eprintln!("added sled {} ({})", args.serial, args.part); + Ok(()) +} diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index c10f95e23d..2f9001671d 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -19,6 +19,7 @@ Commands: Options: --log-level log level filter [env: LOG_LEVEL=] [default: warn] --dns-server [env: OMDB_DNS_SERVER=] + -w, --destructive allow potentially-destructive subcommands -h, --help Print help (see more with '--help') ============================================= EXECUTING COMMAND: omdb ["--help"] @@ -50,6 +51,9 @@ Options: --dns-server [env: OMDB_DNS_SERVER=] + -w, --destructive + allow potentially-destructive subcommands + -h, --help Print help (see a summary with '-h') --------------------------------------------- @@ -294,7 +298,8 @@ Usage: omdb nexus [OPTIONS] Commands: background-tasks print information about background tasks - blueprints print information about blueprints + blueprints interact with blueprints + sleds interact with sleds help Print this message or the help of the given subcommand(s) Options: diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs index 88955d78e9..a9341472e0 100644 --- a/nexus/src/app/sled.rs +++ b/nexus/src/app/sled.rs @@ -58,8 +58,8 @@ impl super::Nexus { id, info.sa_address, db::model::SledBaseboard { - serial_number: info.baseboard.serial_number, - part_number: info.baseboard.part_number, + serial_number: info.baseboard.serial, + part_number: info.baseboard.part, revision: info.baseboard.revision, }, db::model::SledSystemHardware { diff --git a/nexus/src/app/switch.rs b/nexus/src/app/switch.rs index 362d56553a..cea16ba7af 100644 --- a/nexus/src/app/switch.rs +++ b/nexus/src/app/switch.rs @@ -32,8 +32,8 @@ impl super::Nexus { ) -> Result { let switch = db::model::Switch::new( id, - request.baseboard.serial_number, - request.baseboard.part_number, + request.baseboard.serial, + request.baseboard.part, request.baseboard.revision, request.rack_id, ); diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs index eddc834a2a..5702539a4b 100644 --- a/nexus/src/internal_api/http_entrypoints.rs +++ b/nexus/src/internal_api/http_entrypoints.rs @@ -29,6 +29,8 @@ use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintMetadata; use nexus_types::deployment::BlueprintTarget; use nexus_types::deployment::BlueprintTargetSet; +use nexus_types::external_api::params::UninitializedSledId; +use nexus_types::external_api::shared::UninitializedSled; use 
nexus_types::internal_api::params::SwitchPutRequest; use nexus_types::internal_api::params::SwitchPutResponse; use nexus_types::internal_api::views::to_list; @@ -88,6 +90,9 @@ pub(crate) fn internal_api() -> NexusApiDescription { api.register(blueprint_generate_from_collection)?; api.register(blueprint_regenerate)?; + api.register(sled_list_uninitialized)?; + api.register(sled_add)?; + Ok(()) } @@ -795,3 +800,45 @@ async fn blueprint_regenerate( }; apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await } + +/// List uninitialized sleds +#[endpoint { + method = GET, + path = "/sleds/uninitialized", +}] +async fn sled_list_uninitialized( + rqctx: RequestContext>, +) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; + let opctx = crate::context::op_context_for_internal_api(&rqctx).await; + let sleds = nexus.sled_list_uninitialized(&opctx).await?; + Ok(HttpResponseOk(ResultsPage { items: sleds, next_page: None })) + }; + apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Add sled to initialized rack +// +// TODO: In the future this should really be a PUT request, once we resolve +// https://github.com/oxidecomputer/omicron/issues/4494. It should also +// explicitly be tied to a rack via a `rack_id` path param. For now we assume +// we are only operating on single rack systems. +#[endpoint { + method = POST, + path = "/sleds/add", +}] +async fn sled_add( + rqctx: RequestContext>, + sled: TypedBody, +) -> Result { + let apictx = rqctx.context(); + let nexus = &apictx.nexus; + let handler = async { + let opctx = crate::context::op_context_for_internal_api(&rqctx).await; + nexus.sled_add(&opctx, sled.into_inner()).await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await +} diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 25e58d093d..7331843000 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -18,6 +18,7 @@ use nexus_types::external_api::params; use nexus_types::external_api::params::PhysicalDiskKind; use nexus_types::external_api::params::UserId; use nexus_types::external_api::shared; +use nexus_types::external_api::shared::Baseboard; use nexus_types::external_api::shared::IdentityType; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::views; @@ -29,7 +30,6 @@ use nexus_types::external_api::views::User; use nexus_types::external_api::views::{Project, Silo, Vpc, VpcRouter}; use nexus_types::identity::Resource; use nexus_types::internal_api::params as internal_params; -use nexus_types::internal_api::params::Baseboard; use omicron_common::api::external::ByteCount; use omicron_common::api::external::Disk; use omicron_common::api::external::IdentityMetadataCreateParams; @@ -325,8 +325,8 @@ pub async fn create_switch( "/switches", &internal_params::SwitchPutRequest { baseboard: Baseboard { - serial_number: serial.to_string(), - part_number: part.to_string(), + serial: serial.to_string(), + part: part.to_string(), revision, }, rack_id, diff --git a/nexus/tests/integration_tests/rack.rs b/nexus/tests/integration_tests/rack.rs index a58871ee71..2098ff660d 100644 --- a/nexus/tests/integration_tests/rack.rs +++ b/nexus/tests/integration_tests/rack.rs @@ -13,7 +13,6 @@ use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use 
nexus_types::external_api::shared::UninitializedSled; use nexus_types::external_api::views::Rack; -use nexus_types::internal_api::params::Baseboard; use nexus_types::internal_api::params::SledAgentStartupInfo; use nexus_types::internal_api::params::SledRole; use omicron_common::api::external::ByteCount; @@ -112,11 +111,7 @@ async fn test_sled_list_uninitialized(cptestctx: &ControlPlaneTestContext) { let sa = SledAgentStartupInfo { sa_address: "[fd00:1122:3344:0100::1]:8080".parse().unwrap(), role: SledRole::Gimlet, - baseboard: Baseboard { - serial_number: baseboard.serial, - part_number: baseboard.part, - revision: baseboard.revision, - }, + baseboard, usable_hardware_threads: 32, usable_physical_ram: ByteCount::from_gibibytes_u32(100), reservoir_size: ByteCount::from_mebibytes_u32(100), diff --git a/nexus/types/src/internal_api/params.rs b/nexus/types/src/internal_api/params.rs index 987efbf6b7..31ea86907a 100644 --- a/nexus/types/src/internal_api/params.rs +++ b/nexus/types/src/internal_api/params.rs @@ -6,6 +6,7 @@ use crate::external_api::params::PhysicalDiskKind; use crate::external_api::params::UserId; +use crate::external_api::shared::Baseboard; use crate::external_api::shared::IpRange; use omicron_common::api::external::ByteCount; use omicron_common::api::external::MacAddr; @@ -35,15 +36,6 @@ pub enum SledRole { Scrimlet, } -// TODO: We need a unified representation of these hardware identifiers -/// Describes properties that should uniquely identify Oxide manufactured hardware -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] -pub struct Baseboard { - pub serial_number: String, - pub part_number: String, - pub revision: i64, -} - /// Sent by a sled agent on startup to Nexus to request further instruction #[derive(Serialize, Deserialize, Debug, JsonSchema)] pub struct SledAgentStartupInfo { diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 53a53fb219..65c167f177 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -918,6 +918,57 @@ } } }, + "/sleds/add": { + "post": { + "summary": "Add sled to initialized rack", + "operationId": "sled_add", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UninitializedSledId" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/sleds/uninitialized": { + "get": { + "summary": "List uninitialized sleds", + "operationId": "sled_list_uninitialized", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UninitializedSledResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/switch/{switch_id}": { "put": { "operationId": "switch_put", @@ -1049,24 +1100,24 @@ ] }, "Baseboard": { - "description": "Describes properties that should uniquely identify Oxide manufactured hardware", + "description": "Properties that uniquely identify an Oxide hardware component", "type": "object", "properties": { - "part_number": { + "part": { "type": "string" }, "revision": { "type": "integer", "format": "int64" }, - "serial_number": { + "serial": { "type": "string" } }, "required": [ - "part_number", + "part", "revision", - "serial_number" + "serial" ] }, "BgpConfig": { @@ -6546,6 
+6597,66 @@ "SwitchPutResponse": { "type": "object" }, + "UninitializedSled": { + "description": "A sled that has not been added to an initialized rack yet", + "type": "object", + "properties": { + "baseboard": { + "$ref": "#/components/schemas/Baseboard" + }, + "cubby": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "rack_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "baseboard", + "cubby", + "rack_id" + ] + }, + "UninitializedSledId": { + "description": "The unique hardware ID for a sled", + "type": "object", + "properties": { + "part": { + "type": "string" + }, + "serial": { + "type": "string" + } + }, + "required": [ + "part", + "serial" + ] + }, + "UninitializedSledResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/UninitializedSled" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, "UserId": { "title": "A name unique within the parent collection", "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", diff --git a/sled-agent/src/nexus.rs b/sled-agent/src/nexus.rs index cc715f4010..4cd97d9ba8 100644 --- a/sled-agent/src/nexus.rs +++ b/sled-agent/src/nexus.rs @@ -178,8 +178,8 @@ impl ConvertInto impl ConvertInto for sled_hardware::Baseboard { fn convert(self) -> nexus_client::types::Baseboard { nexus_client::types::Baseboard { - serial_number: self.identifier().to_string(), - part_number: self.model().to_string(), + serial: self.identifier().to_string(), + part: self.model().to_string(), revision: self.revision(), } } diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs index f9db89135a..784e3f4938 100644 --- a/sled-agent/src/sim/server.rs +++ b/sled-agent/src/sim/server.rs @@ -105,11 +105,11 @@ impl Server { sa_address: sa_address.to_string(), role: NexusTypes::SledRole::Scrimlet, baseboard: NexusTypes::Baseboard { - serial_number: format!( + serial: format!( "sim-{}", &config.id.to_string()[0..8] ), - part_number: String::from("Unknown"), + part: String::from("Unknown"), revision: 0, }, usable_hardware_threads: config From 6120b6fcec70cb8a4b8e9b4d04898843f6b7e78f Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 7 Mar 2024 11:00:21 -0800 Subject: [PATCH 087/157] chore(deps): update rust crate reedline to 0.30.0 (#5218) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7471aa2dc4..605c1c0466 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6986,9 +6986,9 @@ dependencies = [ [[package]] name = "reedline" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e01ebfbdb1a88963121d3c928c97be7f10fec7795bec8b918c8cda1db7c29e6" +checksum = "413a9fa6a5d8c937d3ae1e975bfb6a918bb0b6cdfae6a10416218c837a31b8fc" dependencies = [ "chrono", "crossterm", diff --git a/Cargo.toml b/Cargo.toml index 9cd22cbbcc..deda5a8dd8 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -326,7 +326,7 @@ rand = "0.8.5" ratatui = "0.26.1" rayon = "1.9" rcgen = "0.12.1" -reedline = "0.29.0" +reedline = "0.30.0" ref-cast = "1.0" regex = "1.10.3" regress = "0.8.0" From 81447ec02274245256a393bd38cf48c8e8a29766 Mon Sep 17 00:00:00 2001 From: bnaecker Date: Thu, 7 Mar 2024 12:38:42 -0800 Subject: [PATCH 088/157] Groundwork for oximeter instance vCPU metrics (#4900) - Adds the silo and project IDs to the instance-ensure request from Nexus to the sled-agent. These are used as fields on the instance-related statistics. This metadata is currently unused, but will be forwarded to Propolis in the instance-ensure request once the server is updated to accept it. - Defines a `VirtualMachine` oximeter target and `VcpuUsage` metric. The latter has a `state` field which corresponds to the named kstats published by the hypervisor that accumulate the time spent in a number of vCPU microstates. The combination of these should allow us to aggregate or break down vCPU usage by silo, project, instance, vCPU ID, and CPU state. Adds some simple mocks and tests for these. - Adds more fine-grained feature flags to the `oximeter-instruments` crate. --- Cargo.lock | 4 +- nexus/src/app/instance.rs | 20 ++++ openapi/sled-agent.json | 27 ++++++ oximeter/instruments/Cargo.toml | 43 ++++++--- oximeter/instruments/src/kstat/mod.rs | 9 +- oximeter/instruments/src/kstat/sampler.rs | 4 +- oximeter/instruments/src/lib.rs | 2 +- oximeter/producer/src/lib.rs | 22 +++-- sled-agent/src/http_entrypoints.rs | 1 + sled-agent/src/instance.rs | 11 ++- sled-agent/src/instance_manager.rs | 14 ++- sled-agent/src/metrics.rs | 106 +++++++++++++--------- sled-agent/src/params.rs | 14 +++ sled-agent/src/sim/http_entrypoints.rs | 1 + sled-agent/src/sim/sled_agent.rs | 5 +- sled-agent/src/sled_agent.rs | 95 ++++++++++--------- 16 files changed, 261 insertions(+), 117 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 605c1c0466..5d773a9abc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1974,7 +1974,7 @@ dependencies = [ [[package]] name = "dropshot" version = "0.9.1-dev" -source = "git+https://github.com/oxidecomputer/dropshot?branch=main#711a7490d81416731cfe0f9fef366ed5f266a0ee" +source = "git+https://github.com/oxidecomputer/dropshot?branch=main#29ae98d1f909c6832661408a4c03f929e8afa6e9" dependencies = [ "async-stream", "async-trait", @@ -2020,7 +2020,7 @@ dependencies = [ [[package]] name = "dropshot_endpoint" version = "0.9.1-dev" -source = "git+https://github.com/oxidecomputer/dropshot?branch=main#711a7490d81416731cfe0f9fef366ed5f266a0ee" +source = "git+https://github.com/oxidecomputer/dropshot?branch=main#29ae98d1f909c6832661408a4c03f929e8afa6e9" dependencies = [ "proc-macro2", "quote", diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index f6cf90718f..2d09078e18 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -1183,6 +1183,25 @@ impl super::Nexus { let ssh_keys: Vec = ssh_keys.map(|ssh_key| ssh_key.public_key).collect(); + // Construct instance metadata used to track its statistics. + // + // This requires another fetch on the silo and project, to extract their + // IDs. + let (.., db_project) = self + .project_lookup( + opctx, + params::ProjectSelector { + project: NameOrId::Id(db_instance.project_id), + }, + )? 
+ .fetch() + .await?; + let (_, db_silo) = self.current_silo_lookup(opctx)?.fetch().await?; + let metadata = sled_agent_client::types::InstanceMetadata { + silo_id: db_silo.id(), + project_id: db_project.id(), + }; + // Ask the sled agent to begin the state change. Then update the // database to reflect the new intermediate state. If this update is // not the newest one, that's fine. That might just mean the sled agent @@ -1226,6 +1245,7 @@ impl super::Nexus { initial_vmm.propolis_port.into(), ) .to_string(), + metadata, }, ) .await diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index 99156fffd4..238b5832ca 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -4708,6 +4708,14 @@ } ] }, + "metadata": { + "description": "Metadata used to track instance statistics.", + "allOf": [ + { + "$ref": "#/components/schemas/InstanceMetadata" + } + ] + }, "propolis_addr": { "description": "The address at which this VMM should serve a Propolis server API.", "type": "string" @@ -4729,6 +4737,7 @@ "required": [ "hardware", "instance_runtime", + "metadata", "propolis_addr", "propolis_id", "vmm_runtime" @@ -4860,6 +4869,24 @@ "snapshot_id" ] }, + "InstanceMetadata": { + "description": "Metadata used to track statistics about an instance.", + "type": "object", + "properties": { + "project_id": { + "type": "string", + "format": "uuid" + }, + "silo_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "project_id", + "silo_id" + ] + }, "InstanceMigrationSourceParams": { "description": "Instance runtime state to update for a migration.", "type": "object", diff --git a/oximeter/instruments/Cargo.toml b/oximeter/instruments/Cargo.toml index 8372b7c560..c49d3b976e 100644 --- a/oximeter/instruments/Cargo.toml +++ b/oximeter/instruments/Cargo.toml @@ -5,27 +5,46 @@ edition = "2021" license = "MPL-2.0" [dependencies] -cfg-if.workspace = true -chrono.workspace = true -dropshot.workspace = true -futures.workspace = true +cfg-if = { workspace = true, optional = true } +chrono = { workspace = true, optional = true } +dropshot = { workspace = true, optional = true } +futures = { workspace = true, optional = true } http = { workspace = true, optional = true } -oximeter.workspace = true -slog.workspace = true -tokio.workspace = true -thiserror.workspace = true -uuid.workspace = true +oximeter = { workspace = true, optional = true } +slog = { workspace = true, optional = true } +tokio = { workspace = true, optional = true } +thiserror = { workspace = true, optional = true } +uuid = { workspace = true, optional = true } omicron-workspace-hack.workspace = true [features] -default = ["http-instruments", "kstat"] -http-instruments = ["http"] -kstat = ["kstat-rs"] +default = ["http-instruments", "datalink"] +http-instruments = [ + "dep:chrono", + "dep:dropshot", + "dep:futures", + "dep:http", + "dep:oximeter", + "dep:uuid" +] +kstat = [ + "dep:cfg-if", + "dep:chrono", + "dep:futures", + "dep:kstat-rs", + "dep:oximeter", + "dep:slog", + "dep:tokio", + "dep:thiserror", + "dep:uuid" +] +datalink = ["kstat"] [dev-dependencies] rand.workspace = true slog-async.workspace = true slog-term.workspace = true +oximeter.workspace = true [target.'cfg(target_os = "illumos")'.dependencies] kstat-rs = { workspace = true, optional = true } diff --git a/oximeter/instruments/src/kstat/mod.rs b/oximeter/instruments/src/kstat/mod.rs index 90f34acae8..c792a51408 100644 --- a/oximeter/instruments/src/kstat/mod.rs +++ b/oximeter/instruments/src/kstat/mod.rs @@ -2,7 +2,7 @@ // License, v. 2.0. 
If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -// Copyright 2023 Oxide Computer Company +// Copyright 2024 Oxide Computer Company //! Types for publishing kernel statistics via oximeter. //! @@ -87,6 +87,7 @@ use std::cmp::Ordering; use std::collections::BTreeMap; use std::time::Duration; +#[cfg(any(feature = "datalink", test))] pub mod link; mod sampler; @@ -206,9 +207,9 @@ pub fn hrtime_to_utc(hrtime: i64) -> Result, Error> { } } -// Helper trait for converting a `NamedData` item into a specific contained data -// type, if possible. -pub(crate) trait ConvertNamedData { +/// Helper trait for converting a `NamedData` item into a specific contained data +/// type, if possible. +pub trait ConvertNamedData { fn as_i32(&self) -> Result; fn as_u32(&self) -> Result; fn as_i64(&self) -> Result; diff --git a/oximeter/instruments/src/kstat/sampler.rs b/oximeter/instruments/src/kstat/sampler.rs index bab8ad0ba5..af1b3ba7cf 100644 --- a/oximeter/instruments/src/kstat/sampler.rs +++ b/oximeter/instruments/src/kstat/sampler.rs @@ -671,7 +671,7 @@ impl KstatSamplerWorker { let n_current_samples = current_samples.len(); let n_total_samples = n_new_samples + n_current_samples; let n_overflow_samples = - n_total_samples.checked_sub(self.sample_limit).unwrap_or(0); + n_total_samples.saturating_sub(self.sample_limit); if n_overflow_samples > 0 { warn!( self.log, @@ -788,7 +788,7 @@ impl KstatSamplerWorker { // or the time we added the kstat if not. let start = sampled_kstat .time_of_last_collection - .unwrap_or_else(|| sampled_kstat.time_added); + .unwrap_or(sampled_kstat.time_added); let expire_at = start + duration; cfg_if::cfg_if! { if #[cfg(test)] { diff --git a/oximeter/instruments/src/lib.rs b/oximeter/instruments/src/lib.rs index d003e71739..c1f839c85d 100644 --- a/oximeter/instruments/src/lib.rs +++ b/oximeter/instruments/src/lib.rs @@ -4,7 +4,7 @@ //! General-purpose types for instrumenting code to producer oximeter metrics. -// Copyright 2023 Oxide Computer Company +// Copyright 2024 Oxide Computer Company #[cfg(feature = "http-instruments")] pub mod http; diff --git a/oximeter/producer/src/lib.rs b/oximeter/producer/src/lib.rs index 2354f9c217..3fecaadf4f 100644 --- a/oximeter/producer/src/lib.rs +++ b/oximeter/producer/src/lib.rs @@ -38,8 +38,8 @@ pub enum Error { #[error("Error running producer HTTP server: {0}")] Server(String), - #[error("Error registering as metric producer: {0}")] - RegistrationError(String), + #[error("Error registering as metric producer: {msg}")] + RegistrationError { retryable: bool, msg: String }, #[error("Producer registry and config UUIDs do not match")] UuidMismatch, @@ -251,11 +251,19 @@ pub async fn register( ) -> Result<(), Error> { let client = nexus_client::Client::new(&format!("http://{}", address), log.clone()); - client - .cpapi_producers_post(&server_info.into()) - .await - .map(|_| ()) - .map_err(|msg| Error::RegistrationError(msg.to_string())) + client.cpapi_producers_post(&server_info.into()).await.map(|_| ()).map_err( + |err| { + let retryable = match &err { + nexus_client::Error::CommunicationError(..) => true, + nexus_client::Error::ErrorResponse(resp) => { + resp.status().is_server_error() + } + _ => false, + }; + let msg = err.to_string(); + Error::RegistrationError { retryable, msg } + }, + ) } /// Handle a request to pull available metric data from a [`ProducerRegistry`]. 
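The new `retryable` flag on `Error::RegistrationError` lets callers distinguish transient failures (communication errors and 5xx responses, per the match above) from permanent ones such as 4xx rejections. A minimal sketch of a retry loop built on that flag; the error type here is a pared-down stand-in and the backoff policy is illustrative rather than what omicron itself uses:

```rust
use std::time::Duration;

// Pared-down stand-in for `oximeter_producer::Error::RegistrationError`.
#[derive(Debug)]
struct RegistrationError {
    retryable: bool,
    msg: String,
}

// Placeholder for the real `register()` call against Nexus; succeeds on the
// third attempt to exercise the retry path.
async fn register_once(attempt: u32) -> Result<(), RegistrationError> {
    if attempt < 3 {
        Err(RegistrationError { retryable: true, msg: "connection refused".into() })
    } else {
        Ok(())
    }
}

async fn register_with_retry(max_attempts: u32) -> Result<(), RegistrationError> {
    let mut delay = Duration::from_millis(250);
    let mut attempt = 1;
    loop {
        match register_once(attempt).await {
            Ok(()) => return Ok(()),
            // Transient failure with budget remaining: back off and retry.
            Err(RegistrationError { retryable: true, msg }) if attempt < max_attempts => {
                eprintln!("registration attempt {attempt} failed (retryable): {msg}");
                tokio::time::sleep(delay).await;
                delay = delay.saturating_mul(2);
                attempt += 1;
            }
            // Permanent failure, or out of attempts: surface the error.
            Err(e) => return Err(e),
        }
    }
}

#[tokio::main]
async fn main() {
    match register_with_retry(5).await {
        Ok(()) => println!("registered with Nexus"),
        Err(e) => eprintln!("giving up: {e:?}"),
    }
}
```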
diff --git a/sled-agent/src/http_entrypoints.rs b/sled-agent/src/http_entrypoints.rs index 5f888504db..6ce9d59b1c 100644 --- a/sled-agent/src/http_entrypoints.rs +++ b/sled-agent/src/http_entrypoints.rs @@ -413,6 +413,7 @@ async fn instance_register( body_args.instance_runtime, body_args.vmm_runtime, body_args.propolis_addr, + body_args.metadata, ) .await?, )) diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs index 7a6033b4bb..5ddca90403 100644 --- a/sled-agent/src/instance.rs +++ b/sled-agent/src/instance.rs @@ -15,7 +15,7 @@ use crate::nexus::NexusClientWithResolver; use crate::params::ZoneBundleMetadata; use crate::params::{InstanceExternalIpBody, ZoneBundleCause}; use crate::params::{ - InstanceHardware, InstanceMigrationSourceParams, + InstanceHardware, InstanceMetadata, InstanceMigrationSourceParams, InstanceMigrationTargetParams, InstancePutStateResponse, InstanceStateRequested, InstanceUnregisterResponse, VpcFirewallRule, }; @@ -311,6 +311,11 @@ struct InstanceRunner { // Properties visible to Propolis properties: propolis_client::types::InstanceProperties, + // This is currently unused, but will be sent to Propolis as part of the + // work tracked in https://github.com/oxidecomputer/omicron/issues/4851. It + // will be included in the InstanceProperties above, most likely. + _metadata: InstanceMetadata, + // The ID of the Propolis server (and zone) running this instance propolis_id: Uuid, @@ -928,6 +933,7 @@ impl Instance { ticket: InstanceTicket, state: InstanceInitialState, services: InstanceManagerServices, + metadata: InstanceMetadata, ) -> Result { info!(log, "initializing new Instance"; "instance_id" => %id, @@ -1005,6 +1011,9 @@ impl Instance { // InstanceCpuCount here, to avoid any casting... vcpus: hardware.properties.ncpus.0 as u8, }, + // This will be used in a follow up, tracked under + // https://github.com/oxidecomputer/omicron/issues/4851. 
+ _metadata: metadata, propolis_id, propolis_addr, vnic_allocator, diff --git a/sled-agent/src/instance_manager.rs b/sled-agent/src/instance_manager.rs index 80c62be234..666d970538 100644 --- a/sled-agent/src/instance_manager.rs +++ b/sled-agent/src/instance_manager.rs @@ -8,6 +8,7 @@ use crate::instance::propolis_zone_name; use crate::instance::Instance; use crate::nexus::NexusClientWithResolver; use crate::params::InstanceExternalIpBody; +use crate::params::InstanceMetadata; use crate::params::ZoneBundleMetadata; use crate::params::{ InstanceHardware, InstanceMigrationSourceParams, InstancePutStateResponse, @@ -227,6 +228,7 @@ impl InstanceManager { *self.inner.reservoir_size.lock().unwrap() } + #[allow(clippy::too_many_arguments)] pub async fn ensure_registered( &self, instance_id: Uuid, @@ -235,6 +237,7 @@ impl InstanceManager { instance_runtime: InstanceRuntimeState, vmm_runtime: VmmRuntimeState, propolis_addr: SocketAddr, + metadata: InstanceMetadata, ) -> Result { let (tx, rx) = oneshot::channel(); self.inner @@ -246,6 +249,7 @@ impl InstanceManager { instance_runtime, vmm_runtime, propolis_addr, + metadata, tx, }) .await @@ -396,6 +400,7 @@ enum InstanceManagerRequest { instance_runtime: InstanceRuntimeState, vmm_runtime: VmmRuntimeState, propolis_addr: SocketAddr, + metadata: InstanceMetadata, tx: oneshot::Sender>, }, EnsureUnregistered { @@ -509,9 +514,10 @@ impl InstanceManagerRunner { instance_runtime, vmm_runtime, propolis_addr, + metadata, tx, }) => { - tx.send(self.ensure_registered(instance_id, propolis_id, hardware, instance_runtime, vmm_runtime, propolis_addr).await).map_err(|_| Error::FailedSendClientClosed) + tx.send(self.ensure_registered(instance_id, propolis_id, hardware, instance_runtime, vmm_runtime, propolis_addr, metadata).await).map_err(|_| Error::FailedSendClientClosed) }, Some(EnsureUnregistered { instance_id, tx }) => { self.ensure_unregistered(tx, instance_id).await @@ -574,7 +580,8 @@ impl InstanceManagerRunner { /// (instance ID, Propolis ID) pair multiple times, but will fail if the /// instance is registered with a Propolis ID different from the one the /// caller supplied. - async fn ensure_registered( + #[allow(clippy::too_many_arguments)] + pub async fn ensure_registered( &mut self, instance_id: Uuid, propolis_id: Uuid, @@ -582,6 +589,7 @@ impl InstanceManagerRunner { instance_runtime: InstanceRuntimeState, vmm_runtime: VmmRuntimeState, propolis_addr: SocketAddr, + metadata: InstanceMetadata, ) -> Result { info!( &self.log, @@ -592,6 +600,7 @@ impl InstanceManagerRunner { "instance_runtime" => ?instance_runtime, "vmm_runtime" => ?vmm_runtime, "propolis_addr" => ?propolis_addr, + "metadata" => ?metadata, ); let instance = { @@ -646,6 +655,7 @@ impl InstanceManagerRunner { ticket, state, services, + metadata, )?; let _old = self.instances.insert(instance_id, (propolis_id, instance)); diff --git a/sled-agent/src/metrics.rs b/sled-agent/src/metrics.rs index 6c3383c88f..a9d5acfff8 100644 --- a/sled-agent/src/metrics.rs +++ b/sled-agent/src/metrics.rs @@ -8,6 +8,7 @@ use oximeter::types::MetricsError; use oximeter::types::ProducerRegistry; use sled_hardware::Baseboard; use slog::Logger; +use std::sync::Arc; use std::time::Duration; use uuid::Uuid; @@ -19,6 +20,7 @@ cfg_if::cfg_if! 
{ use oximeter_instruments::kstat::KstatSampler; use oximeter_instruments::kstat::TargetId; use std::collections::BTreeMap; + use std::sync::Mutex; } else { use anyhow::anyhow; } @@ -46,6 +48,21 @@ pub enum Error { #[error("Failed to fetch hostname")] Hostname(#[source] std::io::Error), + + #[error("Non-UTF8 hostname")] + NonUtf8Hostname, + + #[error("Missing NULL byte in hostname")] + HostnameMissingNull, +} + +// Basic metadata about the sled agent used when publishing metrics. +#[derive(Clone, Debug)] +#[cfg_attr(not(target_os = "illumos"), allow(dead_code))] +struct SledIdentifiers { + sled_id: Uuid, + rack_id: Uuid, + baseboard: Baseboard, } /// Type managing all oximeter metrics produced by the sled-agent. @@ -61,10 +78,7 @@ pub enum Error { // the name of fields that are not yet used. #[cfg_attr(not(target_os = "illumos"), allow(dead_code))] pub struct MetricsManager { - sled_id: Uuid, - rack_id: Uuid, - baseboard: Baseboard, - hostname: Option, + metadata: Arc, _log: Logger, #[cfg(target_os = "illumos")] kstat_sampler: KstatSampler, @@ -78,7 +92,7 @@ pub struct MetricsManager { // namespace them internally, e.g., `"datalink:{link_name}"` would be the // real key. #[cfg(target_os = "illumos")] - tracked_links: BTreeMap, + tracked_links: Arc>>, registry: ProducerRegistry, } @@ -101,14 +115,11 @@ impl MetricsManager { registry .register_producer(kstat_sampler.clone()) .map_err(Error::Registry)?; - let tracked_links = BTreeMap::new(); + let tracked_links = Arc::new(Mutex::new(BTreeMap::new())); } } Ok(Self { - sled_id, - rack_id, - baseboard, - hostname: None, + metadata: Arc::new(SledIdentifiers { sled_id, rack_id, baseboard }), _log: log, #[cfg(target_os = "illumos")] kstat_sampler, @@ -128,14 +139,14 @@ impl MetricsManager { impl MetricsManager { /// Track metrics for a physical datalink. pub async fn track_physical_link( - &mut self, + &self, link_name: impl AsRef, interval: Duration, ) -> Result<(), Error> { - let hostname = self.hostname().await?; + let hostname = hostname()?; let link = link::PhysicalDataLink { - rack_id: self.rack_id, - sled_id: self.sled_id, + rack_id: self.metadata.rack_id, + sled_id: self.metadata.sled_id, serial: self.serial_number(), hostname, link_name: link_name.as_ref().to_string(), @@ -146,7 +157,10 @@ impl MetricsManager { .add_target(link, details) .await .map_err(Error::Kstat)?; - self.tracked_links.insert(link_name.as_ref().to_string(), id); + self.tracked_links + .lock() + .unwrap() + .insert(link_name.as_ref().to_string(), id); Ok(()) } @@ -155,10 +169,12 @@ impl MetricsManager { /// This works for both physical and virtual links. #[allow(dead_code)] pub async fn stop_tracking_link( - &mut self, + &self, link_name: impl AsRef, ) -> Result<(), Error> { - if let Some(id) = self.tracked_links.remove(link_name.as_ref()) { + let maybe_id = + self.tracked_links.lock().unwrap().remove(link_name.as_ref()); + if let Some(id) = maybe_id { self.kstat_sampler.remove_target(id).await.map_err(Error::Kstat) } else { Ok(()) @@ -174,8 +190,8 @@ impl MetricsManager { interval: Duration, ) -> Result<(), Error> { let link = link::VirtualDataLink { - rack_id: self.rack_id, - sled_id: self.sled_id, + rack_id: self.metadata.rack_id, + sled_id: self.metadata.sled_id, serial: self.serial_number(), hostname: hostname.as_ref().to_string(), link_name: link_name.as_ref().to_string(), @@ -190,40 +206,19 @@ impl MetricsManager { // Return the serial number out of the baseboard, if one exists. 
     fn serial_number(&self) -> String {
-        match &self.baseboard {
+        match &self.metadata.baseboard {
             Baseboard::Gimlet { identifier, .. } => identifier.clone(),
             Baseboard::Unknown => String::from("unknown"),
             Baseboard::Pc { identifier, .. } => identifier.clone(),
         }
     }
-
-    // Return the system's hostname.
-    //
-    // If we've failed to get it previously, we try again. If _that_ fails,
-    // return an error.
-    //
-    // TODO-cleanup: This will become much simpler once
-    // `OnceCell::get_or_try_init` is stabilized.
-    async fn hostname(&mut self) -> Result<String, Error> {
-        if let Some(hn) = &self.hostname {
-            return Ok(hn.clone());
-        }
-        let hn = tokio::process::Command::new("hostname")
-            .env_clear()
-            .output()
-            .await
-            .map(|out| String::from_utf8_lossy(&out.stdout).trim().to_string())
-            .map_err(Error::Hostname)?;
-        self.hostname.replace(hn.clone());
-        Ok(hn)
-    }
 }

 #[cfg(not(target_os = "illumos"))]
 impl MetricsManager {
     /// Track metrics for a physical datalink.
     pub async fn track_physical_link(
-        &mut self,
+        &self,
         _link_name: impl AsRef<str>,
         _interval: Duration,
     ) -> Result<(), Error> {
@@ -237,7 +232,7 @@ impl MetricsManager {
     /// This works for both physical and virtual links.
     #[allow(dead_code)]
     pub async fn stop_tracking_link(
-        &mut self,
+        &self,
         _link_name: impl AsRef<str>,
     ) -> Result<(), Error> {
         Err(Error::Kstat(anyhow!(
@@ -258,3 +253,28 @@ impl MetricsManager {
         )))
     }
 }
+
+// Return the current hostname if possible.
+#[cfg(target_os = "illumos")]
+fn hostname() -> Result<String, Error> {
+    // See netdb.h
+    const MAX_LEN: usize = 256;
+    let mut out = vec![0u8; MAX_LEN + 1];
+    if unsafe {
+        libc::gethostname(out.as_mut_ptr() as *mut libc::c_char, MAX_LEN)
+    } == 0
+    {
+        // Split into subslices by NULL bytes.
+        //
+        // We should have a NULL byte, since we've asked for no more than
+        // MAX_LEN bytes in a buffer of MAX_LEN + 1 bytes, but you never know.
+        let Some(chunk) = out.split(|x| *x == 0).next() else {
+            return Err(Error::HostnameMissingNull);
+        };
+        let s = std::ffi::CString::new(chunk)
+            .map_err(|_| Error::NonUtf8Hostname)?;
+        s.into_string().map_err(|_| Error::NonUtf8Hostname)
+    } else {
+        Err(Error::Hostname(std::io::Error::last_os_error()))
+    }
+}
diff --git a/sled-agent/src/params.rs b/sled-agent/src/params.rs
index fda952ca87..d192f745f6 100644
--- a/sled-agent/src/params.rs
+++ b/sled-agent/src/params.rs
@@ -82,6 +82,17 @@ pub struct InstanceHardware {
     pub cloud_init_bytes: Option<String>,
 }

+/// Metadata used to track statistics about an instance.
+///
+// NOTE: The instance ID is not here, since it's already provided in other
+// pieces of the instance-related requests. It is pulled from there when
+// publishing metrics for the instance.
+#[derive(Clone, Debug, Deserialize, JsonSchema, Serialize)]
+pub struct InstanceMetadata {
+    pub silo_id: Uuid,
+    pub project_id: Uuid,
+}
+
 /// The body of a request to ensure that an instance and VMM are known to a
 /// sled agent.
 #[derive(Serialize, Deserialize, JsonSchema)]
@@ -103,6 +114,9 @@ pub struct InstanceEnsureBody {

     /// The address at which this VMM should serve a Propolis server API.
     pub propolis_addr: SocketAddr,
+
+    /// Metadata used to track instance statistics.
+ pub metadata: InstanceMetadata, } /// The body of a request to move a previously-ensured instance into a specific diff --git a/sled-agent/src/sim/http_entrypoints.rs b/sled-agent/src/sim/http_entrypoints.rs index 09ffdf5dc4..494db58828 100644 --- a/sled-agent/src/sim/http_entrypoints.rs +++ b/sled-agent/src/sim/http_entrypoints.rs @@ -98,6 +98,7 @@ async fn instance_register( body_args.hardware, body_args.instance_runtime, body_args.vmm_runtime, + body_args.metadata, ) .await?, )) diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index 27a06e4617..483b2d6aa8 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -13,7 +13,7 @@ use super::storage::Storage; use crate::nexus::NexusClient; use crate::params::{ DiskStateRequested, InstanceExternalIpBody, InstanceHardware, - InstanceMigrationSourceParams, InstancePutStateResponse, + InstanceMetadata, InstanceMigrationSourceParams, InstancePutStateResponse, InstanceStateRequested, InstanceUnregisterResponse, Inventory, OmicronZonesConfig, SledRole, }; @@ -239,6 +239,9 @@ impl SledAgent { hardware: InstanceHardware, instance_runtime: InstanceRuntimeState, vmm_runtime: VmmRuntimeState, + // This is currently unused, but will be included as part of work + // tracked in https://github.com/oxidecomputer/omicron/issues/4851. + _metadata: InstanceMetadata, ) -> Result { // respond with a fake 500 level failure if asked to ensure an instance // with more than 16 CPUs. diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index 39ba1045f7..5381337b8c 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -17,7 +17,7 @@ use crate::metrics::MetricsManager; use crate::nexus::{ConvertInto, NexusClientWithResolver, NexusRequestQueue}; use crate::params::{ DiskStateRequested, InstanceExternalIpBody, InstanceHardware, - InstanceMigrationSourceParams, InstancePutStateResponse, + InstanceMetadata, InstanceMigrationSourceParams, InstancePutStateResponse, InstanceStateRequested, InstanceUnregisterResponse, Inventory, OmicronZonesConfig, SledRole, TimeSync, VpcFirewallRule, ZoneBundleMetadata, Zpool, @@ -401,6 +401,55 @@ impl SledAgent { let underlay_nics = underlay::find_nics(&config.data_links)?; illumos_utils::opte::initialize_xde_driver(&log, &underlay_nics)?; + // Start collecting metric data. + // + // First, we're creating a shareable type for managing the metrics + // themselves early on, so that we can pass it to other components of + // the sled agent that need it. + // + // Then we'll start tracking physical links and register as a producer + // with Nexus in the background. + let metrics_manager = MetricsManager::new( + request.body.id, + request.body.rack_id, + long_running_task_handles.hardware_manager.baseboard(), + log.new(o!("component" => "MetricsManager")), + )?; + + // Start tracking the underlay physical links. + for nic in underlay::find_nics(&config.data_links)? { + let link_name = nic.interface(); + if let Err(e) = metrics_manager + .track_physical_link( + link_name, + crate::metrics::LINK_SAMPLE_INTERVAL, + ) + .await + { + error!( + log, + "failed to start tracking physical link metrics"; + "link_name" => link_name, + "error" => ?e, + ); + } + } + + // Spawn a task in the background to register our metric producer with + // Nexus. This should not block progress here. 
+ let endpoint = ProducerEndpoint { + id: request.body.id, + kind: ProducerKind::SledAgent, + address: sled_address.into(), + base_route: String::from("/metrics/collect"), + interval: crate::metrics::METRIC_COLLECTION_INTERVAL, + }; + tokio::task::spawn(register_metric_producer_with_nexus( + log.clone(), + nexus_client.clone(), + endpoint, + )); + // Create the PortManager to manage all the OPTE ports on the sled. let port_manager = PortManager::new( parent_log.new(o!("component" => "PortManager")), @@ -521,47 +570,6 @@ impl SledAgent { rack_network_config.clone(), )?; - let mut metrics_manager = MetricsManager::new( - request.body.id, - request.body.rack_id, - long_running_task_handles.hardware_manager.baseboard(), - log.new(o!("component" => "MetricsManager")), - )?; - - // Start tracking the underlay physical links. - for nic in underlay::find_nics(&config.data_links)? { - let link_name = nic.interface(); - if let Err(e) = metrics_manager - .track_physical_link( - link_name, - crate::metrics::LINK_SAMPLE_INTERVAL, - ) - .await - { - error!( - log, - "failed to start tracking physical link metrics"; - "link_name" => link_name, - "error" => ?e, - ); - } - } - - // Spawn a task in the background to register our metric producer with - // Nexus. This should not block progress here. - let endpoint = ProducerEndpoint { - id: request.body.id, - kind: ProducerKind::SledAgent, - address: sled_address.into(), - base_route: String::from("/metrics/collect"), - interval: crate::metrics::METRIC_COLLECTION_INTERVAL, - }; - tokio::task::spawn(register_metric_producer_with_nexus( - log.clone(), - nexus_client.clone(), - endpoint, - )); - let sled_agent = SledAgent { inner: Arc::new(SledAgentInner { id: request.body.id, @@ -945,6 +953,7 @@ impl SledAgent { /// Idempotently ensures that a given instance is registered with this sled, /// i.e., that it can be addressed by future calls to /// [`Self::instance_ensure_state`]. 
+ #[allow(clippy::too_many_arguments)] pub async fn instance_ensure_registered( &self, instance_id: Uuid, @@ -953,6 +962,7 @@ impl SledAgent { instance_runtime: InstanceRuntimeState, vmm_runtime: VmmRuntimeState, propolis_addr: SocketAddr, + metadata: InstanceMetadata, ) -> Result { self.inner .instances @@ -963,6 +973,7 @@ impl SledAgent { instance_runtime, vmm_runtime, propolis_addr, + metadata, ) .await .map_err(|e| Error::Instance(e)) From cd0effee306ed1ec563bd328ed749840f512ed60 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Thu, 7 Mar 2024 13:07:19 -0800 Subject: [PATCH 089/157] add reconfigurator-cli and facilities for exploring, testing the planner (#5197) --- Cargo.lock | 49 +- Cargo.toml | 7 +- dev-tools/omdb/Cargo.toml | 4 +- dev-tools/omdb/src/bin/omdb/db.rs | 136 + dev-tools/omdb/src/bin/omdb/nexus.rs | 34 +- dev-tools/omdb/tests/successes.out | 13 + dev-tools/omdb/tests/test_all_output.rs | 122 +- dev-tools/omdb/tests/usage_errors.out | 46 +- dev-tools/reconfigurator-cli/Cargo.toml | 40 + dev-tools/reconfigurator-cli/src/main.rs | 768 + .../reconfigurator-cli/tests/input/cmds.txt | 14 + .../tests/input/complex.json | 11558 ++++++++++++++++ .../tests/output/cmd-complex-stdout | 385 + .../tests/output/cmd-stderr | 0 .../tests/output/cmd-stdout | 58 + .../reconfigurator-cli/tests/test_basic.rs | 115 + nexus/Cargo.toml | 1 + .../db-queries/src/db/datastore/deployment.rs | 11 +- nexus/db-queries/src/db/datastore/ip_pool.rs | 29 + nexus/db-queries/src/db/datastore/zpool.rs | 32 +- nexus/inventory/src/builder.rs | 7 +- nexus/reconfigurator/planning/Cargo.toml | 3 + .../planning/src/blueprint_builder.rs | 357 +- nexus/reconfigurator/planning/src/lib.rs | 1 + nexus/reconfigurator/planning/src/planner.rs | 31 +- nexus/reconfigurator/planning/src/system.rs | 578 + nexus/reconfigurator/preparation/Cargo.toml | 12 + nexus/reconfigurator/preparation/src/lib.rs | 74 + nexus/src/app/deployment.rs | 102 +- nexus/types/Cargo.toml | 2 + nexus/types/src/deployment.rs | 82 +- nexus/types/src/inventory.rs | 67 +- test-utils/Cargo.toml | 2 +- test-utils/src/dev/test_cmds.rs | 70 + 34 files changed, 14330 insertions(+), 480 deletions(-) create mode 100644 dev-tools/reconfigurator-cli/Cargo.toml create mode 100644 dev-tools/reconfigurator-cli/src/main.rs create mode 100644 dev-tools/reconfigurator-cli/tests/input/cmds.txt create mode 100644 dev-tools/reconfigurator-cli/tests/input/complex.json create mode 100644 dev-tools/reconfigurator-cli/tests/output/cmd-complex-stdout create mode 100644 dev-tools/reconfigurator-cli/tests/output/cmd-stderr create mode 100644 dev-tools/reconfigurator-cli/tests/output/cmd-stdout create mode 100644 dev-tools/reconfigurator-cli/tests/test_basic.rs create mode 100644 nexus/reconfigurator/planning/src/system.rs create mode 100644 nexus/reconfigurator/preparation/Cargo.toml create mode 100644 nexus/reconfigurator/preparation/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 5d773a9abc..fd9155bb8e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4521,6 +4521,8 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", + "gateway-client", + "indexmap 2.2.5", "internal-dns", "ipnet", "ipnetwork", @@ -4536,6 +4538,17 @@ dependencies = [ "uuid", ] +[[package]] +name = "nexus-reconfigurator-preparation" +version = "0.1.0" +dependencies = [ + "illumos-utils", + "nexus-db-model", + "nexus-types", + "omicron-common", + "omicron-workspace-hack", +] + [[package]] name = "nexus-test-interface" version = "0.1.0" @@ -4611,6 +4624,7 @@ dependencies = [ "dns-service-client", 
"futures", "gateway-client", + "humantime", "omicron-common", "omicron-passwords", "omicron-uuid-kinds", @@ -4620,6 +4634,7 @@ dependencies = [ "schemars", "serde", "serde_json", + "serde_with", "sled-agent-client", "steno", "strum 0.26.1", @@ -5074,6 +5089,7 @@ dependencies = [ "nexus-inventory", "nexus-reconfigurator-execution", "nexus-reconfigurator-planning", + "nexus-reconfigurator-preparation", "nexus-test-interface", "nexus-test-utils", "nexus-test-utils-macros", @@ -5148,6 +5164,8 @@ version = "0.1.0" dependencies = [ "anyhow", "async-bb8-diesel", + "camino", + "camino-tempfile", "chrono", "clap 4.5.0", "crossterm", @@ -5169,6 +5187,7 @@ dependencies = [ "nexus-config", "nexus-db-model", "nexus-db-queries", + "nexus-reconfigurator-preparation", "nexus-test-utils", "nexus-test-utils-macros", "nexus-types", @@ -5180,7 +5199,6 @@ dependencies = [ "oximeter-client", "pq-sys", "ratatui", - "regex", "serde", "serde_json", "sled-agent-client", @@ -6946,6 +6964,35 @@ dependencies = [ "rand_core 0.3.1", ] +[[package]] +name = "reconfigurator-cli" +version = "0.1.0" +dependencies = [ + "anyhow", + "camino", + "camino-tempfile", + "clap 4.5.0", + "dropshot", + "expectorate", + "humantime", + "indexmap 2.2.5", + "nexus-reconfigurator-planning", + "nexus-types", + "omicron-common", + "omicron-rpaths", + "omicron-test-utils", + "omicron-workspace-hack", + "reedline", + "regex", + "serde_json", + "slog", + "slog-error-chain", + "subprocess", + "swrite", + "tabled", + "uuid", +] + [[package]] name = "redox_syscall" version = "0.2.16" diff --git a/Cargo.toml b/Cargo.toml index deda5a8dd8..299d715d67 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ members = [ "dev-tools/omdb", "dev-tools/omicron-dev", "dev-tools/oxlog", + "dev-tools/reconfigurator-cli", "dev-tools/xtask", "dns-server", "end-to-end-tests", @@ -46,6 +47,7 @@ members = [ "nexus/macros-common", "nexus/reconfigurator/execution", "nexus/reconfigurator/planning", + "nexus/reconfigurator/preparation", "nexus/test-interface", "nexus/test-utils-macros", "nexus/test-utils", @@ -98,6 +100,7 @@ default-members = [ "dev-tools/omdb", "dev-tools/omicron-dev", "dev-tools/oxlog", + "dev-tools/reconfigurator-cli", # Do not include xtask in the list of default members, because this causes # hakari to not work as well and build times to be longer. # See omicron#4392. 
@@ -124,6 +127,7 @@ default-members = [ "nexus/inventory", "nexus/reconfigurator/execution", "nexus/reconfigurator/planning", + "nexus/reconfigurator/preparation", "nexus/types", "oximeter/collector", "oximeter/db", @@ -264,6 +268,7 @@ nexus-inventory = { path = "nexus/inventory" } nexus-macros-common = { path = "nexus/macros-common" } nexus-reconfigurator-execution = { path = "nexus/reconfigurator/execution" } nexus-reconfigurator-planning = { path = "nexus/reconfigurator/planning" } +nexus-reconfigurator-preparation = { path = "nexus/reconfigurator/preparation" } omicron-certificates = { path = "certificates" } omicron-passwords = { path = "passwords" } omicron-workspace-hack = "0.1.0" @@ -342,7 +347,7 @@ samael = { version = "0.0.14", features = ["xmlsec"] } schemars = "0.8.16" secrecy = "0.8.0" semver = { version = "1.0.22", features = ["std", "serde"] } -serde = { version = "1.0", default-features = false, features = [ "derive" ] } +serde = { version = "1.0", default-features = false, features = [ "derive", "rc" ] } serde_derive = "1.0" serde_human_bytes = { git = "http://github.com/oxidecomputer/serde_human_bytes", branch = "main" } serde_json = "1.0.114" diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index df56fda571..2b0480be2d 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -10,6 +10,7 @@ omicron-rpaths.workspace = true [dependencies] anyhow.workspace = true async-bb8-diesel.workspace = true +camino.workspace = true chrono.workspace = true clap.workspace = true crossterm.workspace = true @@ -28,6 +29,7 @@ nexus-client.workspace = true nexus-config.workspace = true nexus-db-model.workspace = true nexus-db-queries.workspace = true +nexus-reconfigurator-preparation.workspace = true nexus-types.workspace = true omicron-common.workspace = true oximeter-client.workspace = true @@ -55,8 +57,8 @@ expectorate.workspace = true nexus-test-utils-macros.workspace = true omicron-nexus.workspace = true omicron-test-utils.workspace = true -regex.workspace = true subprocess.workspace = true +camino-tempfile.workspace = true # Disable doc builds by default for our binaries to work around issue # rust-lang/cargo#8373. These docs would not be very useful anyway. 
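For orientation before the diff below: the new `omdb db reconfigurator-save` subcommand serializes an `UnstableReconfiguratorState` (the planning policy, all inventory collections, and all blueprints) as pretty-printed JSON, which `reconfigurator-cli` can then load back. A minimal sketch of the consuming side, assuming only the `anyhow`, `camino`, `nexus-types`, and `serde_json` dependencies that appear in this patch; the helper name `load_saved_state` is illustrative and not part of the patch itself (it mirrors `read_file` in the new reconfigurator-cli):

use anyhow::Context;
use camino::Utf8Path;
use nexus_types::deployment::UnstableReconfiguratorState;

// Load a file written by `omdb db reconfigurator-save` (or by the CLI's own
// `save` command) back into memory by deserializing the JSON document.
fn load_saved_state(
    path: &Utf8Path,
) -> anyhow::Result<UnstableReconfiguratorState> {
    let file = std::fs::File::open(path)
        .with_context(|| format!("open {:?}", path))?;
    serde_json::from_reader(file)
        .with_context(|| format!("read {:?}", path))
}

Note that the writing side opens the output with `create_new(true)`, so both `omdb db reconfigurator-save` and the CLI's `save` command refuse to overwrite an existing file.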
diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index c7e9022fc2..02b68144df 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -22,6 +22,7 @@ use anyhow::Context; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; use async_bb8_diesel::AsyncSimpleConnection; +use camino::Utf8PathBuf; use chrono::SecondsFormat; use clap::Args; use clap::Subcommand; @@ -34,6 +35,8 @@ use diesel::JoinOnDsl; use diesel::NullableExpressionMethods; use diesel::OptionalExtension; use diesel::TextExpressionMethods; +use dropshot::PaginationOrder; +use futures::StreamExt; use gateway_client::types::SpType; use ipnetwork::IpNetwork; use nexus_config::PostgresConfigWithUrl; @@ -68,20 +71,26 @@ use nexus_db_queries::db; use nexus_db_queries::db::datastore::read_only_resources_associated_with_volume; use nexus_db_queries::db::datastore::CrucibleTargets; use nexus_db_queries::db::datastore::DataStoreConnection; +use nexus_db_queries::db::datastore::DataStoreInventoryTest; use nexus_db_queries::db::datastore::InstanceAndActiveVmm; use nexus_db_queries::db::identity::Asset; use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::model::ServiceKind; use nexus_db_queries::db::DataStore; +use nexus_reconfigurator_preparation::policy_from_db; use nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL; +use nexus_types::deployment::Blueprint; +use nexus_types::deployment::UnstableReconfiguratorState; use nexus_types::identity::Resource; use nexus_types::internal_api::params::DnsRecord; use nexus_types::internal_api::params::Srv; use nexus_types::inventory::CabooseWhich; use nexus_types::inventory::Collection; use nexus_types::inventory::RotPageWhich; +use omicron_common::address::NEXUS_REDUNDANCY; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Generation; +use omicron_common::api::external::LookupType; use omicron_common::api::external::MacAddr; use sled_agent_client::types::VolumeConstructionRequest; use std::borrow::Cow; @@ -175,6 +184,8 @@ enum DbCommands { Dns(DnsArgs), /// Print information about collected hardware/software inventory Inventory(InventoryArgs), + /// Save the current Reconfigurator inputs to a file + ReconfiguratorSave(ReconfiguratorSaveArgs), /// Print information about control plane services Services(ServicesArgs), /// Print information about sleds @@ -310,6 +321,12 @@ struct CollectionsShowArgs { show_long_strings: bool, } +#[derive(Debug, Args)] +struct ReconfiguratorSaveArgs { + /// where to save the output + output_file: Utf8PathBuf, +} + #[derive(Debug, Args)] struct ServicesArgs { #[command(subcommand)] @@ -464,6 +481,15 @@ impl DbArgs { ) .await } + DbCommands::ReconfiguratorSave(reconfig_save_args) => { + cmd_db_reconfigurator_save( + &opctx, + &datastore, + &self.fetch_opts, + reconfig_save_args, + ) + .await + } DbCommands::Services(ServicesArgs { command: ServicesCommands::ListInstances, }) => { @@ -3104,3 +3130,113 @@ impl LongStringFormatter { s.into() } } + +// Reconfigurator + +/// Packages up database state that's used as input to the Reconfigurator +/// planner into a file so that it can be loaded into `reconfigurator-cli` +async fn cmd_db_reconfigurator_save( + opctx: &OpContext, + datastore: &DataStore, + fetch_opts: &DbFetchOptions, + reconfig_save_args: &ReconfiguratorSaveArgs, +) -> Result<(), anyhow::Error> { + // See Nexus::blueprint_planning_context(). + eprint!("assembling policy ... 
"); + let sled_rows = datastore + .sled_list_all_batched(opctx) + .await + .context("listing sleds")?; + let zpool_rows = datastore + .zpool_list_all_external_batched(opctx) + .await + .context("listing zpools")?; + let ip_pool_range_rows = { + let (authz_service_ip_pool, _) = datastore + .ip_pools_service_lookup(opctx) + .await + .context("fetching IP services pool")?; + datastore + .ip_pool_list_ranges_batched(opctx, &authz_service_ip_pool) + .await + .context("listing services IP pool ranges")? + }; + + let policy = policy_from_db( + &sled_rows, + &zpool_rows, + &ip_pool_range_rows, + NEXUS_REDUNDANCY, + ) + .context("assembling policy")?; + eprintln!("done."); + + eprint!("loading inventory collections ... "); + let collection_ids = datastore + .inventory_collections() + .await + .context("listing collections")?; + let collections = futures::stream::iter(collection_ids) + .filter_map(|id| async move { + let read = datastore + .inventory_collection_read(opctx, id) + .await + .with_context(|| format!("reading collection {}", id)); + if let Err(error) = &read { + eprintln!("warning: {}", error); + } + read.ok() + }) + .collect::>() + .await; + eprintln!("done."); + + eprint!("loading blueprints ... "); + let limit = fetch_opts.fetch_limit; + let pagparams = DataPageParams { + marker: None, + direction: PaginationOrder::Ascending, + limit, + }; + let blueprint_ids = datastore + .blueprints_list(opctx, &pagparams) + .await + .context("listing blueprints")?; + check_limit(&blueprint_ids, limit, || "listing blueprint ids"); + let blueprints = futures::stream::iter(blueprint_ids) + .filter_map(|bpm| async move { + let blueprint_id = bpm.id; + let read = datastore + .blueprint_read( + opctx, + &nexus_db_queries::authz::Blueprint::new( + nexus_db_queries::authz::FLEET, + blueprint_id, + LookupType::ById(blueprint_id), + ), + ) + .await + .with_context(|| format!("reading blueprint {}", blueprint_id)); + if let Err(error) = &read { + eprintln!("warning: {}", error); + } + read.ok() + }) + .collect::>() + .await; + eprintln!("done."); + + let state = + UnstableReconfiguratorState { policy: policy, collections, blueprints }; + + let output_path = &reconfig_save_args.output_file; + let file = std::fs::OpenOptions::new() + .create_new(true) + .write(true) + .open(&output_path) + .with_context(|| format!("open {:?}", output_path))?; + serde_json::to_writer_pretty(&file, &state) + .with_context(|| format!("write {:?}", output_path))?; + eprintln!("wrote {}", output_path); + Ok(()) +} diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs index 03d6fd6e80..692931db51 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus.rs @@ -869,39 +869,7 @@ async fn cmd_nexus_blueprints_show( .blueprint_view(&args.blueprint_id) .await .with_context(|| format!("fetching blueprint {}", args.blueprint_id))?; - println!("blueprint {}", blueprint.id); - println!( - "parent: {}", - blueprint - .parent_blueprint_id - .map(|u| u.to_string()) - .unwrap_or_else(|| String::from("")) - ); - println!( - "created by {}{}", - blueprint.creator, - if blueprint.creator.parse::().is_ok() { - " (likely a Nexus instance)" - } else { - "" - } - ); - println!( - "created at {}", - humantime::format_rfc3339_millis(blueprint.time_created.into(),) - ); - println!("comment: {}", blueprint.comment); - println!("zones:\n"); - for (sled_id, sled_zones) in &blueprint.omicron_zones { - println!( - " sled {}: Omicron zones at generation {}", - sled_id, sled_zones.generation - ); 
- for z in &sled_zones.zones { - println!(" {} {}", z.id, z.zone_type.label()); - } - } - + println!("{:?}", blueprint); Ok(()) } diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 3086c98f32..fe590acf55 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -58,6 +58,19 @@ stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable note: database schema version matches expected () ============================================= +EXECUTING COMMAND: omdb ["db", "reconfigurator-save", ""] +termination: Exited(0) +--------------------------------------------- +stdout: +--------------------------------------------- +stderr: +note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable +note: database schema version matches expected () +assembling policy ... done. +loading inventory collections ... done. +loading blueprints ... done. +wrote +============================================= EXECUTING COMMAND: omdb ["db", "services", "list-instances"] termination: Exited(0) --------------------------------------------- diff --git a/dev-tools/omdb/tests/test_all_output.rs b/dev-tools/omdb/tests/test_all_output.rs index dc681712eb..4a9802eee6 100644 --- a/dev-tools/omdb/tests/test_all_output.rs +++ b/dev-tools/omdb/tests/test_all_output.rs @@ -9,8 +9,11 @@ use expectorate::assert_contents; use nexus_test_utils_macros::nexus_test; +use nexus_types::deployment::UnstableReconfiguratorState; use omicron_test_utils::dev::test_cmds::path_to_executable; +use omicron_test_utils::dev::test_cmds::redact_variable; use omicron_test_utils::dev::test_cmds::run_command; +use slog_error_chain::InlineErrorChain; use std::fmt::Write; use std::path::Path; use subprocess::Exec; @@ -53,7 +56,7 @@ async fn test_omdb_usage_errors() { ]; for args in invocations { - do_run(&mut output, |exec| exec, &cmd_path, args).await; + do_run(&mut output, |exec| exec, &cmd_path, args, &[]).await; } assert_contents("tests/usage_errors.out", &output); @@ -71,13 +74,17 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { let nexus_internal_url = format!("http://{}/", cptestctx.internal_client.bind_address); let mgs_url = format!("http://{}/", gwtestctx.client.bind_address); + let tmpdir = camino_tempfile::tempdir() + .expect("failed to create temporary directory"); + let tmppath = tmpdir.path().join("reconfigurator-save.out"); let mut output = String::new(); - let invocations: &[&[&'static str]] = &[ + let invocations: &[&[&str]] = &[ &["db", "disks", "list"], &["db", "dns", "show"], &["db", "dns", "diff", "external", "2"], &["db", "dns", "names", "external", "2"], &["db", "instances"], + &["db", "reconfigurator-save", tmppath.as_str()], &["db", "services", "list-instances"], &["db", "services", "list-by-sled"], &["db", "sleds"], @@ -103,11 +110,35 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { }, &cmd_path, args, + &[tmppath.as_str()], ) .await; } assert_contents("tests/successes.out", &output); + + // The `reconfigurator-save` output is not easy to compare as a string. But + // let's make sure we can at least parse it and that it looks broadly like + // what we'd expect. 
+ let generated = std::fs::read_to_string(&tmppath).unwrap_or_else(|error| { + panic!( + "failed to read temporary file containing reconfigurator-save \ + output: {:?}: {}", + tmppath, + InlineErrorChain::new(&error), + ) + }); + let parsed: UnstableReconfiguratorState = serde_json::from_str(&generated) + .unwrap_or_else(|error| { + panic!( + "failed to parse reconfigurator-save output (path {}): {}", + tmppath, + InlineErrorChain::new(&error), + ) + }); + assert!(parsed.policy.sleds.len() > 0); + assert!(parsed.collections.len() > 0); + gwtestctx.teardown().await; } @@ -132,7 +163,7 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { // Database URL // Case 1: specified on the command line let args = &["db", "--db-url", &postgres_url, "sleds"]; - do_run(&mut output, |exec| exec, &cmd_path, args).await; + do_run(&mut output, |exec| exec, &cmd_path, args, &[]).await; // Case 2: specified in multiple places (command-line argument wins) let args = &["db", "--db-url", "junk", "sleds"]; @@ -142,6 +173,7 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { move |exec| exec.env("OMDB_DB_URL", &p), &cmd_path, args, + &[], ) .await; @@ -154,7 +186,7 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { "background-tasks", "doc", ]; - do_run(&mut output, |exec| exec, &cmd_path.clone(), args).await; + do_run(&mut output, |exec| exec, &cmd_path.clone(), args, &[]).await; // Case 2: specified in multiple places (command-line argument wins) let args = @@ -165,6 +197,7 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { move |exec| exec.env("OMDB_NEXUS_URL", &n), &cmd_path, args, + &[], ) .await; @@ -177,6 +210,7 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { move |exec| exec.env("OMDB_DNS_SERVER", dns_sockaddr.to_string()), &cmd_path, args, + &[], ) .await; @@ -187,7 +221,7 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { "background-tasks", "doc", ]; - do_run(&mut output, move |exec| exec, &cmd_path, args).await; + do_run(&mut output, move |exec| exec, &cmd_path, args, &[]).await; let args = &["db", "sleds"]; do_run( @@ -195,11 +229,12 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { move |exec| exec.env("OMDB_DNS_SERVER", dns_sockaddr.to_string()), &cmd_path, args, + &[], ) .await; let args = &["--dns-server", &dns_sockaddr.to_string(), "db", "sleds"]; - do_run(&mut output, move |exec| exec, &cmd_path, args).await; + do_run(&mut output, move |exec| exec, &cmd_path, args, &[]).await; assert_contents("tests/env.out", &output); } @@ -209,6 +244,7 @@ async fn do_run( modexec: F, cmd_path: &Path, args: &[&str], + extra_redactions: &[&str], ) where F: FnOnce(Exec) -> Exec + Send + 'static, { @@ -217,7 +253,9 @@ async fn do_run( output, "EXECUTING COMMAND: {} {:?}\n", cmd_path.file_name().expect("missing command").to_string_lossy(), - args.iter().map(|r| redact_variable(r)).collect::>(), + args.iter() + .map(|r| redact_variable(r, extra_redactions)) + .collect::>(), ) .unwrap(); @@ -249,75 +287,9 @@ async fn do_run( write!(output, "termination: {:?}\n", exit_status).unwrap(); write!(output, "---------------------------------------------\n").unwrap(); write!(output, "stdout:\n").unwrap(); - output.push_str(&redact_variable(&stdout_text)); + output.push_str(&redact_variable(&stdout_text, extra_redactions)); write!(output, "---------------------------------------------\n").unwrap(); write!(output, "stderr:\n").unwrap(); - 
output.push_str(&redact_variable(&stderr_text)); + output.push_str(&redact_variable(&stderr_text, extra_redactions)); write!(output, "=============================================\n").unwrap(); } - -/// Redacts text from stdout/stderr that may change from invocation to invocation -/// (e.g., assigned TCP port numbers, timestamps) -/// -/// This allows use to use expectorate to verify the shape of the CLI output. -fn redact_variable(input: &str) -> String { - // Replace TCP port numbers. We include the localhost characters to avoid - // catching any random sequence of numbers. - let s = regex::Regex::new(r"\[::1\]:\d{4,5}") - .unwrap() - .replace_all(input, "[::1]:REDACTED_PORT") - .to_string(); - let s = regex::Regex::new(r"\[::ffff:127.0.0.1\]:\d{4,5}") - .unwrap() - .replace_all(&s, "[::ffff:127.0.0.1]:REDACTED_PORT") - .to_string(); - let s = regex::Regex::new(r"127\.0\.0\.1:\d{4,5}") - .unwrap() - .replace_all(&s, "127.0.0.1:REDACTED_PORT") - .to_string(); - - // Replace uuids. - let s = regex::Regex::new( - "[a-zA-Z0-9]{8}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}-\ - [a-zA-Z0-9]{4}-[a-zA-Z0-9]{12}", - ) - .unwrap() - .replace_all(&s, "REDACTED_UUID_REDACTED_UUID_REDACTED") - .to_string(); - - // Replace timestamps. - let s = regex::Regex::new(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z") - .unwrap() - .replace_all(&s, "") - .to_string(); - - let s = regex::Regex::new(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z") - .unwrap() - .replace_all(&s, "") - .to_string(); - - // Replace formatted durations. These are pretty specific to the background - // task output. - let s = regex::Regex::new(r"\d+s ago") - .unwrap() - .replace_all(&s, "s ago") - .to_string(); - - let s = regex::Regex::new(r"\d+ms") - .unwrap() - .replace_all(&s, "ms") - .to_string(); - - let s = regex::Regex::new( - r"note: database schema version matches expected \(\d+\.\d+\.\d+\)", - ) - .unwrap() - .replace_all( - &s, - "note: database schema version matches expected \ - ()", - ) - .to_string(); - - s -} diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index 2f9001671d..b704982266 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -94,17 +94,18 @@ Query the control plane database (CockroachDB) Usage: omdb db [OPTIONS] Commands: - rack Print information about the rack - disks Print information about disks - dns Print information about internal and external DNS - inventory Print information about collected hardware/software inventory - services Print information about control plane services - sleds Print information about sleds - instances Print information about customer instances - network Print information about the network - snapshots Print information about snapshots - validate Validate the contents of the database - help Print this message or the help of the given subcommand(s) + rack Print information about the rack + disks Print information about disks + dns Print information about internal and external DNS + inventory Print information about collected hardware/software inventory + reconfigurator-save Save the current Reconfigurator inputs to a file + services Print information about control plane services + sleds Print information about sleds + instances Print information about customer instances + network Print information about the network + snapshots Print information about snapshots + validate Validate the contents of the database + help Print this message or the help of the given subcommand(s) Options: --db-url URL of the database SQL 
interface [env: OMDB_DB_URL=] @@ -123,17 +124,18 @@ Query the control plane database (CockroachDB) Usage: omdb db [OPTIONS] Commands: - rack Print information about the rack - disks Print information about disks - dns Print information about internal and external DNS - inventory Print information about collected hardware/software inventory - services Print information about control plane services - sleds Print information about sleds - instances Print information about customer instances - network Print information about the network - snapshots Print information about snapshots - validate Validate the contents of the database - help Print this message or the help of the given subcommand(s) + rack Print information about the rack + disks Print information about disks + dns Print information about internal and external DNS + inventory Print information about collected hardware/software inventory + reconfigurator-save Save the current Reconfigurator inputs to a file + services Print information about control plane services + sleds Print information about sleds + instances Print information about customer instances + network Print information about the network + snapshots Print information about snapshots + validate Validate the contents of the database + help Print this message or the help of the given subcommand(s) Options: --db-url URL of the database SQL interface [env: OMDB_DB_URL=] diff --git a/dev-tools/reconfigurator-cli/Cargo.toml b/dev-tools/reconfigurator-cli/Cargo.toml new file mode 100644 index 0000000000..8a8ea85544 --- /dev/null +++ b/dev-tools/reconfigurator-cli/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "reconfigurator-cli" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[build-dependencies] +omicron-rpaths.workspace = true + +[dependencies] +anyhow.workspace = true +camino.workspace = true +clap.workspace = true +dropshot.workspace = true +humantime.workspace = true +indexmap.workspace = true +nexus-reconfigurator-planning.workspace = true +nexus-types.workspace = true +omicron-common.workspace = true +reedline.workspace = true +serde_json.workspace = true +slog-error-chain.workspace = true +slog.workspace = true +swrite.workspace = true +tabled.workspace = true +uuid.workspace = true +omicron-workspace-hack.workspace = true + +[dev-dependencies] +camino-tempfile.workspace = true +expectorate.workspace = true +omicron-test-utils.workspace = true +regex.workspace = true +subprocess.workspace = true + +# Disable doc builds by default for our binaries to work around issue +# rust-lang/cargo#8373. These docs would not be very useful anyway. +[[bin]] +name = "reconfigurator-cli" +doc = false diff --git a/dev-tools/reconfigurator-cli/src/main.rs b/dev-tools/reconfigurator-cli/src/main.rs new file mode 100644 index 0000000000..9cdf09a6f9 --- /dev/null +++ b/dev-tools/reconfigurator-cli/src/main.rs @@ -0,0 +1,768 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
developer REPL for driving blueprint planning
+
+use anyhow::{anyhow, bail, Context};
+use camino::Utf8PathBuf;
+use clap::CommandFactory;
+use clap::FromArgMatches;
+use clap::{Args, Parser, Subcommand};
+use indexmap::IndexMap;
+use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder;
+use nexus_reconfigurator_planning::planner::Planner;
+use nexus_reconfigurator_planning::system::{
+    SledBuilder, SledHwInventory, SystemDescription,
+};
+use nexus_types::deployment::{Blueprint, UnstableReconfiguratorState};
+use nexus_types::inventory::Collection;
+use nexus_types::inventory::OmicronZonesConfig;
+use omicron_common::api::external::Generation;
+use reedline::{Reedline, Signal};
+use std::io::BufRead;
+use swrite::{swriteln, SWrite};
+use tabled::Tabled;
+use uuid::Uuid;
+
+/// REPL state
+#[derive(Debug)]
+struct ReconfiguratorSim {
+    /// describes the sleds in the system
+    ///
+    /// This resembles what we get from the `sled` table in a real system. It
+    /// also contains enough information to generate inventory collections that
+    /// describe the system.
+    system: SystemDescription,
+
+    /// inventory collections created by the user
+    collections: IndexMap<Uuid, Collection>,
+
+    /// blueprints created by the user
+    blueprints: IndexMap<Uuid, Blueprint>,
+
+    log: slog::Logger,
+}
+
+/// interactive REPL for exploring the planner
+#[derive(Parser, Debug)]
+struct CmdReconfiguratorSim {
+    input_file: Option<Utf8PathBuf>,
+}
+
+// REPL implementation
+
+fn main() -> anyhow::Result<()> {
+    let cmd = CmdReconfiguratorSim::parse();
+
+    let log = dropshot::ConfigLogging::StderrTerminal {
+        level: dropshot::ConfigLoggingLevel::Debug,
+    }
+    .to_logger("reconfigurator-sim")
+    .context("creating logger")?;
+
+    let mut sim = ReconfiguratorSim {
+        system: SystemDescription::new(),
+        collections: IndexMap::new(),
+        blueprints: IndexMap::new(),
+        log,
+    };
+
+    if let Some(input_file) = cmd.input_file {
+        let file = std::fs::File::open(&input_file)
+            .with_context(|| format!("open {:?}", &input_file))?;
+        let bufread = std::io::BufReader::new(file);
+        for maybe_buffer in bufread.lines() {
+            let buffer = maybe_buffer
+                .with_context(|| format!("read {:?}", &input_file))?;
+            println!("> {}", buffer);
+            match process_entry(&mut sim, buffer) {
+                LoopResult::Continue => (),
+                LoopResult::Bail(error) => return Err(error),
+            }
+            println!("");
+        }
+    } else {
+        let mut ed = Reedline::create();
+        let prompt = reedline::DefaultPrompt::new(
+            reedline::DefaultPromptSegment::Empty,
+            reedline::DefaultPromptSegment::Empty,
+        );
+        loop {
+            match ed.read_line(&prompt) {
+                Ok(Signal::Success(buffer)) => {
+                    match process_entry(&mut sim, buffer) {
+                        LoopResult::Continue => (),
+                        LoopResult::Bail(error) => return Err(error),
+                    }
+                }
+                Ok(Signal::CtrlD) | Ok(Signal::CtrlC) => break,
+                Err(error) => {
+                    bail!("reconfigurator-cli: unexpected error: {:#}", error);
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
+
+/// Describes next steps after evaluating one "line" of user input
+///
+/// This could just be `Result`, but it's easy to misuse that here because
+/// _commands_ might fail all the time without needing to bail out of the
+/// REPL. We use a separate type for clarity about what success/failure
+/// actually means.
+enum LoopResult {
+    /// Show the prompt and accept another command
+    Continue,
+
+    /// Exit the REPL with a fatal error
+    Bail(anyhow::Error),
+}
+
+/// Processes one "line" of user input.
+fn process_entry(sim: &mut ReconfiguratorSim, entry: String) -> LoopResult { + // If no input was provided, take another lap (print the prompt and accept + // another line). This gets handled specially because otherwise clap would + // treat this as a usage error and print a help message, which isn't what we + // want here. + if entry.trim().is_empty() { + return LoopResult::Continue; + } + + // Parse the line of input as a REPL command. + // + // Using `split_whitespace()` like this is going to be a problem if we ever + // want to support arguments with whitespace in them (using quotes). But + // it's good enough for now. + let parts = entry.split_whitespace(); + let parsed_command = TopLevelArgs::command() + .multicall(true) + .try_get_matches_from(parts) + .and_then(|matches| TopLevelArgs::from_arg_matches(&matches)); + let command = match parsed_command { + Err(error) => { + // We failed to parse the command. Print the error. + return match error.print() { + // Assuming that worked, just take another lap. + Ok(_) => LoopResult::Continue, + // If we failed to even print the error, that itself is a fatal + // error. + Err(error) => LoopResult::Bail( + anyhow!(error).context("printing previous error"), + ), + }; + } + Ok(TopLevelArgs { command }) => command, + }; + + // Dispatch to the command's handler. + let cmd_result = match command { + Commands::SledList => cmd_sled_list(sim), + Commands::SledAdd(args) => cmd_sled_add(sim, args), + Commands::SledShow(args) => cmd_sled_show(sim, args), + Commands::InventoryList => cmd_inventory_list(sim), + Commands::InventoryGenerate => cmd_inventory_generate(sim), + Commands::BlueprintList => cmd_blueprint_list(sim), + Commands::BlueprintFromInventory(args) => { + cmd_blueprint_from_inventory(sim, args) + } + Commands::BlueprintPlan(args) => cmd_blueprint_plan(sim, args), + Commands::BlueprintShow(args) => cmd_blueprint_show(sim, args), + Commands::BlueprintDiff(args) => cmd_blueprint_diff(sim, args), + Commands::BlueprintDiffInventory(args) => { + cmd_blueprint_diff_inventory(sim, args) + } + Commands::Load(args) => cmd_load(sim, args), + Commands::FileContents(args) => cmd_file_contents(args), + Commands::Save(args) => cmd_save(sim, args), + }; + + match cmd_result { + Err(error) => println!("error: {:#}", error), + Ok(Some(s)) => println!("{}", s), + Ok(None) => (), + } + + LoopResult::Continue +} + +// clap configuration for the REPL commands + +/// reconfigurator-sim: simulate blueprint planning and execution +#[derive(Debug, Parser)] +struct TopLevelArgs { + #[command(subcommand)] + command: Commands, +} + +#[derive(Debug, Subcommand)] +enum Commands { + /// list sleds + SledList, + /// add a new sled + SledAdd(SledAddArgs), + /// show details about one sled + SledShow(SledArgs), + + /// list all inventory collections + InventoryList, + /// generates an inventory collection from the configured sleds + InventoryGenerate, + + /// list all blueprints + BlueprintList, + /// generate a blueprint that represents the contents of an inventory + BlueprintFromInventory(InventoryArgs), + /// run planner to generate a new blueprint + BlueprintPlan(BlueprintPlanArgs), + /// show details about a blueprint + BlueprintShow(BlueprintArgs), + /// show differences between two blueprints + BlueprintDiff(BlueprintDiffArgs), + /// show differences between a blueprint and an inventory collection + BlueprintDiffInventory(BlueprintDiffInventoryArgs), + + /// save state to a file + Save(SaveArgs), + /// load state from a file + Load(LoadArgs), + /// show information 
about what's in a saved file + FileContents(FileContentsArgs), +} + +#[derive(Debug, Args)] +struct SledAddArgs { + /// id of the new sled + sled_id: Option, +} + +#[derive(Debug, Args)] +struct SledArgs { + /// id of the sled + sled_id: Uuid, +} + +#[derive(Debug, Args)] +struct InventoryArgs { + /// id of the inventory collection to use in planning + collection_id: Uuid, +} + +#[derive(Debug, Args)] +struct BlueprintPlanArgs { + /// id of the blueprint on which this one will be based + parent_blueprint_id: Uuid, + /// id of the inventory collection to use in planning + collection_id: Uuid, +} + +#[derive(Debug, Args)] +struct BlueprintArgs { + /// id of the blueprint + blueprint_id: Uuid, +} + +#[derive(Debug, Args)] +struct BlueprintDiffInventoryArgs { + /// id of the inventory collection + collection_id: Uuid, + /// id of the blueprint + blueprint_id: Uuid, +} + +#[derive(Debug, Args)] +struct BlueprintDiffArgs { + /// id of the first blueprint + blueprint1_id: Uuid, + /// id of the second blueprint + blueprint2_id: Uuid, +} + +#[derive(Debug, Args)] +struct LoadArgs { + /// input file + filename: Utf8PathBuf, + + /// id of inventory collection to use for sled details + /// (may be omitted only if the file contains only one collection) + collection_id: Option, +} + +#[derive(Debug, Args)] +struct FileContentsArgs { + /// input file + filename: Utf8PathBuf, +} + +#[derive(Debug, Args)] +struct SaveArgs { + /// output file + filename: Utf8PathBuf, +} + +// Command handlers + +fn cmd_sled_list( + sim: &mut ReconfiguratorSim, +) -> anyhow::Result> { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct Sled { + id: Uuid, + nzpools: usize, + subnet: String, + } + + let policy = sim.system.to_policy().context("failed to generate policy")?; + let rows = policy.sleds.iter().map(|(sled_id, sled_resources)| Sled { + id: *sled_id, + subnet: sled_resources.subnet.net().to_string(), + nzpools: sled_resources.zpools.len(), + }); + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + Ok(Some(table)) +} + +fn cmd_sled_add( + sim: &mut ReconfiguratorSim, + add: SledAddArgs, +) -> anyhow::Result> { + let mut new_sled = SledBuilder::new(); + if let Some(sled_id) = add.sled_id { + new_sled = new_sled.id(sled_id); + } + + let _ = sim.system.sled(new_sled).context("adding sled")?; + Ok(Some(String::from("added sled"))) +} + +fn cmd_sled_show( + sim: &mut ReconfiguratorSim, + args: SledArgs, +) -> anyhow::Result> { + let policy = sim.system.to_policy().context("failed to generate policy")?; + let sled_id = args.sled_id; + let sled_resources = policy + .sleds + .get(&sled_id) + .ok_or_else(|| anyhow!("no sled with id {:?}", sled_id))?; + let mut s = String::new(); + swriteln!(s, "sled {}", sled_id); + swriteln!(s, "subnet {}", sled_resources.subnet.net()); + swriteln!(s, "zpools ({}):", sled_resources.zpools.len()); + for z in &sled_resources.zpools { + swriteln!(s, " {:?}", z); + } + Ok(Some(s)) +} + +fn cmd_inventory_list( + sim: &mut ReconfiguratorSim, +) -> anyhow::Result> { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct InventoryRow { + id: Uuid, + nerrors: usize, + time_done: String, + } + + let rows = sim.collections.values().map(|collection| { + let id = collection.id; + InventoryRow { + id, + nerrors: collection.errors.len(), + time_done: humantime::format_rfc3339_millis( + collection.time_done.into(), + ) + .to_string(), + } + }); + let table = 
tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + Ok(Some(table)) +} + +fn cmd_inventory_generate( + sim: &mut ReconfiguratorSim, +) -> anyhow::Result> { + let mut builder = + sim.system.to_collection_builder().context("generating inventory")?; + // For an inventory we just generated from thin air, pretend like each sled + // has no zones on it. + let sled_ids = sim.system.to_policy().unwrap().sleds.into_keys(); + for sled_id in sled_ids { + builder + .found_sled_omicron_zones( + "fake sled agent", + sled_id, + OmicronZonesConfig { + generation: Generation::new(), + zones: vec![], + }, + ) + .context("recording Omicron zones")?; + } + let inventory = builder.build(); + let rv = format!( + "generated inventory collection {} from configured sleds", + inventory.id + ); + sim.collections.insert(inventory.id, inventory); + Ok(Some(rv)) +} + +fn cmd_blueprint_list( + sim: &mut ReconfiguratorSim, +) -> anyhow::Result> { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct BlueprintRow { + id: Uuid, + } + + let rows = sim + .blueprints + .values() + .map(|blueprint| BlueprintRow { id: blueprint.id }); + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + Ok(Some(table)) +} + +fn cmd_blueprint_from_inventory( + sim: &mut ReconfiguratorSim, + args: InventoryArgs, +) -> anyhow::Result> { + let collection_id = args.collection_id; + let collection = sim + .collections + .get(&collection_id) + .ok_or_else(|| anyhow!("no such collection: {}", collection_id))?; + let dns_version = Generation::new(); + let policy = sim.system.to_policy().context("generating policy")?; + let creator = "reconfigurator-sim"; + let blueprint = BlueprintBuilder::build_initial_from_collection( + collection, + dns_version, + &policy, + creator, + ) + .context("building collection")?; + let rv = format!( + "generated blueprint {} from inventory collection {}", + blueprint.id, collection_id + ); + sim.blueprints.insert(blueprint.id, blueprint); + Ok(Some(rv)) +} + +fn cmd_blueprint_plan( + sim: &mut ReconfiguratorSim, + args: BlueprintPlanArgs, +) -> anyhow::Result> { + let parent_blueprint_id = args.parent_blueprint_id; + let collection_id = args.collection_id; + let parent_blueprint = sim + .blueprints + .get(&parent_blueprint_id) + .ok_or_else(|| anyhow!("no such blueprint: {}", parent_blueprint_id))?; + let collection = sim + .collections + .get(&collection_id) + .ok_or_else(|| anyhow!("no such collection: {}", collection_id))?; + let dns_version = Generation::new(); + let policy = sim.system.to_policy().context("generating policy")?; + let creator = "reconfigurator-sim"; + let planner = Planner::new_based_on( + sim.log.clone(), + parent_blueprint, + dns_version, + &policy, + creator, + collection, + ) + .context("creating planner")?; + let blueprint = planner.plan().context("generating blueprint")?; + let rv = format!( + "generated blueprint {} based on parent blueprint {}", + blueprint.id, parent_blueprint_id, + ); + sim.blueprints.insert(blueprint.id, blueprint); + Ok(Some(rv)) +} + +fn cmd_blueprint_show( + sim: &mut ReconfiguratorSim, + args: BlueprintArgs, +) -> anyhow::Result> { + let blueprint = sim + .blueprints + .get(&args.blueprint_id) + .ok_or_else(|| anyhow!("no such blueprint: {}", args.blueprint_id))?; + Ok(Some(format!("{:?}", blueprint))) +} + +fn cmd_blueprint_diff( + sim: &mut ReconfiguratorSim, + args: 
BlueprintDiffArgs,
+) -> anyhow::Result<Option<String>> {
+    let blueprint1_id = args.blueprint1_id;
+    let blueprint2_id = args.blueprint2_id;
+    let blueprint1 = sim
+        .blueprints
+        .get(&blueprint1_id)
+        .ok_or_else(|| anyhow!("no such blueprint: {}", blueprint1_id))?;
+    let blueprint2 = sim
+        .blueprints
+        .get(&blueprint2_id)
+        .ok_or_else(|| anyhow!("no such blueprint: {}", blueprint2_id))?;
+
+    let diff = blueprint1.diff_sleds(&blueprint2);
+    Ok(Some(diff.to_string()))
+}
+
+fn cmd_blueprint_diff_inventory(
+    sim: &mut ReconfiguratorSim,
+    args: BlueprintDiffInventoryArgs,
+) -> anyhow::Result<Option<String>> {
+    let collection_id = args.collection_id;
+    let blueprint_id = args.blueprint_id;
+    let collection = sim.collections.get(&collection_id).ok_or_else(|| {
+        anyhow!("no such inventory collection: {}", collection_id)
+    })?;
+    let blueprint = sim
+        .blueprints
+        .get(&blueprint_id)
+        .ok_or_else(|| anyhow!("no such blueprint: {}", blueprint_id))?;
+
+    let zones = collection.all_omicron_zones().map(|z| z.id).collect();
+    let diff = blueprint.diff_sleds_from_collection(&collection, &zones);
+    Ok(Some(diff.to_string()))
+}
+
+fn cmd_save(
+    sim: &mut ReconfiguratorSim,
+    args: SaveArgs,
+) -> anyhow::Result<Option<String>> {
+    let policy = sim.system.to_policy().context("creating policy")?;
+    let saved = UnstableReconfiguratorState {
+        policy,
+        collections: sim.collections.values().cloned().collect(),
+        blueprints: sim.blueprints.values().cloned().collect(),
+    };
+
+    let output_path = &args.filename;
+    let outfile = std::fs::OpenOptions::new()
+        .create_new(true)
+        .write(true)
+        .open(output_path)
+        .with_context(|| format!("open {:?}", output_path))?;
+    serde_json::to_writer_pretty(&outfile, &saved)
+        .with_context(|| format!("writing to {:?}", output_path))?;
+    Ok(Some(format!(
+        "saved policy, collections, and blueprints to {:?}",
+        output_path
+    )))
+}
+
+fn read_file(
+    input_path: &camino::Utf8Path,
+) -> anyhow::Result<UnstableReconfiguratorState> {
+    let file = std::fs::File::open(input_path)
+        .with_context(|| format!("open {:?}", input_path))?;
+    serde_json::from_reader(file)
+        .with_context(|| format!("read {:?}", input_path))
+}
+
+fn cmd_load(
+    sim: &mut ReconfiguratorSim,
+    args: LoadArgs,
+) -> anyhow::Result<Option<String>> {
+    let input_path = args.filename;
+    let collection_id = args.collection_id;
+    let loaded = read_file(&input_path)?;
+
+    let mut s = String::new();
+
+    let collection_id = match collection_id {
+        Some(s) => s,
+        None => match loaded.collections.len() {
+            1 => loaded.collections[0].id,
+            0 => bail!(
+                "no collection_id specified and file contains 0 collections"
+            ),
+            count => bail!(
+                "no collection_id specified and file contains {} \
+                collections: {}",
+                count,
+                loaded
+                    .collections
+                    .iter()
+                    .map(|c| c.id.to_string())
+                    .collect::<Vec<_>>()
+                    .join(", ")
+            ),
+        },
+    };
+
+    swriteln!(
+        s,
+        "using collection {} as source of sled inventory data",
+        collection_id
+    );
+    let primary_collection =
+        loaded.collections.iter().find(|c| c.id == collection_id).ok_or_else(
+            || {
+                anyhow!(
+                    "collection {} not found in file {:?}",
+                    collection_id,
+                    input_path
+                )
+            },
+        )?;
+
+    let current_policy = sim.system.to_policy().context("generating policy")?;
+    for (sled_id, sled_resources) in loaded.policy.sleds {
+        if current_policy.sleds.contains_key(&sled_id) {
+            swriteln!(
+                s,
+                "sled {}: skipped (one with \
+                the same id is already loaded)",
+                sled_id
+            );
+            continue;
+        }
+
+        let Some(inventory_sled_agent) =
+            primary_collection.sled_agents.get(&sled_id)
+        else {
+            swriteln!(
+                s,
+                "error: load sled
+fn cmd_load(
+    sim: &mut ReconfiguratorSim,
+    args: LoadArgs,
+) -> anyhow::Result<Option<String>> {
+    let input_path = args.filename;
+    let collection_id = args.collection_id;
+    let loaded = read_file(&input_path)?;
+
+    let mut s = String::new();
+
+    let collection_id = match collection_id {
+        Some(s) => s,
+        None => match loaded.collections.len() {
+            1 => loaded.collections[0].id,
+            0 => bail!(
+                "no collection_id specified and file contains 0 collections"
+            ),
+            count => bail!(
+                "no collection_id specified and file contains {} \
+                collections: {}",
+                count,
+                loaded
+                    .collections
+                    .iter()
+                    .map(|c| c.id.to_string())
+                    .collect::<Vec<_>>()
+                    .join(", ")
+            ),
+        },
+    };
+
+    swriteln!(
+        s,
+        "using collection {} as source of sled inventory data",
+        collection_id
+    );
+    let primary_collection =
+        loaded.collections.iter().find(|c| c.id == collection_id).ok_or_else(
+            || {
+                anyhow!(
+                    "collection {} not found in file {:?}",
+                    collection_id,
+                    input_path
+                )
+            },
+        )?;
+
+    let current_policy = sim.system.to_policy().context("generating policy")?;
+    for (sled_id, sled_resources) in loaded.policy.sleds {
+        if current_policy.sleds.contains_key(&sled_id) {
+            swriteln!(
+                s,
+                "sled {}: skipped (one with \
+                the same id is already loaded)",
+                sled_id
+            );
+            continue;
+        }
+
+        let Some(inventory_sled_agent) =
+            primary_collection.sled_agents.get(&sled_id)
+        else {
+            swriteln!(
+                s,
+                "error: load sled {}: no inventory found for sled agent in \
+                collection {}",
+                sled_id,
+                collection_id
+            );
+            continue;
+        };
+
+        let inventory_sp = match &inventory_sled_agent.baseboard_id {
+            Some(baseboard_id) => {
+                let inv_sp = primary_collection
+                    .sps
+                    .get(baseboard_id)
+                    .ok_or_else(|| {
+                        anyhow!(
+                            "error: load sled {}: missing SP inventory",
+                            sled_id
+                        )
+                    })?;
+                let inv_rot = primary_collection
+                    .rots
+                    .get(baseboard_id)
+                    .ok_or_else(|| {
+                        anyhow!(
+                            "error: load sled {}: missing RoT inventory",
+                            sled_id
+                        )
+                    })?;
+                Some(SledHwInventory { baseboard_id, sp: inv_sp, rot: inv_rot })
+            }
+            None => None,
+        };
+
+        let result = sim.system.sled_full(
+            sled_id,
+            sled_resources,
+            inventory_sp,
+            inventory_sled_agent,
+        );
+
+        match result {
+            Ok(_) => swriteln!(s, "sled {} loaded", sled_id),
+            Err(error) => {
+                swriteln!(s, "error: load sled {}: {:#}", sled_id, error)
+            }
+        };
+    }
+
+    for collection in loaded.collections {
+        if sim.collections.contains_key(&collection.id) {
+            swriteln!(
+                s,
+                "collection {}: skipped (one with the \
+                same id is already loaded)",
+                collection.id
+            );
+        } else {
+            swriteln!(s, "collection {} loaded", collection.id);
+            sim.collections.insert(collection.id, collection);
+        }
+    }
+
+    for blueprint in loaded.blueprints {
+        if sim.blueprints.contains_key(&blueprint.id) {
+            swriteln!(
+                s,
+                "blueprint {}: skipped (one with the \
+                same id is already loaded)",
+                blueprint.id
+            );
+        } else {
+            swriteln!(s, "blueprint {} loaded", blueprint.id);
+            sim.blueprints.insert(blueprint.id, blueprint);
+        }
+    }
+
+    swriteln!(s, "loaded data from {:?}", input_path);
+    Ok(Some(s))
+}
+
+fn cmd_file_contents(args: FileContentsArgs) -> anyhow::Result<Option<String>> {
+    let loaded = read_file(&args.filename)?;
+
+    let mut s = String::new();
+
+    for (sled_id, sled_resources) in loaded.policy.sleds {
+        swriteln!(
+            s,
+            "sled: {} (subnet: {}, zpools: {})",
+            sled_id,
+            sled_resources.subnet.net(),
+            sled_resources.zpools.len()
+        );
+    }
+
+    for collection in loaded.collections {
+        swriteln!(
+            s,
+            "collection: {} (errors: {}, completed at: {})",
+            collection.id,
+            collection.errors.len(),
+            humantime::format_rfc3339_millis(collection.time_done.into())
+                .to_string(),
+        );
+    }
+
+    for blueprint in loaded.blueprints {
+        swriteln!(
+            s,
+            "blueprint: {} (created at: {})",
+            blueprint.id,
+            blueprint.time_created
+        );
+    }
+
+    Ok(Some(s))
+}
diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds.txt b/dev-tools/reconfigurator-cli/tests/input/cmds.txt
new file mode 100644
index 0000000000..3997812e55
--- /dev/null
+++ b/dev-tools/reconfigurator-cli/tests/input/cmds.txt
@@ -0,0 +1,14 @@
+sled-list
+inventory-list
+blueprint-list
+
+sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a
+sled-add dde1c0e2-b10d-4621-b420-f179f7a7a00a
+sled-list
+sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a
+sled-add 90c1102a-b9f5-4d88-92a2-60d54a2d98cc
+sled-add 04ef3330-c682-4a08-8def-fcc4bef31bcd
+sled-list
+
+inventory-generate
+inventory-list
diff --git a/dev-tools/reconfigurator-cli/tests/input/complex.json b/dev-tools/reconfigurator-cli/tests/input/complex.json
new file mode 100644
index 0000000000..c168e153c1
--- /dev/null
+++ b/dev-tools/reconfigurator-cli/tests/input/complex.json
@@ -0,0 +1,11558 @@
+{ + "policy": { + "sleds": { + "1762267a-a0cb-4c9a-88b4-8ce828cbc021": { + "policy": { + "kind": "in_service", + "provision_policy": "provisionable" + }, + "state": "active", + "zpools": [ + "oxp_1a4447be-5c84-4dad-b22c-d35e5e1911b0", + "oxp_1d864940-c723-4f58-82e8-b03772b42356", +
"oxp_3f95c036-d5d8-4d08-8c78-c0635ca34698", + "oxp_466351db-2ced-45fa-9431-22de1c4bd480", + "oxp_88d39560-b091-4fad-bad0-b0b1c514033b", + "oxp_88ddd98f-3181-4797-864a-7e8d5b028657", + "oxp_9041d780-b5ee-4159-a1db-8c4075e2176d", + "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c", + "oxp_eae1515c-cd4a-4d7b-9a99-dbc24cf2e337" + ], + "subnet": { + "net": "fd00:1122:3344:101::/64" + } + }, + "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f": { + "policy": { + "kind": "in_service", + "provision_policy": "provisionable" + }, + "state": "active", + "zpools": [ + "oxp_0622b2cf-297e-4356-b94c-d74309f443ba", + "oxp_276822e2-d7ce-4eae-b5a4-38d2e8586a37", + "oxp_2b0da7bc-2afc-4a16-95e1-5126bcb9b6b1", + "oxp_574728ca-1559-48f3-a46c-1e608966e6a0", + "oxp_6d2b29c5-aa58-40b7-b630-88301f08e945", + "oxp_8c7a7d84-c269-4178-9dbb-37f363e7958c", + "oxp_cc36b0cc-818a-4c7f-b50d-057ff7e7e8e3", + "oxp_e4a4a09a-43b8-460c-bc43-1f935600b681" + ], + "subnet": { + "net": "fd00:1122:3344:121::/64" + } + }, + "a6634f64-f6fb-426c-b00b-9b30aed9f53a": { + "policy": { + "kind": "in_service", + "provision_policy": "provisionable" + }, + "state": "active", + "zpools": [ + "oxp_02cc2369-9924-41c7-aa74-79d6d548f2dd", + "oxp_0c07bcc1-7666-4754-881d-4135992ff561", + "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9", + "oxp_4e92cbcf-f194-4ffc-97e3-fdf3df7cc9ae", + "oxp_6a275151-d85f-488b-81a6-e558fdede658", + "oxp_742699b5-4ded-406d-9a08-24648d3b2efb", + "oxp_7929b5fb-4ef2-4791-92e6-1a46c7f608b9", + "oxp_93e4e338-0ad1-4ae1-a120-2cfb59b4df42", + "oxp_f9a037da-7d15-40d9-957d-7e42aadb333c", + "oxp_fe74ae4d-d7bc-4293-9ec9-69132aca4aba" + ], + "subnet": { + "net": "fd00:1122:3344:102::/64" + } + }, + "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89": { + "policy": { + "kind": "in_service", + "provision_policy": "provisionable" + }, + "state": "active", + "zpools": [ + "oxp_07d66b6b-8059-4d07-a0b3-8ce41e2a8c98", + "oxp_4ffc6336-04d1-4f21-9c27-a95aa640f960", + "oxp_5b8b10a4-d527-4176-a049-1e9c2a10edea", + "oxp_6aa59516-2018-4494-af6b-46704b20f6dc", + "oxp_7eabbdcc-28a3-4af9-bc4a-8d9b745d4df3", + "oxp_83a60822-1299-471c-bc79-3be0f87cf7c3", + "oxp_c8feed09-6556-4409-af8e-563c97d556eb", + "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a", + "oxp_e2244950-dda0-4dfa-8b63-5ffac16c5a62", + "oxp_f6813b0c-ab5a-4fd0-8e24-95de9b65280d" + ], + "subnet": { + "net": "fd00:1122:3344:103::/64" + } + } + }, + "service_ip_pool_ranges": [ + { + "first": "172.20.28.1", + "last": "172.20.28.10" + } + ], + "target_nexus_zone_count": 3 + }, + "collections": [ + { + "id": "83ed3949-d221-44f3-a901-02e9ff16d2a5", + "errors": [ + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page Cmpa: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"d7328126-995e-437c-b92e-c6c220d508b2\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:09 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"d7328126-995e-437c-b92e-c6c220d508b2\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaActive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"6a94be62-a0b1-4c63-b54c-7083266de0ab\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:09 GMT\"}; value: Error 
{ error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"6a94be62-a0b1-4c63-b54c-7083266de0ab\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaInactive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"858a0aad-c3df-4cdf-9c20-be6b1effdb20\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:09 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"858a0aad-c3df-4cdf-9c20-be6b1effdb20\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaScratch: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"40a5f1ae-5e03-41ec-8c45-6b8a83ce7ad4\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:09 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"40a5f1ae-5e03-41ec-8c45-6b8a83ce7ad4\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page Cmpa: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"fe9383e3-5b7c-45f2-bec6-c1f5df8caaa4\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:09 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"fe9383e3-5b7c-45f2-bec6-c1f5df8caaa4\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaActive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"9c467503-9155-4d79-b048-aea0c4153f9e\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:09 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"9c467503-9155-4d79-b048-aea0c4153f9e\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaInactive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"d397fe11-ec68-4a1f-9739-e42313158ae4\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:09 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"d397fe11-ec68-4a1f-9739-e42313158ae4\" }", + "MGS 
\"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaScratch: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"db83ac82-ec73-4914-adb7-6a243409702c\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:09 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"db83ac82-ec73-4914-adb7-6a243409702c\" }" + ], + "time_started": "2024-03-01T19:17:06.797656Z", + "time_done": "2024-03-01T19:17:10.594988Z", + "collector": "7f83d784-9c5c-498f-b559-4176bbafbc4b", + "baseboards": [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + } + ], + "cabooses": [ + { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + }, + { + "board": "gimlet-c", + "git_commit": "620f18f924d180d13a0577bd51d7c48c1d670549", + "name": "gimlet-c", + "version": "1.0.5" + }, + { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + }, + { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + }, + { + "board": "oxide-rot-1", + "git_commit": "08f5a258dd48728cfaae68fcc7fbd56c7359329c", + "name": "oxide-rot-1", + "version": "1.0.4" + }, + { + "board": "oxide-rot-1", + "git_commit": "4b2a3469ee63f14e237ed26d741c49e8cc27261f", + "name": "oxide-rot-1", + "version": "0.0.0-git" + }, + { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + }, + { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1", + "version": "1.0.0" + }, + { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1-dev", + "version": "0.0.0-git" + }, + { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "sidecar-b", + "version": "1.0.6" + }, + { + "board": "sidecar-b", + "git_commit": "a096c4c17b17772c72dbfb03e0b9c96e59d9a0fe", + "name": "sidecar-b", + "version": "0.0.0-git" + }, + { + "board": "sidecar-b", + "git_commit": "f04c427514943573272da3524aa4d976d069ac95", + "name": "sidecar-b", + "version": "0.0.0-git" + } + ], + "rot_pages": [ + { + "data_base64": 
"AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + }, + { + "data_base64": "AAAAABgAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa0EBHkNRkpjbiUrOzKdmz2xKt2ubIbVZ4ALX8/sXkig=" + }, + { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + }, + { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + }, + { + "data_base64": "AAAAADYAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKR50f2Jw5Dyd7j2FPBE4r77CMk4azEk2sBLHfrvOts=" + }, + { + 
"data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + }, + { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + ], + "sps": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:17:09.327993Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "switch", + "sp_slot": 0, + "baseboard_revision": 4, + "hubris_archive": "9184be93590e655e", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:17:09.913395Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "sp_type": "switch", + "sp_slot": 1, + "baseboard_revision": 4, + "hubris_archive": "9184be93590e655e", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:08.168996Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 17, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:08.746433Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 15, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:07.575660Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 14, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:06.810258Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 16, + "baseboard_revision": 6, + "hubris_archive": "3c670b44fdeef4af", + "power_state": "A0" + } + ] + ], + "rots": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:17:09.327993Z", + "source": 
"http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "4599e4d7bc9b2164e8c0f0f7ce4bcae2bd35d635c8450854ea9a94ce49a2ca0f", + "slot_b_sha3_256_digest": "1e42a0761e5bd1d9f9a5e2c44acc34e7b7e007792977cdb0523d271db2e80c8c" + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:17:09.913395Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "active_slot": { + "slot": "b" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "92e07077972c83af6872ec817fb88d74a36f6956276505e9771302c5bb7ec2a8", + "slot_b_sha3_256_digest": "fb25825eaf97f8f2307afe3caebf0b70865499f6d116db3aff382c505b4d8dd7" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:08.168996Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "96caca4fb6a789c228cdb69ca4afe300cecd46fb611e1fb12c384fe9597a0d1c", + "slot_b_sha3_256_digest": "29215490b1984f4cfcea7d95176b8338466b2fc60123cfc27e895209bef506bb" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:08.746433Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "96caca4fb6a789c228cdb69ca4afe300cecd46fb611e1fb12c384fe9597a0d1c", + "slot_b_sha3_256_digest": "29215490b1984f4cfcea7d95176b8338466b2fc60123cfc27e895209bef506bb" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:07.575660Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "b" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "7d081250f8eb830b7dc5067334316f497a184b01788b5614a7a7f88b93a87c20", + "slot_b_sha3_256_digest": "d416d5ba9d0d3bd495f0f5fcd2aecc28cd83b53d1a8f94a28241d85b30761451" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:06.810258Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "7d081250f8eb830b7dc5067334316f497a184b01788b5614a7a7f88b93a87c20", + "slot_b_sha3_256_digest": "4ec328195a23e86a1545d5ee576e73834aae81fb79830a54d0503475875f1760" + } + ] + ], + "cabooses_found": { + "SpSlot0": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:17:09.329382Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": 
"sidecar-b", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:17:09.915072Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "sidecar-b", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:08.170364Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:08.751437Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:07.577731Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:06.811674Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ] + ], + "SpSlot1": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:17:09.330756Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "f04c427514943573272da3524aa4d976d069ac95", + "name": "sidecar-b", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:17:09.916864Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "a096c4c17b17772c72dbfb03e0b9c96e59d9a0fe", + "name": "sidecar-b", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:08.172087Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:08.752817Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:07.579882Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + 
"time_collected": "2024-03-01T19:17:06.813085Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "620f18f924d180d13a0577bd51d7c48c1d670549", + "name": "gimlet-c", + "version": "1.0.5" + } + } + ] + ], + "RotSlotA": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:17:09.537712Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1-dev", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:17:10.123027Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "4b2a3469ee63f14e237ed26d741c49e8cc27261f", + "name": "oxide-rot-1", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:08.423630Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:09.004153Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:07.831036Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:07.074158Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ] + ], + "RotSlotB": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:17:09.744584Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:17:10.331006Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1", + "version": "1.0.0" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:08.675629Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:09.256102Z", + 
"source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:08.090096Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:07.327673Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "08f5a258dd48728cfaae68fcc7fbd56c7359329c", + "name": "oxide-rot-1", + "version": "1.0.4" + } + } + ] + ] + }, + "rot_pages_found": { + "Cmpa": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:08.691258Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:09.272635Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:08.107097Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": 
"oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:07.344150Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ] + ], + "CfpaActive": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:08.707253Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:09.288646Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + 
"time_collected": "2024-03-01T19:17:08.123462Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:07.416190Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ], + "CfpaInactive": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:08.723232Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADYAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKR50f2Jw5Dyd7j2FPBE4r77CMk4azEk2sBLHfrvOts=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:09.304669Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": 
"AAAAABgAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa0EBHkNRkpjbiUrOzKdmz2xKt2ubIbVZ4ALX8/sXkig=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:08.139558Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:07.487114Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ], + "CfpaScratch": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:08.739167Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + 
"time_collected": "2024-03-01T19:17:09.320638Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:08.162584Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:07.568207Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ] + }, + "sled_agents": { + "1762267a-a0cb-4c9a-88b4-8ce828cbc021": { + "time_collected": "2024-03-01T19:17:10.485528Z", + "source": "http://[fd00:1122:3344:101::1]:12345", + "sled_id": "1762267a-a0cb-4c9a-88b4-8ce828cbc021", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + "sled_agent_address": "[fd00:1122:3344:101::1]:12345", + "sled_role": "scrimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f": { + "time_collected": "2024-03-01T19:17:10.522140Z", + "source": "http://[fd00:1122:3344:121::1]:12345", + "sled_id": "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + "sled_agent_address": "[fd00:1122:3344:121::1]:12345", + "sled_role": "gimlet", + 
"usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "a6634f64-f6fb-426c-b00b-9b30aed9f53a": { + "time_collected": "2024-03-01T19:17:10.557759Z", + "source": "http://[fd00:1122:3344:102::1]:12345", + "sled_id": "a6634f64-f6fb-426c-b00b-9b30aed9f53a", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + "sled_agent_address": "[fd00:1122:3344:102::1]:12345", + "sled_role": "gimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89": { + "time_collected": "2024-03-01T19:17:10.593424Z", + "source": "http://[fd00:1122:3344:103::1]:12345", + "sled_id": "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + "sled_agent_address": "[fd00:1122:3344:103::1]:12345", + "sled_role": "scrimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + } + }, + "omicron_zones": { + "1762267a-a0cb-4c9a-88b4-8ce828cbc021": { + "time_collected": "2024-03-01T19:17:10.486656Z", + "source": "http://[fd00:1122:3344:101::1]:12345", + "sled_id": "1762267a-a0cb-4c9a-88b4-8ce828cbc021", + "zones": { + "generation": 5, + "zones": [ + { + "id": "0706860a-3801-42a7-8000-fff741f7665b", + "underlay_address": "fd00:1122:3344:101::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::c]:32345", + "dataset": { + "pool_name": "oxp_1d864940-c723-4f58-82e8-b03772b42356" + } + } + }, + { + "id": "34a279a2-a025-41ff-a392-1addac400f38", + "underlay_address": "fd00:1122:3344:101::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::3]:32221", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "3d344935-6f48-405f-8794-2c6e8fa4b1c2", + "underlay_address": "fd00:1122:3344:1::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + }, + "dns_address": "[fd00:1122:3344:1::1]:53", + "gz_address": "fd00:1122:3344:1::2", + "gz_address_index": 0, + "http_address": "[fd00:1122:3344:1::1]:5353" + } + }, + { + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "underlay_address": "fd00:1122:3344:101::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.2", + "external_tls": true, + "internal_address": "[fd00:1122:3344:101::5]:12221", + "nic": { + "id": "81a0385e-86b9-4109-b63d-61c1407c50d1", + "ip": "172.30.2.5", + "kind": { + "type": "service", + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629" + }, + "mac": "A8:40:25:FF:E1:17", + "name": "nexus-59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "67852e14-ce7f-4e50-a61d-4059d0c057b9", + "underlay_address": "fd00:1122:3344:101::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::10]:32345", + "dataset": { + "pool_name": "oxp_eae1515c-cd4a-4d7b-9a99-dbc24cf2e337" + } + } + }, + { + "id": "6ace1a69-d55a-45f0-a127-99c90f1b070c", + "underlay_address": "fd00:1122:3344:101::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::f]:32345", + "dataset": { + "pool_name": "oxp_466351db-2ced-45fa-9431-22de1c4bd480" + } + } + }, + { + "id": "84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe", + "underlay_address": 
"fd00:1122:3344:101::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::9]:32345", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "854c1544-d0ea-4d65-9cf4-776e91a4fcb5", + "underlay_address": "fd00:1122:3344:101::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::8]:32345", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "86ee7359-aa35-41d9-8a76-54961bc516a1", + "underlay_address": "fd00:1122:3344:101::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::4]:32221", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "925ea436-fad4-4fff-8ae0-24edd725a2b0", + "underlay_address": "fd00:1122:3344:101::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::d]:32345", + "dataset": { + "pool_name": "oxp_88ddd98f-3181-4797-864a-7e8d5b028657" + } + } + }, + { + "id": "be272d1e-4d34-4478-94b4-a76f5fcbef36", + "underlay_address": "fd00:1122:3344:101::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::a]:32345", + "dataset": { + "pool_name": "oxp_9041d780-b5ee-4159-a1db-8c4075e2176d" + } + } + }, + { + "id": "bebd754d-d06c-45f4-99bf-e1aab6253ab8", + "underlay_address": "fd00:1122:3344:101::6", + "zone_type": { + "type": "oximeter", + "address": "[fd00:1122:3344:101::6]:12223" + } + }, + { + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc", + "underlay_address": "fd00:1122:3344:101::11", + "zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:101::11]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9741f03f-f150-4229-9f8d-e29a69c87883", + "ip": "172.30.3.5", + "kind": { + "type": "service", + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc" + }, + "mac": "A8:40:25:FF:E7:A8", + "name": "ntp-d630cca7-e3f7-47a8-aee8-ad0790895abc", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.5", + "first_port": 0, + "last_port": 16383 + } + } + }, + { + "id": "d8c51c79-46ff-44bd-80be-08d6283f4e21", + "underlay_address": "fd00:1122:3344:101::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::e]:32345", + "dataset": { + "pool_name": "oxp_3f95c036-d5d8-4d08-8c78-c0635ca34698" + } + } + }, + { + "id": "dbbc0422-2b23-4d9e-b1db-8003cfc29ad0", + "underlay_address": "fd00:1122:3344:101::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::b]:32345", + "dataset": { + "pool_name": "oxp_1a4447be-5c84-4dad-b22c-d35e5e1911b0" + } + } + }, + { + "id": "f33a2375-212d-42e4-b539-6079aeb4e5b7", + "underlay_address": "fd00:1122:3344:101::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:101::7]:17000" + } + } + ] + } + }, + "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f": { + "time_collected": "2024-03-01T19:17:10.522942Z", + "source": "http://[fd00:1122:3344:121::1]:12345", + "sled_id": "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f", + "zones": { + "generation": 3, + "zones": [ + { + "id": "0eca39c6-62e2-4a1f-8ea2-e9332a774a26", + "underlay_address": "fd00:1122:3344:121::28", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::28]:32345", + "dataset": { + "pool_name": "oxp_cc36b0cc-818a-4c7f-b50d-057ff7e7e8e3" + } + } + }, + { + "id": "12431d3e-1516-4822-98b3-3711ff912e69", + "underlay_address": "fd00:1122:3344:121::22", + 
"zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::22]:32345", + "dataset": { + "pool_name": "oxp_0622b2cf-297e-4356-b94c-d74309f443ba" + } + } + }, + { + "id": "2d5c29f7-4915-4876-82f8-e388b15c5067", + "underlay_address": "fd00:1122:3344:121::21", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:121::21]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + }, + { + "id": "56414d76-9b8c-4e15-acf1-cd269c2d8638", + "underlay_address": "fd00:1122:3344:121::25", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::25]:32345", + "dataset": { + "pool_name": "oxp_574728ca-1559-48f3-a46c-1e608966e6a0" + } + } + }, + { + "id": "891cb9da-301d-40cc-b93d-5fcb350fa968", + "underlay_address": "fd00:1122:3344:121::29", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::29]:32345", + "dataset": { + "pool_name": "oxp_e4a4a09a-43b8-460c-bc43-1f935600b681" + } + } + }, + { + "id": "d9014907-24c0-4c33-a475-db15942f4f8f", + "underlay_address": "fd00:1122:3344:121::23", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::23]:32345", + "dataset": { + "pool_name": "oxp_276822e2-d7ce-4eae-b5a4-38d2e8586a37" + } + } + }, + { + "id": "e950de25-7306-4cf7-a6f4-6db11adaa310", + "underlay_address": "fd00:1122:3344:121::27", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::27]:32345", + "dataset": { + "pool_name": "oxp_8c7a7d84-c269-4178-9dbb-37f363e7958c" + } + } + }, + { + "id": "fe3dc092-ea23-42d7-9f1d-36eed72b539c", + "underlay_address": "fd00:1122:3344:121::26", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::26]:32345", + "dataset": { + "pool_name": "oxp_6d2b29c5-aa58-40b7-b630-88301f08e945" + } + } + }, + { + "id": "ff8cce49-a721-4ad1-b09e-2bd6e4e42d00", + "underlay_address": "fd00:1122:3344:121::24", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::24]:32345", + "dataset": { + "pool_name": "oxp_2b0da7bc-2afc-4a16-95e1-5126bcb9b6b1" + } + } + } + ] + } + }, + "a6634f64-f6fb-426c-b00b-9b30aed9f53a": { + "time_collected": "2024-03-01T19:17:10.558583Z", + "source": "http://[fd00:1122:3344:102::1]:12345", + "sled_id": "a6634f64-f6fb-426c-b00b-9b30aed9f53a", + "zones": { + "generation": 5, + "zones": [ + { + "id": "0ebd2b5b-2dec-4d17-8e23-e468c04dacb6", + "underlay_address": "fd00:1122:3344:102::11", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::11]:32345", + "dataset": { + "pool_name": "oxp_0c07bcc1-7666-4754-881d-4135992ff561" + } + } + }, + { + "id": "18b99404-d8b2-4463-bc99-638d9b678ab3", + "underlay_address": "fd00:1122:3344:102::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::e]:32345", + "dataset": { + "pool_name": "oxp_f9a037da-7d15-40d9-957d-7e42aadb333c" + } + } + }, + { + "id": "479811e7-6409-45d4-b294-5333534e47ed", + "underlay_address": "fd00:1122:3344:102::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::9]:32345", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "54d1b693-bc4d-4c47-9009-c6f624073801", + "underlay_address": "fd00:1122:3344:102::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::a]:32345", + "dataset": { + "pool_name": 
"oxp_02cc2369-9924-41c7-aa74-79d6d548f2dd" + } + } + }, + { + "id": "5b743072-9369-45d7-b7c9-dbdf736301f8", + "underlay_address": "fd00:1122:3344:2::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + }, + "dns_address": "[fd00:1122:3344:2::1]:53", + "gz_address": "fd00:1122:3344:2::2", + "gz_address_index": 1, + "http_address": "[fd00:1122:3344:2::1]:5353" + } + }, + { + "id": "5c4163fa-d6a6-4a0c-9476-3dda135bc9a4", + "underlay_address": "fd00:1122:3344:102::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:102::7]:17000" + } + }, + { + "id": "61a39188-dd82-43f6-a87c-ebb4c2d54ac4", + "underlay_address": "fd00:1122:3344:102::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::8]:32345", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "658076ed-fb43-49e7-a813-df3d23497848", + "underlay_address": "fd00:1122:3344:102::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::10]:32345", + "dataset": { + "pool_name": "oxp_7929b5fb-4ef2-4791-92e6-1a46c7f608b9" + } + } + }, + { + "id": "6edb836e-b100-4080-bb03-738d6441d571", + "underlay_address": "fd00:1122:3344:102::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::c]:32345", + "dataset": { + "pool_name": "oxp_fe74ae4d-d7bc-4293-9ec9-69132aca4aba" + } + } + }, + { + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b", + "underlay_address": "fd00:1122:3344:102::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.3", + "external_tls": true, + "internal_address": "[fd00:1122:3344:102::5]:12221", + "nic": { + "id": "3771a19c-cc40-4fd5-b99d-ed10071018a7", + "ip": "172.30.2.6", + "kind": { + "type": "service", + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b" + }, + "mac": "A8:40:25:FF:B2:AF", + "name": "nexus-7f83d784-9c5c-498f-b559-4176bbafbc4b", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "8052938f-73bc-4c54-8ed0-c62f6d77b9fd", + "underlay_address": "fd00:1122:3344:102::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::d]:32345", + "dataset": { + "pool_name": "oxp_93e4e338-0ad1-4ae1-a120-2cfb59b4df42" + } + } + }, + { + "id": "86bc0a2a-e24a-4bc1-8222-89207a0937f9", + "underlay_address": "fd00:1122:3344:102::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::4]:32221", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "9241b9da-8188-4aae-a9dc-62acb453618f", + "underlay_address": "fd00:1122:3344:102::6", + "zone_type": { + "type": "clickhouse", + "address": "[fd00:1122:3344:102::6]:8123", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "a781a349-62b2-41f0-9dd7-c64d290aef82", + "underlay_address": "fd00:1122:3344:102::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::f]:32345", + "dataset": { + "pool_name": "oxp_4e92cbcf-f194-4ffc-97e3-fdf3df7cc9ae" + } + } + }, + { + "id": "c67814bf-9972-4bb0-a666-24778ca70a16", + "underlay_address": "fd00:1122:3344:102::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::3]:32221", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "underlay_address": "fd00:1122:3344:102::12", + 
"zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:102::12]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9f03c82c-3192-498d-978d-a175049ac389", + "ip": "172.30.3.6", + "kind": { + "type": "service", + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909" + }, + "mac": "A8:40:25:FF:E4:11", + "name": "ntp-c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.6", + "first_port": 16384, + "last_port": 32767 + } + } + }, + { + "id": "dfc27183-ed85-44c3-ac3c-4427c4cb03e3", + "underlay_address": "fd00:1122:3344:102::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::b]:32345", + "dataset": { + "pool_name": "oxp_742699b5-4ded-406d-9a08-24648d3b2efb" + } + } + } + ] + } + }, + "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89": { + "time_collected": "2024-03-01T19:17:10.594368Z", + "source": "http://[fd00:1122:3344:103::1]:12345", + "sled_id": "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89", + "zones": { + "generation": 5, + "zones": [ + { + "id": "185e6b00-7e18-456c-9dfc-edd0194ac207", + "underlay_address": "fd00:1122:3344:103::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::a]:32345", + "dataset": { + "pool_name": "oxp_6aa59516-2018-4494-af6b-46704b20f6dc" + } + } + }, + { + "id": "1fc78bc7-feca-4a10-9beb-58b53fda5210", + "underlay_address": "fd00:1122:3344:103::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::10]:32345", + "dataset": { + "pool_name": "oxp_07d66b6b-8059-4d07-a0b3-8ce41e2a8c98" + } + } + }, + { + "id": "45dccfd6-4674-43c6-a0e7-4dde5f24d90d", + "underlay_address": "fd00:1122:3344:3::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "[fd00:1122:3344:3::1]:53", + "gz_address": "fd00:1122:3344:3::2", + "gz_address_index": 2, + "http_address": "[fd00:1122:3344:3::1]:5353" + } + }, + { + "id": "4b74c418-0a08-4489-b652-e97267c1c220", + "underlay_address": "fd00:1122:3344:103::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::7]:32345", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee", + "underlay_address": "fd00:1122:3344:103::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.4", + "external_tls": true, + "internal_address": "[fd00:1122:3344:103::5]:12221", + "nic": { + "id": "1f55dc2b-18ee-485c-8831-b603a8df78e6", + "ip": "172.30.2.7", + "kind": { + "type": "service", + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee" + }, + "mac": "A8:40:25:FF:96:05", + "name": "nexus-54c947d2-6355-453c-80fc-8f49cc2129ee", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "60d64b70-89f5-4774-9e41-2c79c4173911", + "underlay_address": "fd00:1122:3344:103::6", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:103::6]:17000" + } + }, + { + "id": "921958c0-7761-4294-945d-2a078327bf2c", + "underlay_address": "fd00:1122:3344:103::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::8]:32345", + "dataset": { + "pool_name": "oxp_7eabbdcc-28a3-4af9-bc4a-8d9b745d4df3" + } + } + }, + { + "id": "9df22034-59f6-431f-82e7-258fe983879b", + "underlay_address": "fd00:1122:3344:103::4", + 
"zone_type": { + "type": "external_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "172.20.28.1:53", + "http_address": "[fd00:1122:3344:103::4]:5353", + "nic": { + "id": "0376a963-e3c1-44bc-9cbf-9e8cca58410b", + "ip": "172.30.1.5", + "kind": { + "type": "service", + "id": "9df22034-59f6-431f-82e7-258fe983879b" + }, + "mac": "A8:40:25:FF:90:12", + "name": "external-dns-9df22034-59f6-431f-82e7-258fe983879b", + "primary": true, + "slot": 0, + "subnet": "172.30.1.0/24", + "vni": 100 + } + } + }, + { + "id": "bb21c0c4-c43f-4913-9bf9-707bd7f1769a", + "underlay_address": "fd00:1122:3344:103::11", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:103::11]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + }, + { + "id": "ccb0d612-9d7c-41ba-bd51-661c3c75ec91", + "underlay_address": "fd00:1122:3344:103::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::9]:32345", + "dataset": { + "pool_name": "oxp_c8feed09-6556-4409-af8e-563c97d556eb" + } + } + }, + { + "id": "ccef3e95-cae5-4072-9b7d-49e56fa9dff4", + "underlay_address": "fd00:1122:3344:103::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::d]:32345", + "dataset": { + "pool_name": "oxp_5b8b10a4-d527-4176-a049-1e9c2a10edea" + } + } + }, + { + "id": "d42fc874-8416-4212-b5a0-febba4c6b0ac", + "underlay_address": "fd00:1122:3344:103::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::b]:32345", + "dataset": { + "pool_name": "oxp_83a60822-1299-471c-bc79-3be0f87cf7c3" + } + } + }, + { + "id": "d5ba7d45-26d7-45ca-8c0a-a560f243b5fa", + "underlay_address": "fd00:1122:3344:103::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::e]:32345", + "dataset": { + "pool_name": "oxp_4ffc6336-04d1-4f21-9c27-a95aa640f960" + } + } + }, + { + "id": "da27ee6a-9841-46aa-acda-7cce3e04cb60", + "underlay_address": "fd00:1122:3344:103::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:103::3]:32221", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "e09525cf-9c87-4d05-8860-1356dd0dee8a", + "underlay_address": "fd00:1122:3344:103::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::f]:32345", + "dataset": { + "pool_name": "oxp_e2244950-dda0-4dfa-8b63-5ffac16c5a62" + } + } + }, + { + "id": "f7264a3e-e8e7-4476-8df3-76c9d00a383b", + "underlay_address": "fd00:1122:3344:103::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::c]:32345", + "dataset": { + "pool_name": "oxp_f6813b0c-ab5a-4fd0-8e24-95de9b65280d" + } + } + } + ] + } + } + } + }, + { + "id": "0a8dc8ff-9013-43aa-9f06-9540b24ea902", + "errors": [ + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page Cmpa: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"bb340ba7-cf9e-48c9-9e87-4b26fcb9b2aa\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:09 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed 
to deserialize message\", request_id: \"bb340ba7-cf9e-48c9-9e87-4b26fcb9b2aa\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaActive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"86914fc3-f142-4dbf-8ac5-a94c03c38a93\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:09 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"86914fc3-f142-4dbf-8ac5-a94c03c38a93\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaInactive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"e29b10e1-aea9-48a9-8054-951715c06bdb\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:09 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"e29b10e1-aea9-48a9-8054-951715c06bdb\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaScratch: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"719a3865-65bc-4228-810f-20b93a5199d0\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:10 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"719a3865-65bc-4228-810f-20b93a5199d0\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page Cmpa: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"70ee4f97-6ffe-46fe-810d-6d0f602e8749\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:10 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"70ee4f97-6ffe-46fe-810d-6d0f602e8749\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaActive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"6d81dff5-4390-43dc-a976-489980a574f3\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:10 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"6d81dff5-4390-43dc-a976-489980a574f3\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaInactive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": 
\"f83589b8-03d7-47f6-8e49-4090810c6485\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:10 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"f83589b8-03d7-47f6-8e49-4090810c6485\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaScratch: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"73e37767-229b-48ca-9fcd-d98988c1a131\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:17:10 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"73e37767-229b-48ca-9fcd-d98988c1a131\" }" + ], + "time_started": "2024-03-01T19:17:06.813002Z", + "time_done": "2024-03-01T19:17:11.153859Z", + "collector": "54c947d2-6355-453c-80fc-8f49cc2129ee", + "baseboards": [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + } + ], + "cabooses": [ + { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + }, + { + "board": "gimlet-c", + "git_commit": "620f18f924d180d13a0577bd51d7c48c1d670549", + "name": "gimlet-c", + "version": "1.0.5" + }, + { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + }, + { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + }, + { + "board": "oxide-rot-1", + "git_commit": "08f5a258dd48728cfaae68fcc7fbd56c7359329c", + "name": "oxide-rot-1", + "version": "1.0.4" + }, + { + "board": "oxide-rot-1", + "git_commit": "4b2a3469ee63f14e237ed26d741c49e8cc27261f", + "name": "oxide-rot-1", + "version": "0.0.0-git" + }, + { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + }, + { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1", + "version": "1.0.0" + }, + { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1-dev", + "version": "0.0.0-git" + }, + { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "sidecar-b", + "version": "1.0.6" + }, + { + "board": "sidecar-b", + "git_commit": "a096c4c17b17772c72dbfb03e0b9c96e59d9a0fe", + "name": "sidecar-b", + "version": "0.0.0-git" + }, + { + "board": "sidecar-b", + "git_commit": "f04c427514943573272da3524aa4d976d069ac95", + "name": "sidecar-b", + "version": "0.0.0-git" + } + ], + "rot_pages": [ + { + "data_base64": 
"AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + }, + { + "data_base64": "AAAAABgAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa0EBHkNRkpjbiUrOzKdmz2xKt2ubIbVZ4ALX8/sXkig=" + }, + { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + }, + { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + }, + { + "data_base64": "AAAAADYAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKR50f2Jw5Dyd7j2FPBE4r77CMk4azEk2sBLHfrvOts=" + }, + { + 
"data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + }, + { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + ], + "sps": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:17:09.906135Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "switch", + "sp_slot": 0, + "baseboard_revision": 4, + "hubris_archive": "9184be93590e655e", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:17:10.475436Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "sp_type": "switch", + "sp_slot": 1, + "baseboard_revision": 4, + "hubris_archive": "9184be93590e655e", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:08.746072Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 17, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:09.327592Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 15, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:08.014626Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 14, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:06.876242Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 16, + "baseboard_revision": 6, + "hubris_archive": "3c670b44fdeef4af", + "power_state": "A0" + } + ] + ], + "rots": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:17:09.906135Z", + "source": 
"http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "4599e4d7bc9b2164e8c0f0f7ce4bcae2bd35d635c8450854ea9a94ce49a2ca0f", + "slot_b_sha3_256_digest": "1e42a0761e5bd1d9f9a5e2c44acc34e7b7e007792977cdb0523d271db2e80c8c" + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:17:10.475436Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "active_slot": { + "slot": "b" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "92e07077972c83af6872ec817fb88d74a36f6956276505e9771302c5bb7ec2a8", + "slot_b_sha3_256_digest": "fb25825eaf97f8f2307afe3caebf0b70865499f6d116db3aff382c505b4d8dd7" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:08.746072Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "96caca4fb6a789c228cdb69ca4afe300cecd46fb611e1fb12c384fe9597a0d1c", + "slot_b_sha3_256_digest": "29215490b1984f4cfcea7d95176b8338466b2fc60123cfc27e895209bef506bb" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:09.327592Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "96caca4fb6a789c228cdb69ca4afe300cecd46fb611e1fb12c384fe9597a0d1c", + "slot_b_sha3_256_digest": "29215490b1984f4cfcea7d95176b8338466b2fc60123cfc27e895209bef506bb" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:08.014626Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "b" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "7d081250f8eb830b7dc5067334316f497a184b01788b5614a7a7f88b93a87c20", + "slot_b_sha3_256_digest": "d416d5ba9d0d3bd495f0f5fcd2aecc28cd83b53d1a8f94a28241d85b30761451" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:06.876242Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "7d081250f8eb830b7dc5067334316f497a184b01788b5614a7a7f88b93a87c20", + "slot_b_sha3_256_digest": "4ec328195a23e86a1545d5ee576e73834aae81fb79830a54d0503475875f1760" + } + ] + ], + "cabooses_found": { + "SpSlot0": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:17:09.907517Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": 
"sidecar-b", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:17:10.476830Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "sidecar-b", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:08.751041Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:09.329018Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:08.123957Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:07.130790Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ] + ], + "SpSlot1": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:17:09.909025Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "f04c427514943573272da3524aa4d976d069ac95", + "name": "sidecar-b", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:17:10.478263Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "a096c4c17b17772c72dbfb03e0b9c96e59d9a0fe", + "name": "sidecar-b", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:08.752437Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:09.330438Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:08.163634Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + 
"time_collected": "2024-03-01T19:17:07.328321Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "620f18f924d180d13a0577bd51d7c48c1d670549", + "name": "gimlet-c", + "version": "1.0.5" + } + } + ] + ], + "RotSlotA": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:17:10.115796Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1-dev", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:17:10.685118Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "4b2a3469ee63f14e237ed26d741c49e8cc27261f", + "name": "oxide-rot-1", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:09.003752Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:09.582176Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:08.415126Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:07.643681Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ] + ], + "RotSlotB": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:17:10.322717Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:17:10.892030Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1", + "version": "1.0.0" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:09.255745Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:09.834209Z", + 
"source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:08.667106Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:07.894743Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "08f5a258dd48728cfaae68fcc7fbd56c7359329c", + "name": "oxide-rot-1", + "version": "1.0.4" + } + } + ] + ] + }, + "rot_pages_found": { + "Cmpa": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:09.272691Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:09.850755Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:08.682710Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": 
"oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:07.911203Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ] + ], + "CfpaActive": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:09.288403Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:09.866661Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + 
"time_collected": "2024-03-01T19:17:08.699070Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:07.927224Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ], + "CfpaInactive": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:09.304293Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADYAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKR50f2Jw5Dyd7j2FPBE4r77CMk4azEk2sBLHfrvOts=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:17:09.882683Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": 
"AAAAABgAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa0EBHkNRkpjbiUrOzKdmz2xKt2ubIbVZ4ALX8/sXkig=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:08.714707Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:07.943244Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ], + "CfpaScratch": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:17:09.320329Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + 
"time_collected": "2024-03-01T19:17:09.898662Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:17:08.730680Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:17:07.959254Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ] + }, + "sled_agents": { + "1762267a-a0cb-4c9a-88b4-8ce828cbc021": { + "time_collected": "2024-03-01T19:17:11.045570Z", + "source": "http://[fd00:1122:3344:101::1]:12345", + "sled_id": "1762267a-a0cb-4c9a-88b4-8ce828cbc021", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + "sled_agent_address": "[fd00:1122:3344:101::1]:12345", + "sled_role": "scrimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f": { + "time_collected": "2024-03-01T19:17:11.081825Z", + "source": "http://[fd00:1122:3344:121::1]:12345", + "sled_id": "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + "sled_agent_address": "[fd00:1122:3344:121::1]:12345", + "sled_role": "gimlet", + 
"usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "a6634f64-f6fb-426c-b00b-9b30aed9f53a": { + "time_collected": "2024-03-01T19:17:11.117162Z", + "source": "http://[fd00:1122:3344:102::1]:12345", + "sled_id": "a6634f64-f6fb-426c-b00b-9b30aed9f53a", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + "sled_agent_address": "[fd00:1122:3344:102::1]:12345", + "sled_role": "gimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89": { + "time_collected": "2024-03-01T19:17:11.152394Z", + "source": "http://[fd00:1122:3344:103::1]:12345", + "sled_id": "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + "sled_agent_address": "[fd00:1122:3344:103::1]:12345", + "sled_role": "scrimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + } + }, + "omicron_zones": { + "1762267a-a0cb-4c9a-88b4-8ce828cbc021": { + "time_collected": "2024-03-01T19:17:11.046682Z", + "source": "http://[fd00:1122:3344:101::1]:12345", + "sled_id": "1762267a-a0cb-4c9a-88b4-8ce828cbc021", + "zones": { + "generation": 5, + "zones": [ + { + "id": "0706860a-3801-42a7-8000-fff741f7665b", + "underlay_address": "fd00:1122:3344:101::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::c]:32345", + "dataset": { + "pool_name": "oxp_1d864940-c723-4f58-82e8-b03772b42356" + } + } + }, + { + "id": "34a279a2-a025-41ff-a392-1addac400f38", + "underlay_address": "fd00:1122:3344:101::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::3]:32221", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "3d344935-6f48-405f-8794-2c6e8fa4b1c2", + "underlay_address": "fd00:1122:3344:1::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + }, + "dns_address": "[fd00:1122:3344:1::1]:53", + "gz_address": "fd00:1122:3344:1::2", + "gz_address_index": 0, + "http_address": "[fd00:1122:3344:1::1]:5353" + } + }, + { + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "underlay_address": "fd00:1122:3344:101::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.2", + "external_tls": true, + "internal_address": "[fd00:1122:3344:101::5]:12221", + "nic": { + "id": "81a0385e-86b9-4109-b63d-61c1407c50d1", + "ip": "172.30.2.5", + "kind": { + "type": "service", + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629" + }, + "mac": "A8:40:25:FF:E1:17", + "name": "nexus-59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "67852e14-ce7f-4e50-a61d-4059d0c057b9", + "underlay_address": "fd00:1122:3344:101::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::10]:32345", + "dataset": { + "pool_name": "oxp_eae1515c-cd4a-4d7b-9a99-dbc24cf2e337" + } + } + }, + { + "id": "6ace1a69-d55a-45f0-a127-99c90f1b070c", + "underlay_address": "fd00:1122:3344:101::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::f]:32345", + "dataset": { + "pool_name": "oxp_466351db-2ced-45fa-9431-22de1c4bd480" + } + } + }, + { + "id": "84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe", + "underlay_address": 
"fd00:1122:3344:101::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::9]:32345", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "854c1544-d0ea-4d65-9cf4-776e91a4fcb5", + "underlay_address": "fd00:1122:3344:101::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::8]:32345", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "86ee7359-aa35-41d9-8a76-54961bc516a1", + "underlay_address": "fd00:1122:3344:101::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::4]:32221", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "925ea436-fad4-4fff-8ae0-24edd725a2b0", + "underlay_address": "fd00:1122:3344:101::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::d]:32345", + "dataset": { + "pool_name": "oxp_88ddd98f-3181-4797-864a-7e8d5b028657" + } + } + }, + { + "id": "be272d1e-4d34-4478-94b4-a76f5fcbef36", + "underlay_address": "fd00:1122:3344:101::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::a]:32345", + "dataset": { + "pool_name": "oxp_9041d780-b5ee-4159-a1db-8c4075e2176d" + } + } + }, + { + "id": "bebd754d-d06c-45f4-99bf-e1aab6253ab8", + "underlay_address": "fd00:1122:3344:101::6", + "zone_type": { + "type": "oximeter", + "address": "[fd00:1122:3344:101::6]:12223" + } + }, + { + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc", + "underlay_address": "fd00:1122:3344:101::11", + "zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:101::11]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9741f03f-f150-4229-9f8d-e29a69c87883", + "ip": "172.30.3.5", + "kind": { + "type": "service", + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc" + }, + "mac": "A8:40:25:FF:E7:A8", + "name": "ntp-d630cca7-e3f7-47a8-aee8-ad0790895abc", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.5", + "first_port": 0, + "last_port": 16383 + } + } + }, + { + "id": "d8c51c79-46ff-44bd-80be-08d6283f4e21", + "underlay_address": "fd00:1122:3344:101::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::e]:32345", + "dataset": { + "pool_name": "oxp_3f95c036-d5d8-4d08-8c78-c0635ca34698" + } + } + }, + { + "id": "dbbc0422-2b23-4d9e-b1db-8003cfc29ad0", + "underlay_address": "fd00:1122:3344:101::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::b]:32345", + "dataset": { + "pool_name": "oxp_1a4447be-5c84-4dad-b22c-d35e5e1911b0" + } + } + }, + { + "id": "f33a2375-212d-42e4-b539-6079aeb4e5b7", + "underlay_address": "fd00:1122:3344:101::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:101::7]:17000" + } + } + ] + } + }, + "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f": { + "time_collected": "2024-03-01T19:17:11.082540Z", + "source": "http://[fd00:1122:3344:121::1]:12345", + "sled_id": "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f", + "zones": { + "generation": 3, + "zones": [ + { + "id": "0eca39c6-62e2-4a1f-8ea2-e9332a774a26", + "underlay_address": "fd00:1122:3344:121::28", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::28]:32345", + "dataset": { + "pool_name": "oxp_cc36b0cc-818a-4c7f-b50d-057ff7e7e8e3" + } + } + }, + { + "id": "12431d3e-1516-4822-98b3-3711ff912e69", + "underlay_address": "fd00:1122:3344:121::22", + 
"zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::22]:32345", + "dataset": { + "pool_name": "oxp_0622b2cf-297e-4356-b94c-d74309f443ba" + } + } + }, + { + "id": "2d5c29f7-4915-4876-82f8-e388b15c5067", + "underlay_address": "fd00:1122:3344:121::21", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:121::21]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + }, + { + "id": "56414d76-9b8c-4e15-acf1-cd269c2d8638", + "underlay_address": "fd00:1122:3344:121::25", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::25]:32345", + "dataset": { + "pool_name": "oxp_574728ca-1559-48f3-a46c-1e608966e6a0" + } + } + }, + { + "id": "891cb9da-301d-40cc-b93d-5fcb350fa968", + "underlay_address": "fd00:1122:3344:121::29", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::29]:32345", + "dataset": { + "pool_name": "oxp_e4a4a09a-43b8-460c-bc43-1f935600b681" + } + } + }, + { + "id": "d9014907-24c0-4c33-a475-db15942f4f8f", + "underlay_address": "fd00:1122:3344:121::23", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::23]:32345", + "dataset": { + "pool_name": "oxp_276822e2-d7ce-4eae-b5a4-38d2e8586a37" + } + } + }, + { + "id": "e950de25-7306-4cf7-a6f4-6db11adaa310", + "underlay_address": "fd00:1122:3344:121::27", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::27]:32345", + "dataset": { + "pool_name": "oxp_8c7a7d84-c269-4178-9dbb-37f363e7958c" + } + } + }, + { + "id": "fe3dc092-ea23-42d7-9f1d-36eed72b539c", + "underlay_address": "fd00:1122:3344:121::26", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::26]:32345", + "dataset": { + "pool_name": "oxp_6d2b29c5-aa58-40b7-b630-88301f08e945" + } + } + }, + { + "id": "ff8cce49-a721-4ad1-b09e-2bd6e4e42d00", + "underlay_address": "fd00:1122:3344:121::24", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::24]:32345", + "dataset": { + "pool_name": "oxp_2b0da7bc-2afc-4a16-95e1-5126bcb9b6b1" + } + } + } + ] + } + }, + "a6634f64-f6fb-426c-b00b-9b30aed9f53a": { + "time_collected": "2024-03-01T19:17:11.118025Z", + "source": "http://[fd00:1122:3344:102::1]:12345", + "sled_id": "a6634f64-f6fb-426c-b00b-9b30aed9f53a", + "zones": { + "generation": 5, + "zones": [ + { + "id": "0ebd2b5b-2dec-4d17-8e23-e468c04dacb6", + "underlay_address": "fd00:1122:3344:102::11", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::11]:32345", + "dataset": { + "pool_name": "oxp_0c07bcc1-7666-4754-881d-4135992ff561" + } + } + }, + { + "id": "18b99404-d8b2-4463-bc99-638d9b678ab3", + "underlay_address": "fd00:1122:3344:102::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::e]:32345", + "dataset": { + "pool_name": "oxp_f9a037da-7d15-40d9-957d-7e42aadb333c" + } + } + }, + { + "id": "479811e7-6409-45d4-b294-5333534e47ed", + "underlay_address": "fd00:1122:3344:102::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::9]:32345", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "54d1b693-bc4d-4c47-9009-c6f624073801", + "underlay_address": "fd00:1122:3344:102::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::a]:32345", + "dataset": { + "pool_name": 
"oxp_02cc2369-9924-41c7-aa74-79d6d548f2dd" + } + } + }, + { + "id": "5b743072-9369-45d7-b7c9-dbdf736301f8", + "underlay_address": "fd00:1122:3344:2::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + }, + "dns_address": "[fd00:1122:3344:2::1]:53", + "gz_address": "fd00:1122:3344:2::2", + "gz_address_index": 1, + "http_address": "[fd00:1122:3344:2::1]:5353" + } + }, + { + "id": "5c4163fa-d6a6-4a0c-9476-3dda135bc9a4", + "underlay_address": "fd00:1122:3344:102::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:102::7]:17000" + } + }, + { + "id": "61a39188-dd82-43f6-a87c-ebb4c2d54ac4", + "underlay_address": "fd00:1122:3344:102::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::8]:32345", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "658076ed-fb43-49e7-a813-df3d23497848", + "underlay_address": "fd00:1122:3344:102::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::10]:32345", + "dataset": { + "pool_name": "oxp_7929b5fb-4ef2-4791-92e6-1a46c7f608b9" + } + } + }, + { + "id": "6edb836e-b100-4080-bb03-738d6441d571", + "underlay_address": "fd00:1122:3344:102::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::c]:32345", + "dataset": { + "pool_name": "oxp_fe74ae4d-d7bc-4293-9ec9-69132aca4aba" + } + } + }, + { + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b", + "underlay_address": "fd00:1122:3344:102::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.3", + "external_tls": true, + "internal_address": "[fd00:1122:3344:102::5]:12221", + "nic": { + "id": "3771a19c-cc40-4fd5-b99d-ed10071018a7", + "ip": "172.30.2.6", + "kind": { + "type": "service", + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b" + }, + "mac": "A8:40:25:FF:B2:AF", + "name": "nexus-7f83d784-9c5c-498f-b559-4176bbafbc4b", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "8052938f-73bc-4c54-8ed0-c62f6d77b9fd", + "underlay_address": "fd00:1122:3344:102::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::d]:32345", + "dataset": { + "pool_name": "oxp_93e4e338-0ad1-4ae1-a120-2cfb59b4df42" + } + } + }, + { + "id": "86bc0a2a-e24a-4bc1-8222-89207a0937f9", + "underlay_address": "fd00:1122:3344:102::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::4]:32221", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "9241b9da-8188-4aae-a9dc-62acb453618f", + "underlay_address": "fd00:1122:3344:102::6", + "zone_type": { + "type": "clickhouse", + "address": "[fd00:1122:3344:102::6]:8123", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "a781a349-62b2-41f0-9dd7-c64d290aef82", + "underlay_address": "fd00:1122:3344:102::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::f]:32345", + "dataset": { + "pool_name": "oxp_4e92cbcf-f194-4ffc-97e3-fdf3df7cc9ae" + } + } + }, + { + "id": "c67814bf-9972-4bb0-a666-24778ca70a16", + "underlay_address": "fd00:1122:3344:102::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::3]:32221", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "underlay_address": "fd00:1122:3344:102::12", + 
"zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:102::12]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9f03c82c-3192-498d-978d-a175049ac389", + "ip": "172.30.3.6", + "kind": { + "type": "service", + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909" + }, + "mac": "A8:40:25:FF:E4:11", + "name": "ntp-c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.6", + "first_port": 16384, + "last_port": 32767 + } + } + }, + { + "id": "dfc27183-ed85-44c3-ac3c-4427c4cb03e3", + "underlay_address": "fd00:1122:3344:102::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::b]:32345", + "dataset": { + "pool_name": "oxp_742699b5-4ded-406d-9a08-24648d3b2efb" + } + } + } + ] + } + }, + "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89": { + "time_collected": "2024-03-01T19:17:11.153275Z", + "source": "http://[fd00:1122:3344:103::1]:12345", + "sled_id": "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89", + "zones": { + "generation": 5, + "zones": [ + { + "id": "185e6b00-7e18-456c-9dfc-edd0194ac207", + "underlay_address": "fd00:1122:3344:103::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::a]:32345", + "dataset": { + "pool_name": "oxp_6aa59516-2018-4494-af6b-46704b20f6dc" + } + } + }, + { + "id": "1fc78bc7-feca-4a10-9beb-58b53fda5210", + "underlay_address": "fd00:1122:3344:103::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::10]:32345", + "dataset": { + "pool_name": "oxp_07d66b6b-8059-4d07-a0b3-8ce41e2a8c98" + } + } + }, + { + "id": "45dccfd6-4674-43c6-a0e7-4dde5f24d90d", + "underlay_address": "fd00:1122:3344:3::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "[fd00:1122:3344:3::1]:53", + "gz_address": "fd00:1122:3344:3::2", + "gz_address_index": 2, + "http_address": "[fd00:1122:3344:3::1]:5353" + } + }, + { + "id": "4b74c418-0a08-4489-b652-e97267c1c220", + "underlay_address": "fd00:1122:3344:103::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::7]:32345", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee", + "underlay_address": "fd00:1122:3344:103::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.4", + "external_tls": true, + "internal_address": "[fd00:1122:3344:103::5]:12221", + "nic": { + "id": "1f55dc2b-18ee-485c-8831-b603a8df78e6", + "ip": "172.30.2.7", + "kind": { + "type": "service", + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee" + }, + "mac": "A8:40:25:FF:96:05", + "name": "nexus-54c947d2-6355-453c-80fc-8f49cc2129ee", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "60d64b70-89f5-4774-9e41-2c79c4173911", + "underlay_address": "fd00:1122:3344:103::6", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:103::6]:17000" + } + }, + { + "id": "921958c0-7761-4294-945d-2a078327bf2c", + "underlay_address": "fd00:1122:3344:103::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::8]:32345", + "dataset": { + "pool_name": "oxp_7eabbdcc-28a3-4af9-bc4a-8d9b745d4df3" + } + } + }, + { + "id": "9df22034-59f6-431f-82e7-258fe983879b", + "underlay_address": "fd00:1122:3344:103::4", + 
"zone_type": { + "type": "external_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "172.20.28.1:53", + "http_address": "[fd00:1122:3344:103::4]:5353", + "nic": { + "id": "0376a963-e3c1-44bc-9cbf-9e8cca58410b", + "ip": "172.30.1.5", + "kind": { + "type": "service", + "id": "9df22034-59f6-431f-82e7-258fe983879b" + }, + "mac": "A8:40:25:FF:90:12", + "name": "external-dns-9df22034-59f6-431f-82e7-258fe983879b", + "primary": true, + "slot": 0, + "subnet": "172.30.1.0/24", + "vni": 100 + } + } + }, + { + "id": "bb21c0c4-c43f-4913-9bf9-707bd7f1769a", + "underlay_address": "fd00:1122:3344:103::11", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:103::11]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + }, + { + "id": "ccb0d612-9d7c-41ba-bd51-661c3c75ec91", + "underlay_address": "fd00:1122:3344:103::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::9]:32345", + "dataset": { + "pool_name": "oxp_c8feed09-6556-4409-af8e-563c97d556eb" + } + } + }, + { + "id": "ccef3e95-cae5-4072-9b7d-49e56fa9dff4", + "underlay_address": "fd00:1122:3344:103::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::d]:32345", + "dataset": { + "pool_name": "oxp_5b8b10a4-d527-4176-a049-1e9c2a10edea" + } + } + }, + { + "id": "d42fc874-8416-4212-b5a0-febba4c6b0ac", + "underlay_address": "fd00:1122:3344:103::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::b]:32345", + "dataset": { + "pool_name": "oxp_83a60822-1299-471c-bc79-3be0f87cf7c3" + } + } + }, + { + "id": "d5ba7d45-26d7-45ca-8c0a-a560f243b5fa", + "underlay_address": "fd00:1122:3344:103::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::e]:32345", + "dataset": { + "pool_name": "oxp_4ffc6336-04d1-4f21-9c27-a95aa640f960" + } + } + }, + { + "id": "da27ee6a-9841-46aa-acda-7cce3e04cb60", + "underlay_address": "fd00:1122:3344:103::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:103::3]:32221", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "e09525cf-9c87-4d05-8860-1356dd0dee8a", + "underlay_address": "fd00:1122:3344:103::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::f]:32345", + "dataset": { + "pool_name": "oxp_e2244950-dda0-4dfa-8b63-5ffac16c5a62" + } + } + }, + { + "id": "f7264a3e-e8e7-4476-8df3-76c9d00a383b", + "underlay_address": "fd00:1122:3344:103::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::c]:32345", + "dataset": { + "pool_name": "oxp_f6813b0c-ab5a-4fd0-8e24-95de9b65280d" + } + } + } + ] + } + } + } + }, + { + "id": "cd3547d7-f172-47d2-9bbf-9730e88b0559", + "errors": [ + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page Cmpa: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"a86ce874-ddce-4497-9e34-c7053d224035\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:06 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed 
to deserialize message\", request_id: \"a86ce874-ddce-4497-9e34-c7053d224035\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaActive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"bf3dbdb6-cac9-43a1-a55e-973d2397ffe6\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:06 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"bf3dbdb6-cac9-43a1-a55e-973d2397ffe6\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaInactive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"fabd84d3-7c0c-4e60-99b4-f97b9288839c\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:06 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"fabd84d3-7c0c-4e60-99b4-f97b9288839c\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaScratch: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"c0160967-c6b3-49b7-b801-0fcd6d4f5e31\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:06 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"c0160967-c6b3-49b7-b801-0fcd6d4f5e31\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page Cmpa: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"247b36be-d31b-460a-8722-58a21008eba3\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:06 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"247b36be-d31b-460a-8722-58a21008eba3\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaActive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"81998378-2641-4eab-8bcb-f335995e0af8\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:06 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"81998378-2641-4eab-8bcb-f335995e0af8\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaInactive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": 
\"8fcd5a26-f45a-47fc-9219-e57ac207fd2e\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:06 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"8fcd5a26-f45a-47fc-9219-e57ac207fd2e\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaScratch: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"20e66cd5-274f-487d-8186-c5e4cd25ae74\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:06 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"20e66cd5-274f-487d-8186-c5e4cd25ae74\" }" + ], + "time_started": "2024-03-01T19:18:03.580607Z", + "time_done": "2024-03-01T19:18:07.486108Z", + "collector": "59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "baseboards": [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + } + ], + "cabooses": [ + { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + }, + { + "board": "gimlet-c", + "git_commit": "620f18f924d180d13a0577bd51d7c48c1d670549", + "name": "gimlet-c", + "version": "1.0.5" + }, + { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + }, + { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + }, + { + "board": "oxide-rot-1", + "git_commit": "08f5a258dd48728cfaae68fcc7fbd56c7359329c", + "name": "oxide-rot-1", + "version": "1.0.4" + }, + { + "board": "oxide-rot-1", + "git_commit": "4b2a3469ee63f14e237ed26d741c49e8cc27261f", + "name": "oxide-rot-1", + "version": "0.0.0-git" + }, + { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + }, + { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1", + "version": "1.0.0" + }, + { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1-dev", + "version": "0.0.0-git" + }, + { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "sidecar-b", + "version": "1.0.6" + }, + { + "board": "sidecar-b", + "git_commit": "a096c4c17b17772c72dbfb03e0b9c96e59d9a0fe", + "name": "sidecar-b", + "version": "0.0.0-git" + }, + { + "board": "sidecar-b", + "git_commit": "f04c427514943573272da3524aa4d976d069ac95", + "name": "sidecar-b", + "version": "0.0.0-git" + } + ], + "rot_pages": [ + { + "data_base64": 
"AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + }, + { + "data_base64": "AAAAABgAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa0EBHkNRkpjbiUrOzKdmz2xKt2ubIbVZ4ALX8/sXkig=" + }, + { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + }, + { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + }, + { + "data_base64": "AAAAADYAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKR50f2Jw5Dyd7j2FPBE4r77CMk4azEk2sBLHfrvOts=" + }, + { + 
"data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + }, + { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + ], + "sps": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:05.910026Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "switch", + "sp_slot": 0, + "baseboard_revision": 4, + "hubris_archive": "9184be93590e655e", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:06.485436Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "sp_type": "switch", + "sp_slot": 1, + "baseboard_revision": 4, + "hubris_archive": "9184be93590e655e", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:04.749894Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 17, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:05.327348Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 15, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:04.172071Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 14, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:03.592833Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 16, + "baseboard_revision": 6, + "hubris_archive": "3c670b44fdeef4af", + "power_state": "A0" + } + ] + ], + "rots": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:05.910026Z", + "source": 
"http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "4599e4d7bc9b2164e8c0f0f7ce4bcae2bd35d635c8450854ea9a94ce49a2ca0f", + "slot_b_sha3_256_digest": "1e42a0761e5bd1d9f9a5e2c44acc34e7b7e007792977cdb0523d271db2e80c8c" + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:06.485436Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "active_slot": { + "slot": "b" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "92e07077972c83af6872ec817fb88d74a36f6956276505e9771302c5bb7ec2a8", + "slot_b_sha3_256_digest": "fb25825eaf97f8f2307afe3caebf0b70865499f6d116db3aff382c505b4d8dd7" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:04.749894Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "96caca4fb6a789c228cdb69ca4afe300cecd46fb611e1fb12c384fe9597a0d1c", + "slot_b_sha3_256_digest": "29215490b1984f4cfcea7d95176b8338466b2fc60123cfc27e895209bef506bb" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:05.327348Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "96caca4fb6a789c228cdb69ca4afe300cecd46fb611e1fb12c384fe9597a0d1c", + "slot_b_sha3_256_digest": "29215490b1984f4cfcea7d95176b8338466b2fc60123cfc27e895209bef506bb" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:04.172071Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "b" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "7d081250f8eb830b7dc5067334316f497a184b01788b5614a7a7f88b93a87c20", + "slot_b_sha3_256_digest": "d416d5ba9d0d3bd495f0f5fcd2aecc28cd83b53d1a8f94a28241d85b30761451" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:03.592833Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "7d081250f8eb830b7dc5067334316f497a184b01788b5614a7a7f88b93a87c20", + "slot_b_sha3_256_digest": "4ec328195a23e86a1545d5ee576e73834aae81fb79830a54d0503475875f1760" + } + ] + ], + "cabooses_found": { + "SpSlot0": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:05.911536Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": 
"sidecar-b", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:06.486873Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "sidecar-b", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:04.751575Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:05.332841Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:04.173529Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:03.594379Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ] + ], + "SpSlot1": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:05.912952Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "f04c427514943573272da3524aa4d976d069ac95", + "name": "sidecar-b", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:06.488258Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "a096c4c17b17772c72dbfb03e0b9c96e59d9a0fe", + "name": "sidecar-b", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:04.753197Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:05.334324Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:04.174886Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + 
"time_collected": "2024-03-01T19:18:03.596125Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "620f18f924d180d13a0577bd51d7c48c1d670549", + "name": "gimlet-c", + "version": "1.0.5" + } + } + ] + ], + "RotSlotA": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:06.119739Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1-dev", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:06.695247Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "4b2a3469ee63f14e237ed26d741c49e8cc27261f", + "name": "oxide-rot-1", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:05.004236Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:05.586015Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:04.426689Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:03.847427Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ] + ], + "RotSlotB": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:06.328695Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:06.903153Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1", + "version": "1.0.0" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:05.256365Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:05.838048Z", + 
"source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:04.678679Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:04.100460Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "08f5a258dd48728cfaae68fcc7fbd56c7359329c", + "name": "oxide-rot-1", + "version": "1.0.4" + } + } + ] + ] + }, + "rot_pages_found": { + "Cmpa": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:05.273195Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:05.854538Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:04.695195Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": 
"oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:04.116988Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ] + ], + "CfpaActive": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:05.288821Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:05.870545Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + 
"time_collected": "2024-03-01T19:18:04.711196Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:04.132904Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ], + "CfpaInactive": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:05.304991Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADYAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKR50f2Jw5Dyd7j2FPBE4r77CMk4azEk2sBLHfrvOts=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:05.886552Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": 
"AAAAABgAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa0EBHkNRkpjbiUrOzKdmz2xKt2ubIbVZ4ALX8/sXkig=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:04.727187Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:04.148861Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ], + "CfpaScratch": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:05.320918Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + 
"time_collected": "2024-03-01T19:18:05.902654Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:04.743134Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:04.164870Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ] + }, + "sled_agents": { + "1762267a-a0cb-4c9a-88b4-8ce828cbc021": { + "time_collected": "2024-03-01T19:18:07.325811Z", + "source": "http://[fd00:1122:3344:101::1]:12345", + "sled_id": "1762267a-a0cb-4c9a-88b4-8ce828cbc021", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + "sled_agent_address": "[fd00:1122:3344:101::1]:12345", + "sled_role": "scrimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f": { + "time_collected": "2024-03-01T19:18:07.365147Z", + "source": "http://[fd00:1122:3344:121::1]:12345", + "sled_id": "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + "sled_agent_address": "[fd00:1122:3344:121::1]:12345", + "sled_role": "gimlet", + 
"usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "a6634f64-f6fb-426c-b00b-9b30aed9f53a": { + "time_collected": "2024-03-01T19:18:07.401947Z", + "source": "http://[fd00:1122:3344:102::1]:12345", + "sled_id": "a6634f64-f6fb-426c-b00b-9b30aed9f53a", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + "sled_agent_address": "[fd00:1122:3344:102::1]:12345", + "sled_role": "gimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89": { + "time_collected": "2024-03-01T19:18:07.440624Z", + "source": "http://[fd00:1122:3344:103::1]:12345", + "sled_id": "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + "sled_agent_address": "[fd00:1122:3344:103::1]:12345", + "sled_role": "scrimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + } + }, + "omicron_zones": { + "1762267a-a0cb-4c9a-88b4-8ce828cbc021": { + "time_collected": "2024-03-01T19:18:07.327154Z", + "source": "http://[fd00:1122:3344:101::1]:12345", + "sled_id": "1762267a-a0cb-4c9a-88b4-8ce828cbc021", + "zones": { + "generation": 5, + "zones": [ + { + "id": "0706860a-3801-42a7-8000-fff741f7665b", + "underlay_address": "fd00:1122:3344:101::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::c]:32345", + "dataset": { + "pool_name": "oxp_1d864940-c723-4f58-82e8-b03772b42356" + } + } + }, + { + "id": "34a279a2-a025-41ff-a392-1addac400f38", + "underlay_address": "fd00:1122:3344:101::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::3]:32221", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "3d344935-6f48-405f-8794-2c6e8fa4b1c2", + "underlay_address": "fd00:1122:3344:1::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + }, + "dns_address": "[fd00:1122:3344:1::1]:53", + "gz_address": "fd00:1122:3344:1::2", + "gz_address_index": 0, + "http_address": "[fd00:1122:3344:1::1]:5353" + } + }, + { + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "underlay_address": "fd00:1122:3344:101::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.2", + "external_tls": true, + "internal_address": "[fd00:1122:3344:101::5]:12221", + "nic": { + "id": "81a0385e-86b9-4109-b63d-61c1407c50d1", + "ip": "172.30.2.5", + "kind": { + "type": "service", + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629" + }, + "mac": "A8:40:25:FF:E1:17", + "name": "nexus-59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "67852e14-ce7f-4e50-a61d-4059d0c057b9", + "underlay_address": "fd00:1122:3344:101::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::10]:32345", + "dataset": { + "pool_name": "oxp_eae1515c-cd4a-4d7b-9a99-dbc24cf2e337" + } + } + }, + { + "id": "6ace1a69-d55a-45f0-a127-99c90f1b070c", + "underlay_address": "fd00:1122:3344:101::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::f]:32345", + "dataset": { + "pool_name": "oxp_466351db-2ced-45fa-9431-22de1c4bd480" + } + } + }, + { + "id": "84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe", + "underlay_address": 
"fd00:1122:3344:101::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::9]:32345", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "854c1544-d0ea-4d65-9cf4-776e91a4fcb5", + "underlay_address": "fd00:1122:3344:101::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::8]:32345", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "86ee7359-aa35-41d9-8a76-54961bc516a1", + "underlay_address": "fd00:1122:3344:101::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::4]:32221", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "925ea436-fad4-4fff-8ae0-24edd725a2b0", + "underlay_address": "fd00:1122:3344:101::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::d]:32345", + "dataset": { + "pool_name": "oxp_88ddd98f-3181-4797-864a-7e8d5b028657" + } + } + }, + { + "id": "be272d1e-4d34-4478-94b4-a76f5fcbef36", + "underlay_address": "fd00:1122:3344:101::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::a]:32345", + "dataset": { + "pool_name": "oxp_9041d780-b5ee-4159-a1db-8c4075e2176d" + } + } + }, + { + "id": "bebd754d-d06c-45f4-99bf-e1aab6253ab8", + "underlay_address": "fd00:1122:3344:101::6", + "zone_type": { + "type": "oximeter", + "address": "[fd00:1122:3344:101::6]:12223" + } + }, + { + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc", + "underlay_address": "fd00:1122:3344:101::11", + "zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:101::11]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9741f03f-f150-4229-9f8d-e29a69c87883", + "ip": "172.30.3.5", + "kind": { + "type": "service", + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc" + }, + "mac": "A8:40:25:FF:E7:A8", + "name": "ntp-d630cca7-e3f7-47a8-aee8-ad0790895abc", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.5", + "first_port": 0, + "last_port": 16383 + } + } + }, + { + "id": "d8c51c79-46ff-44bd-80be-08d6283f4e21", + "underlay_address": "fd00:1122:3344:101::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::e]:32345", + "dataset": { + "pool_name": "oxp_3f95c036-d5d8-4d08-8c78-c0635ca34698" + } + } + }, + { + "id": "dbbc0422-2b23-4d9e-b1db-8003cfc29ad0", + "underlay_address": "fd00:1122:3344:101::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::b]:32345", + "dataset": { + "pool_name": "oxp_1a4447be-5c84-4dad-b22c-d35e5e1911b0" + } + } + }, + { + "id": "f33a2375-212d-42e4-b539-6079aeb4e5b7", + "underlay_address": "fd00:1122:3344:101::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:101::7]:17000" + } + } + ] + } + }, + "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f": { + "time_collected": "2024-03-01T19:18:07.365954Z", + "source": "http://[fd00:1122:3344:121::1]:12345", + "sled_id": "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f", + "zones": { + "generation": 3, + "zones": [ + { + "id": "0eca39c6-62e2-4a1f-8ea2-e9332a774a26", + "underlay_address": "fd00:1122:3344:121::28", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::28]:32345", + "dataset": { + "pool_name": "oxp_cc36b0cc-818a-4c7f-b50d-057ff7e7e8e3" + } + } + }, + { + "id": "12431d3e-1516-4822-98b3-3711ff912e69", + "underlay_address": "fd00:1122:3344:121::22", + 
"zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::22]:32345", + "dataset": { + "pool_name": "oxp_0622b2cf-297e-4356-b94c-d74309f443ba" + } + } + }, + { + "id": "2d5c29f7-4915-4876-82f8-e388b15c5067", + "underlay_address": "fd00:1122:3344:121::21", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:121::21]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + }, + { + "id": "56414d76-9b8c-4e15-acf1-cd269c2d8638", + "underlay_address": "fd00:1122:3344:121::25", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::25]:32345", + "dataset": { + "pool_name": "oxp_574728ca-1559-48f3-a46c-1e608966e6a0" + } + } + }, + { + "id": "891cb9da-301d-40cc-b93d-5fcb350fa968", + "underlay_address": "fd00:1122:3344:121::29", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::29]:32345", + "dataset": { + "pool_name": "oxp_e4a4a09a-43b8-460c-bc43-1f935600b681" + } + } + }, + { + "id": "d9014907-24c0-4c33-a475-db15942f4f8f", + "underlay_address": "fd00:1122:3344:121::23", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::23]:32345", + "dataset": { + "pool_name": "oxp_276822e2-d7ce-4eae-b5a4-38d2e8586a37" + } + } + }, + { + "id": "e950de25-7306-4cf7-a6f4-6db11adaa310", + "underlay_address": "fd00:1122:3344:121::27", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::27]:32345", + "dataset": { + "pool_name": "oxp_8c7a7d84-c269-4178-9dbb-37f363e7958c" + } + } + }, + { + "id": "fe3dc092-ea23-42d7-9f1d-36eed72b539c", + "underlay_address": "fd00:1122:3344:121::26", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::26]:32345", + "dataset": { + "pool_name": "oxp_6d2b29c5-aa58-40b7-b630-88301f08e945" + } + } + }, + { + "id": "ff8cce49-a721-4ad1-b09e-2bd6e4e42d00", + "underlay_address": "fd00:1122:3344:121::24", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::24]:32345", + "dataset": { + "pool_name": "oxp_2b0da7bc-2afc-4a16-95e1-5126bcb9b6b1" + } + } + } + ] + } + }, + "a6634f64-f6fb-426c-b00b-9b30aed9f53a": { + "time_collected": "2024-03-01T19:18:07.403197Z", + "source": "http://[fd00:1122:3344:102::1]:12345", + "sled_id": "a6634f64-f6fb-426c-b00b-9b30aed9f53a", + "zones": { + "generation": 5, + "zones": [ + { + "id": "0ebd2b5b-2dec-4d17-8e23-e468c04dacb6", + "underlay_address": "fd00:1122:3344:102::11", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::11]:32345", + "dataset": { + "pool_name": "oxp_0c07bcc1-7666-4754-881d-4135992ff561" + } + } + }, + { + "id": "18b99404-d8b2-4463-bc99-638d9b678ab3", + "underlay_address": "fd00:1122:3344:102::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::e]:32345", + "dataset": { + "pool_name": "oxp_f9a037da-7d15-40d9-957d-7e42aadb333c" + } + } + }, + { + "id": "479811e7-6409-45d4-b294-5333534e47ed", + "underlay_address": "fd00:1122:3344:102::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::9]:32345", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "54d1b693-bc4d-4c47-9009-c6f624073801", + "underlay_address": "fd00:1122:3344:102::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::a]:32345", + "dataset": { + "pool_name": 
"oxp_02cc2369-9924-41c7-aa74-79d6d548f2dd" + } + } + }, + { + "id": "5b743072-9369-45d7-b7c9-dbdf736301f8", + "underlay_address": "fd00:1122:3344:2::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + }, + "dns_address": "[fd00:1122:3344:2::1]:53", + "gz_address": "fd00:1122:3344:2::2", + "gz_address_index": 1, + "http_address": "[fd00:1122:3344:2::1]:5353" + } + }, + { + "id": "5c4163fa-d6a6-4a0c-9476-3dda135bc9a4", + "underlay_address": "fd00:1122:3344:102::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:102::7]:17000" + } + }, + { + "id": "61a39188-dd82-43f6-a87c-ebb4c2d54ac4", + "underlay_address": "fd00:1122:3344:102::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::8]:32345", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "658076ed-fb43-49e7-a813-df3d23497848", + "underlay_address": "fd00:1122:3344:102::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::10]:32345", + "dataset": { + "pool_name": "oxp_7929b5fb-4ef2-4791-92e6-1a46c7f608b9" + } + } + }, + { + "id": "6edb836e-b100-4080-bb03-738d6441d571", + "underlay_address": "fd00:1122:3344:102::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::c]:32345", + "dataset": { + "pool_name": "oxp_fe74ae4d-d7bc-4293-9ec9-69132aca4aba" + } + } + }, + { + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b", + "underlay_address": "fd00:1122:3344:102::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.3", + "external_tls": true, + "internal_address": "[fd00:1122:3344:102::5]:12221", + "nic": { + "id": "3771a19c-cc40-4fd5-b99d-ed10071018a7", + "ip": "172.30.2.6", + "kind": { + "type": "service", + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b" + }, + "mac": "A8:40:25:FF:B2:AF", + "name": "nexus-7f83d784-9c5c-498f-b559-4176bbafbc4b", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "8052938f-73bc-4c54-8ed0-c62f6d77b9fd", + "underlay_address": "fd00:1122:3344:102::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::d]:32345", + "dataset": { + "pool_name": "oxp_93e4e338-0ad1-4ae1-a120-2cfb59b4df42" + } + } + }, + { + "id": "86bc0a2a-e24a-4bc1-8222-89207a0937f9", + "underlay_address": "fd00:1122:3344:102::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::4]:32221", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "9241b9da-8188-4aae-a9dc-62acb453618f", + "underlay_address": "fd00:1122:3344:102::6", + "zone_type": { + "type": "clickhouse", + "address": "[fd00:1122:3344:102::6]:8123", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "a781a349-62b2-41f0-9dd7-c64d290aef82", + "underlay_address": "fd00:1122:3344:102::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::f]:32345", + "dataset": { + "pool_name": "oxp_4e92cbcf-f194-4ffc-97e3-fdf3df7cc9ae" + } + } + }, + { + "id": "c67814bf-9972-4bb0-a666-24778ca70a16", + "underlay_address": "fd00:1122:3344:102::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::3]:32221", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "underlay_address": "fd00:1122:3344:102::12", + 
"zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:102::12]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9f03c82c-3192-498d-978d-a175049ac389", + "ip": "172.30.3.6", + "kind": { + "type": "service", + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909" + }, + "mac": "A8:40:25:FF:E4:11", + "name": "ntp-c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.6", + "first_port": 16384, + "last_port": 32767 + } + } + }, + { + "id": "dfc27183-ed85-44c3-ac3c-4427c4cb03e3", + "underlay_address": "fd00:1122:3344:102::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::b]:32345", + "dataset": { + "pool_name": "oxp_742699b5-4ded-406d-9a08-24648d3b2efb" + } + } + } + ] + } + }, + "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89": { + "time_collected": "2024-03-01T19:18:07.483569Z", + "source": "http://[fd00:1122:3344:103::1]:12345", + "sled_id": "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89", + "zones": { + "generation": 5, + "zones": [ + { + "id": "185e6b00-7e18-456c-9dfc-edd0194ac207", + "underlay_address": "fd00:1122:3344:103::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::a]:32345", + "dataset": { + "pool_name": "oxp_6aa59516-2018-4494-af6b-46704b20f6dc" + } + } + }, + { + "id": "1fc78bc7-feca-4a10-9beb-58b53fda5210", + "underlay_address": "fd00:1122:3344:103::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::10]:32345", + "dataset": { + "pool_name": "oxp_07d66b6b-8059-4d07-a0b3-8ce41e2a8c98" + } + } + }, + { + "id": "45dccfd6-4674-43c6-a0e7-4dde5f24d90d", + "underlay_address": "fd00:1122:3344:3::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "[fd00:1122:3344:3::1]:53", + "gz_address": "fd00:1122:3344:3::2", + "gz_address_index": 2, + "http_address": "[fd00:1122:3344:3::1]:5353" + } + }, + { + "id": "4b74c418-0a08-4489-b652-e97267c1c220", + "underlay_address": "fd00:1122:3344:103::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::7]:32345", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee", + "underlay_address": "fd00:1122:3344:103::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.4", + "external_tls": true, + "internal_address": "[fd00:1122:3344:103::5]:12221", + "nic": { + "id": "1f55dc2b-18ee-485c-8831-b603a8df78e6", + "ip": "172.30.2.7", + "kind": { + "type": "service", + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee" + }, + "mac": "A8:40:25:FF:96:05", + "name": "nexus-54c947d2-6355-453c-80fc-8f49cc2129ee", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "60d64b70-89f5-4774-9e41-2c79c4173911", + "underlay_address": "fd00:1122:3344:103::6", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:103::6]:17000" + } + }, + { + "id": "921958c0-7761-4294-945d-2a078327bf2c", + "underlay_address": "fd00:1122:3344:103::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::8]:32345", + "dataset": { + "pool_name": "oxp_7eabbdcc-28a3-4af9-bc4a-8d9b745d4df3" + } + } + }, + { + "id": "9df22034-59f6-431f-82e7-258fe983879b", + "underlay_address": "fd00:1122:3344:103::4", + 
"zone_type": { + "type": "external_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "172.20.28.1:53", + "http_address": "[fd00:1122:3344:103::4]:5353", + "nic": { + "id": "0376a963-e3c1-44bc-9cbf-9e8cca58410b", + "ip": "172.30.1.5", + "kind": { + "type": "service", + "id": "9df22034-59f6-431f-82e7-258fe983879b" + }, + "mac": "A8:40:25:FF:90:12", + "name": "external-dns-9df22034-59f6-431f-82e7-258fe983879b", + "primary": true, + "slot": 0, + "subnet": "172.30.1.0/24", + "vni": 100 + } + } + }, + { + "id": "bb21c0c4-c43f-4913-9bf9-707bd7f1769a", + "underlay_address": "fd00:1122:3344:103::11", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:103::11]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + }, + { + "id": "ccb0d612-9d7c-41ba-bd51-661c3c75ec91", + "underlay_address": "fd00:1122:3344:103::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::9]:32345", + "dataset": { + "pool_name": "oxp_c8feed09-6556-4409-af8e-563c97d556eb" + } + } + }, + { + "id": "ccef3e95-cae5-4072-9b7d-49e56fa9dff4", + "underlay_address": "fd00:1122:3344:103::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::d]:32345", + "dataset": { + "pool_name": "oxp_5b8b10a4-d527-4176-a049-1e9c2a10edea" + } + } + }, + { + "id": "d42fc874-8416-4212-b5a0-febba4c6b0ac", + "underlay_address": "fd00:1122:3344:103::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::b]:32345", + "dataset": { + "pool_name": "oxp_83a60822-1299-471c-bc79-3be0f87cf7c3" + } + } + }, + { + "id": "d5ba7d45-26d7-45ca-8c0a-a560f243b5fa", + "underlay_address": "fd00:1122:3344:103::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::e]:32345", + "dataset": { + "pool_name": "oxp_4ffc6336-04d1-4f21-9c27-a95aa640f960" + } + } + }, + { + "id": "da27ee6a-9841-46aa-acda-7cce3e04cb60", + "underlay_address": "fd00:1122:3344:103::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:103::3]:32221", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "e09525cf-9c87-4d05-8860-1356dd0dee8a", + "underlay_address": "fd00:1122:3344:103::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::f]:32345", + "dataset": { + "pool_name": "oxp_e2244950-dda0-4dfa-8b63-5ffac16c5a62" + } + } + }, + { + "id": "f7264a3e-e8e7-4476-8df3-76c9d00a383b", + "underlay_address": "fd00:1122:3344:103::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::c]:32345", + "dataset": { + "pool_name": "oxp_f6813b0c-ab5a-4fd0-8e24-95de9b65280d" + } + } + } + ] + } + } + } + }, + { + "id": "623f7e48-6c47-4e17-8aa3-cebe98ca4287", + "errors": [ + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page Cmpa: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"431a5722-e328-450a-89f4-017274532055\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:10 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed 
to deserialize message\", request_id: \"431a5722-e328-450a-89f4-017274532055\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaActive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"38399802-a750-4c20-8243-f8df25e6c01b\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:11 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"38399802-a750-4c20-8243-f8df25e6c01b\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaInactive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"cbbbe7aa-4a9f-44d3-9713-6c12a1d2b00a\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:11 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"cbbbe7aa-4a9f-44d3-9713-6c12a1d2b00a\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaScratch: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"a93a52ae-2837-4a80-a8d9-68f43bdaabe9\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:11 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"a93a52ae-2837-4a80-a8d9-68f43bdaabe9\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page Cmpa: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"0016667c-69f3-4359-9ed8-176e8d03f270\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:11 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"0016667c-69f3-4359-9ed8-176e8d03f270\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaActive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"0f8d60bc-31ee-460d-a8b5-9073728c2402\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:11 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"0f8d60bc-31ee-460d-a8b5-9073728c2402\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaInactive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": 
\"595cf967-cade-4ae6-87f5-d04e5dbc55b9\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:11 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"595cf967-cade-4ae6-87f5-d04e5dbc55b9\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaScratch: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"83fe7422-15cd-48d3-b21d-967479fb090c\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:11 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"83fe7422-15cd-48d3-b21d-967479fb090c\" }" + ], + "time_started": "2024-03-01T19:18:07.755939Z", + "time_done": "2024-03-01T19:18:12.110340Z", + "collector": "54c947d2-6355-453c-80fc-8f49cc2129ee", + "baseboards": [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + } + ], + "cabooses": [ + { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + }, + { + "board": "gimlet-c", + "git_commit": "620f18f924d180d13a0577bd51d7c48c1d670549", + "name": "gimlet-c", + "version": "1.0.5" + }, + { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + }, + { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + }, + { + "board": "oxide-rot-1", + "git_commit": "08f5a258dd48728cfaae68fcc7fbd56c7359329c", + "name": "oxide-rot-1", + "version": "1.0.4" + }, + { + "board": "oxide-rot-1", + "git_commit": "4b2a3469ee63f14e237ed26d741c49e8cc27261f", + "name": "oxide-rot-1", + "version": "0.0.0-git" + }, + { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + }, + { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1", + "version": "1.0.0" + }, + { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1-dev", + "version": "0.0.0-git" + }, + { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "sidecar-b", + "version": "1.0.6" + }, + { + "board": "sidecar-b", + "git_commit": "a096c4c17b17772c72dbfb03e0b9c96e59d9a0fe", + "name": "sidecar-b", + "version": "0.0.0-git" + }, + { + "board": "sidecar-b", + "git_commit": "f04c427514943573272da3524aa4d976d069ac95", + "name": "sidecar-b", + "version": "0.0.0-git" + } + ], + "rot_pages": [ + { + "data_base64": 
"AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + }, + { + "data_base64": "AAAAABgAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa0EBHkNRkpjbiUrOzKdmz2xKt2ubIbVZ4ALX8/sXkig=" + }, + { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + }, + { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + }, + { + "data_base64": "AAAAADYAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKR50f2Jw5Dyd7j2FPBE4r77CMk4azEk2sBLHfrvOts=" + }, + { + 
"data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + }, + { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + ], + "sps": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:10.815811Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "switch", + "sp_slot": 0, + "baseboard_revision": 4, + "hubris_archive": "9184be93590e655e", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:11.387242Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "sp_type": "switch", + "sp_slot": 1, + "baseboard_revision": 4, + "hubris_archive": "9184be93590e655e", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:09.650491Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 17, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:10.237239Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 15, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:08.884903Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 14, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:07.768812Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 16, + "baseboard_revision": 6, + "hubris_archive": "3c670b44fdeef4af", + "power_state": "A0" + } + ] + ], + "rots": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:10.815811Z", + "source": 
"http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "4599e4d7bc9b2164e8c0f0f7ce4bcae2bd35d635c8450854ea9a94ce49a2ca0f", + "slot_b_sha3_256_digest": "1e42a0761e5bd1d9f9a5e2c44acc34e7b7e007792977cdb0523d271db2e80c8c" + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:11.387242Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "active_slot": { + "slot": "b" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "92e07077972c83af6872ec817fb88d74a36f6956276505e9771302c5bb7ec2a8", + "slot_b_sha3_256_digest": "fb25825eaf97f8f2307afe3caebf0b70865499f6d116db3aff382c505b4d8dd7" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:09.650491Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "96caca4fb6a789c228cdb69ca4afe300cecd46fb611e1fb12c384fe9597a0d1c", + "slot_b_sha3_256_digest": "29215490b1984f4cfcea7d95176b8338466b2fc60123cfc27e895209bef506bb" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:10.237239Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "96caca4fb6a789c228cdb69ca4afe300cecd46fb611e1fb12c384fe9597a0d1c", + "slot_b_sha3_256_digest": "29215490b1984f4cfcea7d95176b8338466b2fc60123cfc27e895209bef506bb" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:08.884903Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "b" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "7d081250f8eb830b7dc5067334316f497a184b01788b5614a7a7f88b93a87c20", + "slot_b_sha3_256_digest": "d416d5ba9d0d3bd495f0f5fcd2aecc28cd83b53d1a8f94a28241d85b30761451" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:07.768812Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "7d081250f8eb830b7dc5067334316f497a184b01788b5614a7a7f88b93a87c20", + "slot_b_sha3_256_digest": "4ec328195a23e86a1545d5ee576e73834aae81fb79830a54d0503475875f1760" + } + ] + ], + "cabooses_found": { + "SpSlot0": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:10.817224Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": 
"sidecar-b", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:11.388557Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "sidecar-b", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:09.651842Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:10.238618Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:08.886274Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:07.777292Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ] + ], + "SpSlot1": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:10.822223Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "f04c427514943573272da3524aa4d976d069ac95", + "name": "sidecar-b", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:11.389874Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "a096c4c17b17772c72dbfb03e0b9c96e59d9a0fe", + "name": "sidecar-b", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:09.653307Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:10.240066Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:08.887692Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + 
"time_collected": "2024-03-01T19:18:07.779701Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "620f18f924d180d13a0577bd51d7c48c1d670549", + "name": "gimlet-c", + "version": "1.0.5" + } + } + ] + ], + "RotSlotA": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:11.029595Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1-dev", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:11.595974Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "4b2a3469ee63f14e237ed26d741c49e8cc27261f", + "name": "oxide-rot-1", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:09.905198Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:10.491985Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:09.148589Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:08.142368Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ] + ], + "RotSlotB": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:11.236464Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:11.830868Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1", + "version": "1.0.0" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.164175Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:10.743940Z", + 
"source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:09.402608Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:08.642339Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "08f5a258dd48728cfaae68fcc7fbd56c7359329c", + "name": "oxide-rot-1", + "version": "1.0.4" + } + } + ] + ] + }, + "rot_pages_found": { + "Cmpa": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.181324Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:10.760461Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:09.419090Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": 
"oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:08.722917Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ] + ], + "CfpaActive": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.197812Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:10.776526Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + 
"time_collected": "2024-03-01T19:18:09.491156Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:08.813851Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ], + "CfpaInactive": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.213728Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADYAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKR50f2Jw5Dyd7j2FPBE4r77CMk4azEk2sBLHfrvOts=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:10.792457Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": 
"AAAAABgAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa0EBHkNRkpjbiUrOzKdmz2xKt2ubIbVZ4ALX8/sXkig=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:09.562113Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:08.845830Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ], + "CfpaScratch": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.229796Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + 
"time_collected": "2024-03-01T19:18:10.809035Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:09.643193Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:08.878183Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ] + }, + "sled_agents": { + "1762267a-a0cb-4c9a-88b4-8ce828cbc021": { + "time_collected": "2024-03-01T19:18:12.000624Z", + "source": "http://[fd00:1122:3344:101::1]:12345", + "sled_id": "1762267a-a0cb-4c9a-88b4-8ce828cbc021", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + "sled_agent_address": "[fd00:1122:3344:101::1]:12345", + "sled_role": "scrimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f": { + "time_collected": "2024-03-01T19:18:12.036086Z", + "source": "http://[fd00:1122:3344:121::1]:12345", + "sled_id": "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + "sled_agent_address": "[fd00:1122:3344:121::1]:12345", + "sled_role": "gimlet", + 
"usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "a6634f64-f6fb-426c-b00b-9b30aed9f53a": { + "time_collected": "2024-03-01T19:18:12.071731Z", + "source": "http://[fd00:1122:3344:102::1]:12345", + "sled_id": "a6634f64-f6fb-426c-b00b-9b30aed9f53a", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + "sled_agent_address": "[fd00:1122:3344:102::1]:12345", + "sled_role": "gimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89": { + "time_collected": "2024-03-01T19:18:12.107230Z", + "source": "http://[fd00:1122:3344:103::1]:12345", + "sled_id": "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + "sled_agent_address": "[fd00:1122:3344:103::1]:12345", + "sled_role": "scrimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + } + }, + "omicron_zones": { + "1762267a-a0cb-4c9a-88b4-8ce828cbc021": { + "time_collected": "2024-03-01T19:18:12.001507Z", + "source": "http://[fd00:1122:3344:101::1]:12345", + "sled_id": "1762267a-a0cb-4c9a-88b4-8ce828cbc021", + "zones": { + "generation": 5, + "zones": [ + { + "id": "0706860a-3801-42a7-8000-fff741f7665b", + "underlay_address": "fd00:1122:3344:101::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::c]:32345", + "dataset": { + "pool_name": "oxp_1d864940-c723-4f58-82e8-b03772b42356" + } + } + }, + { + "id": "34a279a2-a025-41ff-a392-1addac400f38", + "underlay_address": "fd00:1122:3344:101::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::3]:32221", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "3d344935-6f48-405f-8794-2c6e8fa4b1c2", + "underlay_address": "fd00:1122:3344:1::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + }, + "dns_address": "[fd00:1122:3344:1::1]:53", + "gz_address": "fd00:1122:3344:1::2", + "gz_address_index": 0, + "http_address": "[fd00:1122:3344:1::1]:5353" + } + }, + { + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "underlay_address": "fd00:1122:3344:101::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.2", + "external_tls": true, + "internal_address": "[fd00:1122:3344:101::5]:12221", + "nic": { + "id": "81a0385e-86b9-4109-b63d-61c1407c50d1", + "ip": "172.30.2.5", + "kind": { + "type": "service", + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629" + }, + "mac": "A8:40:25:FF:E1:17", + "name": "nexus-59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "67852e14-ce7f-4e50-a61d-4059d0c057b9", + "underlay_address": "fd00:1122:3344:101::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::10]:32345", + "dataset": { + "pool_name": "oxp_eae1515c-cd4a-4d7b-9a99-dbc24cf2e337" + } + } + }, + { + "id": "6ace1a69-d55a-45f0-a127-99c90f1b070c", + "underlay_address": "fd00:1122:3344:101::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::f]:32345", + "dataset": { + "pool_name": "oxp_466351db-2ced-45fa-9431-22de1c4bd480" + } + } + }, + { + "id": "84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe", + "underlay_address": 
"fd00:1122:3344:101::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::9]:32345", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "854c1544-d0ea-4d65-9cf4-776e91a4fcb5", + "underlay_address": "fd00:1122:3344:101::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::8]:32345", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "86ee7359-aa35-41d9-8a76-54961bc516a1", + "underlay_address": "fd00:1122:3344:101::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::4]:32221", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "925ea436-fad4-4fff-8ae0-24edd725a2b0", + "underlay_address": "fd00:1122:3344:101::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::d]:32345", + "dataset": { + "pool_name": "oxp_88ddd98f-3181-4797-864a-7e8d5b028657" + } + } + }, + { + "id": "be272d1e-4d34-4478-94b4-a76f5fcbef36", + "underlay_address": "fd00:1122:3344:101::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::a]:32345", + "dataset": { + "pool_name": "oxp_9041d780-b5ee-4159-a1db-8c4075e2176d" + } + } + }, + { + "id": "bebd754d-d06c-45f4-99bf-e1aab6253ab8", + "underlay_address": "fd00:1122:3344:101::6", + "zone_type": { + "type": "oximeter", + "address": "[fd00:1122:3344:101::6]:12223" + } + }, + { + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc", + "underlay_address": "fd00:1122:3344:101::11", + "zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:101::11]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9741f03f-f150-4229-9f8d-e29a69c87883", + "ip": "172.30.3.5", + "kind": { + "type": "service", + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc" + }, + "mac": "A8:40:25:FF:E7:A8", + "name": "ntp-d630cca7-e3f7-47a8-aee8-ad0790895abc", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.5", + "first_port": 0, + "last_port": 16383 + } + } + }, + { + "id": "d8c51c79-46ff-44bd-80be-08d6283f4e21", + "underlay_address": "fd00:1122:3344:101::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::e]:32345", + "dataset": { + "pool_name": "oxp_3f95c036-d5d8-4d08-8c78-c0635ca34698" + } + } + }, + { + "id": "dbbc0422-2b23-4d9e-b1db-8003cfc29ad0", + "underlay_address": "fd00:1122:3344:101::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::b]:32345", + "dataset": { + "pool_name": "oxp_1a4447be-5c84-4dad-b22c-d35e5e1911b0" + } + } + }, + { + "id": "f33a2375-212d-42e4-b539-6079aeb4e5b7", + "underlay_address": "fd00:1122:3344:101::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:101::7]:17000" + } + } + ] + } + }, + "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f": { + "time_collected": "2024-03-01T19:18:12.036797Z", + "source": "http://[fd00:1122:3344:121::1]:12345", + "sled_id": "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f", + "zones": { + "generation": 3, + "zones": [ + { + "id": "0eca39c6-62e2-4a1f-8ea2-e9332a774a26", + "underlay_address": "fd00:1122:3344:121::28", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::28]:32345", + "dataset": { + "pool_name": "oxp_cc36b0cc-818a-4c7f-b50d-057ff7e7e8e3" + } + } + }, + { + "id": "12431d3e-1516-4822-98b3-3711ff912e69", + "underlay_address": "fd00:1122:3344:121::22", + 
"zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::22]:32345", + "dataset": { + "pool_name": "oxp_0622b2cf-297e-4356-b94c-d74309f443ba" + } + } + }, + { + "id": "2d5c29f7-4915-4876-82f8-e388b15c5067", + "underlay_address": "fd00:1122:3344:121::21", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:121::21]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + }, + { + "id": "56414d76-9b8c-4e15-acf1-cd269c2d8638", + "underlay_address": "fd00:1122:3344:121::25", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::25]:32345", + "dataset": { + "pool_name": "oxp_574728ca-1559-48f3-a46c-1e608966e6a0" + } + } + }, + { + "id": "891cb9da-301d-40cc-b93d-5fcb350fa968", + "underlay_address": "fd00:1122:3344:121::29", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::29]:32345", + "dataset": { + "pool_name": "oxp_e4a4a09a-43b8-460c-bc43-1f935600b681" + } + } + }, + { + "id": "d9014907-24c0-4c33-a475-db15942f4f8f", + "underlay_address": "fd00:1122:3344:121::23", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::23]:32345", + "dataset": { + "pool_name": "oxp_276822e2-d7ce-4eae-b5a4-38d2e8586a37" + } + } + }, + { + "id": "e950de25-7306-4cf7-a6f4-6db11adaa310", + "underlay_address": "fd00:1122:3344:121::27", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::27]:32345", + "dataset": { + "pool_name": "oxp_8c7a7d84-c269-4178-9dbb-37f363e7958c" + } + } + }, + { + "id": "fe3dc092-ea23-42d7-9f1d-36eed72b539c", + "underlay_address": "fd00:1122:3344:121::26", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::26]:32345", + "dataset": { + "pool_name": "oxp_6d2b29c5-aa58-40b7-b630-88301f08e945" + } + } + }, + { + "id": "ff8cce49-a721-4ad1-b09e-2bd6e4e42d00", + "underlay_address": "fd00:1122:3344:121::24", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::24]:32345", + "dataset": { + "pool_name": "oxp_2b0da7bc-2afc-4a16-95e1-5126bcb9b6b1" + } + } + } + ] + } + }, + "a6634f64-f6fb-426c-b00b-9b30aed9f53a": { + "time_collected": "2024-03-01T19:18:12.072810Z", + "source": "http://[fd00:1122:3344:102::1]:12345", + "sled_id": "a6634f64-f6fb-426c-b00b-9b30aed9f53a", + "zones": { + "generation": 5, + "zones": [ + { + "id": "0ebd2b5b-2dec-4d17-8e23-e468c04dacb6", + "underlay_address": "fd00:1122:3344:102::11", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::11]:32345", + "dataset": { + "pool_name": "oxp_0c07bcc1-7666-4754-881d-4135992ff561" + } + } + }, + { + "id": "18b99404-d8b2-4463-bc99-638d9b678ab3", + "underlay_address": "fd00:1122:3344:102::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::e]:32345", + "dataset": { + "pool_name": "oxp_f9a037da-7d15-40d9-957d-7e42aadb333c" + } + } + }, + { + "id": "479811e7-6409-45d4-b294-5333534e47ed", + "underlay_address": "fd00:1122:3344:102::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::9]:32345", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "54d1b693-bc4d-4c47-9009-c6f624073801", + "underlay_address": "fd00:1122:3344:102::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::a]:32345", + "dataset": { + "pool_name": 
"oxp_02cc2369-9924-41c7-aa74-79d6d548f2dd" + } + } + }, + { + "id": "5b743072-9369-45d7-b7c9-dbdf736301f8", + "underlay_address": "fd00:1122:3344:2::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + }, + "dns_address": "[fd00:1122:3344:2::1]:53", + "gz_address": "fd00:1122:3344:2::2", + "gz_address_index": 1, + "http_address": "[fd00:1122:3344:2::1]:5353" + } + }, + { + "id": "5c4163fa-d6a6-4a0c-9476-3dda135bc9a4", + "underlay_address": "fd00:1122:3344:102::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:102::7]:17000" + } + }, + { + "id": "61a39188-dd82-43f6-a87c-ebb4c2d54ac4", + "underlay_address": "fd00:1122:3344:102::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::8]:32345", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "658076ed-fb43-49e7-a813-df3d23497848", + "underlay_address": "fd00:1122:3344:102::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::10]:32345", + "dataset": { + "pool_name": "oxp_7929b5fb-4ef2-4791-92e6-1a46c7f608b9" + } + } + }, + { + "id": "6edb836e-b100-4080-bb03-738d6441d571", + "underlay_address": "fd00:1122:3344:102::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::c]:32345", + "dataset": { + "pool_name": "oxp_fe74ae4d-d7bc-4293-9ec9-69132aca4aba" + } + } + }, + { + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b", + "underlay_address": "fd00:1122:3344:102::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.3", + "external_tls": true, + "internal_address": "[fd00:1122:3344:102::5]:12221", + "nic": { + "id": "3771a19c-cc40-4fd5-b99d-ed10071018a7", + "ip": "172.30.2.6", + "kind": { + "type": "service", + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b" + }, + "mac": "A8:40:25:FF:B2:AF", + "name": "nexus-7f83d784-9c5c-498f-b559-4176bbafbc4b", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "8052938f-73bc-4c54-8ed0-c62f6d77b9fd", + "underlay_address": "fd00:1122:3344:102::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::d]:32345", + "dataset": { + "pool_name": "oxp_93e4e338-0ad1-4ae1-a120-2cfb59b4df42" + } + } + }, + { + "id": "86bc0a2a-e24a-4bc1-8222-89207a0937f9", + "underlay_address": "fd00:1122:3344:102::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::4]:32221", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "9241b9da-8188-4aae-a9dc-62acb453618f", + "underlay_address": "fd00:1122:3344:102::6", + "zone_type": { + "type": "clickhouse", + "address": "[fd00:1122:3344:102::6]:8123", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "a781a349-62b2-41f0-9dd7-c64d290aef82", + "underlay_address": "fd00:1122:3344:102::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::f]:32345", + "dataset": { + "pool_name": "oxp_4e92cbcf-f194-4ffc-97e3-fdf3df7cc9ae" + } + } + }, + { + "id": "c67814bf-9972-4bb0-a666-24778ca70a16", + "underlay_address": "fd00:1122:3344:102::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::3]:32221", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "underlay_address": "fd00:1122:3344:102::12", + 
"zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:102::12]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9f03c82c-3192-498d-978d-a175049ac389", + "ip": "172.30.3.6", + "kind": { + "type": "service", + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909" + }, + "mac": "A8:40:25:FF:E4:11", + "name": "ntp-c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.6", + "first_port": 16384, + "last_port": 32767 + } + } + }, + { + "id": "dfc27183-ed85-44c3-ac3c-4427c4cb03e3", + "underlay_address": "fd00:1122:3344:102::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::b]:32345", + "dataset": { + "pool_name": "oxp_742699b5-4ded-406d-9a08-24648d3b2efb" + } + } + } + ] + } + }, + "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89": { + "time_collected": "2024-03-01T19:18:12.108386Z", + "source": "http://[fd00:1122:3344:103::1]:12345", + "sled_id": "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89", + "zones": { + "generation": 5, + "zones": [ + { + "id": "185e6b00-7e18-456c-9dfc-edd0194ac207", + "underlay_address": "fd00:1122:3344:103::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::a]:32345", + "dataset": { + "pool_name": "oxp_6aa59516-2018-4494-af6b-46704b20f6dc" + } + } + }, + { + "id": "1fc78bc7-feca-4a10-9beb-58b53fda5210", + "underlay_address": "fd00:1122:3344:103::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::10]:32345", + "dataset": { + "pool_name": "oxp_07d66b6b-8059-4d07-a0b3-8ce41e2a8c98" + } + } + }, + { + "id": "45dccfd6-4674-43c6-a0e7-4dde5f24d90d", + "underlay_address": "fd00:1122:3344:3::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "[fd00:1122:3344:3::1]:53", + "gz_address": "fd00:1122:3344:3::2", + "gz_address_index": 2, + "http_address": "[fd00:1122:3344:3::1]:5353" + } + }, + { + "id": "4b74c418-0a08-4489-b652-e97267c1c220", + "underlay_address": "fd00:1122:3344:103::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::7]:32345", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee", + "underlay_address": "fd00:1122:3344:103::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.4", + "external_tls": true, + "internal_address": "[fd00:1122:3344:103::5]:12221", + "nic": { + "id": "1f55dc2b-18ee-485c-8831-b603a8df78e6", + "ip": "172.30.2.7", + "kind": { + "type": "service", + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee" + }, + "mac": "A8:40:25:FF:96:05", + "name": "nexus-54c947d2-6355-453c-80fc-8f49cc2129ee", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "60d64b70-89f5-4774-9e41-2c79c4173911", + "underlay_address": "fd00:1122:3344:103::6", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:103::6]:17000" + } + }, + { + "id": "921958c0-7761-4294-945d-2a078327bf2c", + "underlay_address": "fd00:1122:3344:103::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::8]:32345", + "dataset": { + "pool_name": "oxp_7eabbdcc-28a3-4af9-bc4a-8d9b745d4df3" + } + } + }, + { + "id": "9df22034-59f6-431f-82e7-258fe983879b", + "underlay_address": "fd00:1122:3344:103::4", + 
"zone_type": { + "type": "external_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "172.20.28.1:53", + "http_address": "[fd00:1122:3344:103::4]:5353", + "nic": { + "id": "0376a963-e3c1-44bc-9cbf-9e8cca58410b", + "ip": "172.30.1.5", + "kind": { + "type": "service", + "id": "9df22034-59f6-431f-82e7-258fe983879b" + }, + "mac": "A8:40:25:FF:90:12", + "name": "external-dns-9df22034-59f6-431f-82e7-258fe983879b", + "primary": true, + "slot": 0, + "subnet": "172.30.1.0/24", + "vni": 100 + } + } + }, + { + "id": "bb21c0c4-c43f-4913-9bf9-707bd7f1769a", + "underlay_address": "fd00:1122:3344:103::11", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:103::11]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + }, + { + "id": "ccb0d612-9d7c-41ba-bd51-661c3c75ec91", + "underlay_address": "fd00:1122:3344:103::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::9]:32345", + "dataset": { + "pool_name": "oxp_c8feed09-6556-4409-af8e-563c97d556eb" + } + } + }, + { + "id": "ccef3e95-cae5-4072-9b7d-49e56fa9dff4", + "underlay_address": "fd00:1122:3344:103::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::d]:32345", + "dataset": { + "pool_name": "oxp_5b8b10a4-d527-4176-a049-1e9c2a10edea" + } + } + }, + { + "id": "d42fc874-8416-4212-b5a0-febba4c6b0ac", + "underlay_address": "fd00:1122:3344:103::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::b]:32345", + "dataset": { + "pool_name": "oxp_83a60822-1299-471c-bc79-3be0f87cf7c3" + } + } + }, + { + "id": "d5ba7d45-26d7-45ca-8c0a-a560f243b5fa", + "underlay_address": "fd00:1122:3344:103::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::e]:32345", + "dataset": { + "pool_name": "oxp_4ffc6336-04d1-4f21-9c27-a95aa640f960" + } + } + }, + { + "id": "da27ee6a-9841-46aa-acda-7cce3e04cb60", + "underlay_address": "fd00:1122:3344:103::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:103::3]:32221", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "e09525cf-9c87-4d05-8860-1356dd0dee8a", + "underlay_address": "fd00:1122:3344:103::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::f]:32345", + "dataset": { + "pool_name": "oxp_e2244950-dda0-4dfa-8b63-5ffac16c5a62" + } + } + }, + { + "id": "f7264a3e-e8e7-4476-8df3-76c9d00a383b", + "underlay_address": "fd00:1122:3344:103::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::c]:32345", + "dataset": { + "pool_name": "oxp_f6813b0c-ab5a-4fd0-8e24-95de9b65280d" + } + } + } + ] + } + } + } + }, + { + "id": "4e9fbfc2-eb1d-4d45-bdb9-64d965a490c5", + "errors": [ + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page Cmpa: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"f6c35180-8831-4718-995a-a08427e3f7d0\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:11 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed 
to deserialize message\", request_id: \"f6c35180-8831-4718-995a-a08427e3f7d0\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaActive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"2206bf05-edce-4002-bc9c-72f93cfa45b1\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:11 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"2206bf05-edce-4002-bc9c-72f93cfa45b1\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaInactive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"081feef1-085b-4f9a-9c0a-9c07a7a67ece\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:11 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"081feef1-085b-4f9a-9c0a-9c07a7a67ece\" }", + "MGS \"http://[fd00:1122:3344:101::2]:12225\": SP SpIdentifier { slot: 0, type_: Switch }: rot page CfpaScratch: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"e04172a0-55e3-475b-a070-a383d22f3bb0\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:11 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 0 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"e04172a0-55e3-475b-a070-a383d22f3bb0\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page Cmpa: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"c8feb285-1846-467e-acac-5fbfb5e401cf\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:12 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"c8feb285-1846-467e-acac-5fbfb5e401cf\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaActive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"74c364c5-4838-43e3-af27-16c1f88e771e\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:12 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"74c364c5-4838-43e3-af27-16c1f88e771e\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaInactive: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": 
\"e72fd27c-41fd-4597-a623-0b90dde3751f\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:12 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"e72fd27c-41fd-4597-a623-0b90dde3751f\" }", + "MGS \"http://[fd00:1122:3344:103::2]:12225\": SP SpIdentifier { slot: 1, type_: Switch }: rot page CfpaScratch: Error Response: status: 503 Service Unavailable; headers: {\"content-type\": \"application/json\", \"x-request-id\": \"09377390-54dc-4481-a94c-fdc884b3cf03\", \"content-length\": \"281\", \"date\": \"Fri, 01 Mar 2024 19:18:12 GMT\"}; value: Error { error_code: Some(\"SpCommunicationFailed\"), message: \"error communicating with SP SpIdentifier { typ: Switch, slot: 1 }: Error response from SP: sprot: failed to deserialize message: sprot: failed to deserialize message\", request_id: \"09377390-54dc-4481-a94c-fdc884b3cf03\" }" + ], + "time_started": "2024-03-01T19:18:07.756995Z", + "time_done": "2024-03-01T19:18:13.548706Z", + "collector": "7f83d784-9c5c-498f-b559-4176bbafbc4b", + "baseboards": [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + } + ], + "cabooses": [ + { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + }, + { + "board": "gimlet-c", + "git_commit": "620f18f924d180d13a0577bd51d7c48c1d670549", + "name": "gimlet-c", + "version": "1.0.5" + }, + { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + }, + { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + }, + { + "board": "oxide-rot-1", + "git_commit": "08f5a258dd48728cfaae68fcc7fbd56c7359329c", + "name": "oxide-rot-1", + "version": "1.0.4" + }, + { + "board": "oxide-rot-1", + "git_commit": "4b2a3469ee63f14e237ed26d741c49e8cc27261f", + "name": "oxide-rot-1", + "version": "0.0.0-git" + }, + { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + }, + { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1", + "version": "1.0.0" + }, + { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1-dev", + "version": "0.0.0-git" + }, + { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "sidecar-b", + "version": "1.0.6" + }, + { + "board": "sidecar-b", + "git_commit": "a096c4c17b17772c72dbfb03e0b9c96e59d9a0fe", + "name": "sidecar-b", + "version": "0.0.0-git" + }, + { + "board": "sidecar-b", + "git_commit": "f04c427514943573272da3524aa4d976d069ac95", + "name": "sidecar-b", + "version": "0.0.0-git" + } + ], + "rot_pages": [ + { + "data_base64": 
"AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + }, + { + "data_base64": "AAAAABgAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa0EBHkNRkpjbiUrOzKdmz2xKt2ubIbVZ4ALX8/sXkig=" + }, + { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + }, + { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + }, + { + "data_base64": "AAAAADYAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKR50f2Jw5Dyd7j2FPBE4r77CMk4azEk2sBLHfrvOts=" + }, + { + 
"data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + }, + { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + ], + "sps": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:11.402668Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "switch", + "sp_slot": 0, + "baseboard_revision": 4, + "hubris_archive": "9184be93590e655e", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:12.176222Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "sp_type": "switch", + "sp_slot": 1, + "baseboard_revision": 4, + "hubris_archive": "9184be93590e655e", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.088383Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 17, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:10.815174Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 15, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:08.950823Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 14, + "baseboard_revision": 6, + "hubris_archive": "5c745f4b4fa66f47", + "power_state": "A0" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:07.775665Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "sp_type": "sled", + "sp_slot": 16, + "baseboard_revision": 6, + "hubris_archive": "3c670b44fdeef4af", + "power_state": "A0" + } + ] + ], + "rots": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:11.402668Z", + "source": 
"http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "4599e4d7bc9b2164e8c0f0f7ce4bcae2bd35d635c8450854ea9a94ce49a2ca0f", + "slot_b_sha3_256_digest": "1e42a0761e5bd1d9f9a5e2c44acc34e7b7e007792977cdb0523d271db2e80c8c" + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:12.176222Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "active_slot": { + "slot": "b" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "92e07077972c83af6872ec817fb88d74a36f6956276505e9771302c5bb7ec2a8", + "slot_b_sha3_256_digest": "fb25825eaf97f8f2307afe3caebf0b70865499f6d116db3aff382c505b4d8dd7" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.088383Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "96caca4fb6a789c228cdb69ca4afe300cecd46fb611e1fb12c384fe9597a0d1c", + "slot_b_sha3_256_digest": "29215490b1984f4cfcea7d95176b8338466b2fc60123cfc27e895209bef506bb" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:10.815174Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "96caca4fb6a789c228cdb69ca4afe300cecd46fb611e1fb12c384fe9597a0d1c", + "slot_b_sha3_256_digest": "29215490b1984f4cfcea7d95176b8338466b2fc60123cfc27e895209bef506bb" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:08.950823Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "b" + }, + "persistent_boot_preference": { + "slot": "b" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "7d081250f8eb830b7dc5067334316f497a184b01788b5614a7a7f88b93a87c20", + "slot_b_sha3_256_digest": "d416d5ba9d0d3bd495f0f5fcd2aecc28cd83b53d1a8f94a28241d85b30761451" + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:07.775665Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "active_slot": { + "slot": "a" + }, + "persistent_boot_preference": { + "slot": "a" + }, + "pending_persistent_boot_preference": null, + "transient_boot_preference": null, + "slot_a_sha3_256_digest": "7d081250f8eb830b7dc5067334316f497a184b01788b5614a7a7f88b93a87c20", + "slot_b_sha3_256_digest": "4ec328195a23e86a1545d5ee576e73834aae81fb79830a54d0503475875f1760" + } + ] + ], + "cabooses_found": { + "SpSlot0": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:11.404063Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": 
"sidecar-b", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:12.391683Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "sidecar-b", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.181795Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:10.816554Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:09.149044Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "gimlet-c", + "version": "1.0.8" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:07.778136Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ] + ], + "SpSlot1": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:11.405544Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "f04c427514943573272da3524aa4d976d069ac95", + "name": "sidecar-b", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:12.425692Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "sidecar-b", + "git_commit": "a096c4c17b17772c72dbfb03e0b9c96e59d9a0fe", + "name": "sidecar-b", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.229994Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:10.817966Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:09.402701Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "gimlet-c", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + 
"time_collected": "2024-03-01T19:18:07.836136Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "gimlet-c", + "git_commit": "620f18f924d180d13a0577bd51d7c48c1d670549", + "name": "gimlet-c", + "version": "1.0.5" + } + } + ] + ], + "RotSlotA": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:11.612405Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1-dev", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:12.633926Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "4b2a3469ee63f14e237ed26d741c49e8cc27261f", + "name": "oxide-rot-1", + "version": "0.0.0-git" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.482137Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:11.072878Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:09.718444Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:08.338242Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ] + ], + "RotSlotB": [ + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220005" + }, + { + "time_collected": "2024-03-01T19:18:11.819343Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000006", + "serial_number": "BRM44220011" + }, + { + "time_collected": "2024-03-01T19:18:12.889742Z", + "source": "http://[fd00:1122:3344:103::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "ec0b7fe84e51e6d320556380a6f1dbb464ab9647", + "name": "oxide-rot-1", + "version": "1.0.0" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.734260Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:11.324822Z", + 
"source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "02cc4345b5def59e96ef1eb576889922e9468113", + "name": "oxide-rot-1", + "version": "1.0.5" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:09.971380Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "b398d0778a806acad7b015a3e183ad56a28a620d", + "name": "oxide-rot-1", + "version": "1.0.6" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:08.798226Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "caboose": { + "board": "oxide-rot-1", + "git_commit": "08f5a258dd48728cfaae68fcc7fbd56c7359329c", + "name": "oxide-rot-1", + "version": "1.0.4" + } + } + ] + ] + }, + "rot_pages_found": { + "Cmpa": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.750544Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:11.341290Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:09.987916Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": 
"oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:08.829941Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "oAAAeAAAAAAAAAAAAAAAAP8CAP3/AgD9AAAAAAUAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARWUu1VIp1fpGOb+BW4q2eCEKXyVVUF6Al2HiOrPVdrwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + } + ] + ], + "CfpaActive": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.766709Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:11.357423Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + 
"time_collected": "2024-03-01T19:18:10.004467Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:08.861702Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ], + "CfpaInactive": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.782522Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADYAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKR50f2Jw5Dyd7j2FPBE4r77CMk4azEk2sBLHfrvOts=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + "time_collected": "2024-03-01T19:18:11.373385Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": 
"AAAAABgAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa0EBHkNRkpjbiUrOzKdmz2xKt2ubIbVZ4ALX8/sXkig=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:10.021096Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:08.893754Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ], + "CfpaScratch": [ + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + { + "time_collected": "2024-03-01T19:18:10.798536Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT9AkBD3h7E4HX3ag0CWYdTvYhAwH5B7l6KL6QI/wPEQ=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + { + 
"time_collected": "2024-03-01T19:18:11.395564Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6n5IZG1N0E70rdhMMrS3k04zLw+wi+p0BE00p4LJpio=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + { + "time_collected": "2024-03-01T19:18:10.040934Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAADIAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUPHEZ9TS6v9VwoGSbVIfZcUfeve3umWF/LfWrI991Fg=" + } + } + ], + [ + { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + { + "time_collected": "2024-03-01T19:18:08.909759Z", + "source": "http://[fd00:1122:3344:101::2]:12225", + "page": { + "data_base64": "AAAAAAoAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAD/AgD9/wIA/QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9+zE5FW+go+MrbcURmve9xn7h79n9qXBEVZFgbbZLZE=" + } + } + ] + ] + }, + "sled_agents": { + "1762267a-a0cb-4c9a-88b4-8ce828cbc021": { + "time_collected": "2024-03-01T19:18:13.421323Z", + "source": "http://[fd00:1122:3344:101::1]:12345", + "sled_id": "1762267a-a0cb-4c9a-88b4-8ce828cbc021", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM44220001" + }, + "sled_agent_address": "[fd00:1122:3344:101::1]:12345", + "sled_role": "scrimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f": { + "time_collected": "2024-03-01T19:18:13.464287Z", + "source": "http://[fd00:1122:3344:121::1]:12345", + "sled_id": "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220004" + }, + "sled_agent_address": "[fd00:1122:3344:121::1]:12345", + "sled_role": "gimlet", + 
"usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "a6634f64-f6fb-426c-b00b-9b30aed9f53a": { + "time_collected": "2024-03-01T19:18:13.506888Z", + "source": "http://[fd00:1122:3344:102::1]:12345", + "sled_id": "a6634f64-f6fb-426c-b00b-9b30aed9f53a", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220046" + }, + "sled_agent_address": "[fd00:1122:3344:102::1]:12345", + "sled_role": "gimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + }, + "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89": { + "time_collected": "2024-03-01T19:18:13.546670Z", + "source": "http://[fd00:1122:3344:103::1]:12345", + "sled_id": "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89", + "baseboard_id": { + "part_number": "913-0000019", + "serial_number": "BRM42220081" + }, + "sled_agent_address": "[fd00:1122:3344:103::1]:12345", + "sled_role": "scrimlet", + "usable_hardware_threads": 128, + "usable_physical_ram": 1086608900096, + "reservoir_size": 869286281216 + } + }, + "omicron_zones": { + "1762267a-a0cb-4c9a-88b4-8ce828cbc021": { + "time_collected": "2024-03-01T19:18:13.422277Z", + "source": "http://[fd00:1122:3344:101::1]:12345", + "sled_id": "1762267a-a0cb-4c9a-88b4-8ce828cbc021", + "zones": { + "generation": 5, + "zones": [ + { + "id": "0706860a-3801-42a7-8000-fff741f7665b", + "underlay_address": "fd00:1122:3344:101::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::c]:32345", + "dataset": { + "pool_name": "oxp_1d864940-c723-4f58-82e8-b03772b42356" + } + } + }, + { + "id": "34a279a2-a025-41ff-a392-1addac400f38", + "underlay_address": "fd00:1122:3344:101::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::3]:32221", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "3d344935-6f48-405f-8794-2c6e8fa4b1c2", + "underlay_address": "fd00:1122:3344:1::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + }, + "dns_address": "[fd00:1122:3344:1::1]:53", + "gz_address": "fd00:1122:3344:1::2", + "gz_address_index": 0, + "http_address": "[fd00:1122:3344:1::1]:5353" + } + }, + { + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "underlay_address": "fd00:1122:3344:101::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.2", + "external_tls": true, + "internal_address": "[fd00:1122:3344:101::5]:12221", + "nic": { + "id": "81a0385e-86b9-4109-b63d-61c1407c50d1", + "ip": "172.30.2.5", + "kind": { + "type": "service", + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629" + }, + "mac": "A8:40:25:FF:E1:17", + "name": "nexus-59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "67852e14-ce7f-4e50-a61d-4059d0c057b9", + "underlay_address": "fd00:1122:3344:101::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::10]:32345", + "dataset": { + "pool_name": "oxp_eae1515c-cd4a-4d7b-9a99-dbc24cf2e337" + } + } + }, + { + "id": "6ace1a69-d55a-45f0-a127-99c90f1b070c", + "underlay_address": "fd00:1122:3344:101::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::f]:32345", + "dataset": { + "pool_name": "oxp_466351db-2ced-45fa-9431-22de1c4bd480" + } + } + }, + { + "id": "84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe", + "underlay_address": 
"fd00:1122:3344:101::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::9]:32345", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "854c1544-d0ea-4d65-9cf4-776e91a4fcb5", + "underlay_address": "fd00:1122:3344:101::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::8]:32345", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "86ee7359-aa35-41d9-8a76-54961bc516a1", + "underlay_address": "fd00:1122:3344:101::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::4]:32221", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "925ea436-fad4-4fff-8ae0-24edd725a2b0", + "underlay_address": "fd00:1122:3344:101::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::d]:32345", + "dataset": { + "pool_name": "oxp_88ddd98f-3181-4797-864a-7e8d5b028657" + } + } + }, + { + "id": "be272d1e-4d34-4478-94b4-a76f5fcbef36", + "underlay_address": "fd00:1122:3344:101::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::a]:32345", + "dataset": { + "pool_name": "oxp_9041d780-b5ee-4159-a1db-8c4075e2176d" + } + } + }, + { + "id": "bebd754d-d06c-45f4-99bf-e1aab6253ab8", + "underlay_address": "fd00:1122:3344:101::6", + "zone_type": { + "type": "oximeter", + "address": "[fd00:1122:3344:101::6]:12223" + } + }, + { + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc", + "underlay_address": "fd00:1122:3344:101::11", + "zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:101::11]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9741f03f-f150-4229-9f8d-e29a69c87883", + "ip": "172.30.3.5", + "kind": { + "type": "service", + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc" + }, + "mac": "A8:40:25:FF:E7:A8", + "name": "ntp-d630cca7-e3f7-47a8-aee8-ad0790895abc", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.5", + "first_port": 0, + "last_port": 16383 + } + } + }, + { + "id": "d8c51c79-46ff-44bd-80be-08d6283f4e21", + "underlay_address": "fd00:1122:3344:101::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::e]:32345", + "dataset": { + "pool_name": "oxp_3f95c036-d5d8-4d08-8c78-c0635ca34698" + } + } + }, + { + "id": "dbbc0422-2b23-4d9e-b1db-8003cfc29ad0", + "underlay_address": "fd00:1122:3344:101::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::b]:32345", + "dataset": { + "pool_name": "oxp_1a4447be-5c84-4dad-b22c-d35e5e1911b0" + } + } + }, + { + "id": "f33a2375-212d-42e4-b539-6079aeb4e5b7", + "underlay_address": "fd00:1122:3344:101::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:101::7]:17000" + } + } + ] + } + }, + "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f": { + "time_collected": "2024-03-01T19:18:13.464998Z", + "source": "http://[fd00:1122:3344:121::1]:12345", + "sled_id": "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f", + "zones": { + "generation": 3, + "zones": [ + { + "id": "0eca39c6-62e2-4a1f-8ea2-e9332a774a26", + "underlay_address": "fd00:1122:3344:121::28", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::28]:32345", + "dataset": { + "pool_name": "oxp_cc36b0cc-818a-4c7f-b50d-057ff7e7e8e3" + } + } + }, + { + "id": "12431d3e-1516-4822-98b3-3711ff912e69", + "underlay_address": "fd00:1122:3344:121::22", + 
"zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::22]:32345", + "dataset": { + "pool_name": "oxp_0622b2cf-297e-4356-b94c-d74309f443ba" + } + } + }, + { + "id": "2d5c29f7-4915-4876-82f8-e388b15c5067", + "underlay_address": "fd00:1122:3344:121::21", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:121::21]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + }, + { + "id": "56414d76-9b8c-4e15-acf1-cd269c2d8638", + "underlay_address": "fd00:1122:3344:121::25", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::25]:32345", + "dataset": { + "pool_name": "oxp_574728ca-1559-48f3-a46c-1e608966e6a0" + } + } + }, + { + "id": "891cb9da-301d-40cc-b93d-5fcb350fa968", + "underlay_address": "fd00:1122:3344:121::29", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::29]:32345", + "dataset": { + "pool_name": "oxp_e4a4a09a-43b8-460c-bc43-1f935600b681" + } + } + }, + { + "id": "d9014907-24c0-4c33-a475-db15942f4f8f", + "underlay_address": "fd00:1122:3344:121::23", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::23]:32345", + "dataset": { + "pool_name": "oxp_276822e2-d7ce-4eae-b5a4-38d2e8586a37" + } + } + }, + { + "id": "e950de25-7306-4cf7-a6f4-6db11adaa310", + "underlay_address": "fd00:1122:3344:121::27", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::27]:32345", + "dataset": { + "pool_name": "oxp_8c7a7d84-c269-4178-9dbb-37f363e7958c" + } + } + }, + { + "id": "fe3dc092-ea23-42d7-9f1d-36eed72b539c", + "underlay_address": "fd00:1122:3344:121::26", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::26]:32345", + "dataset": { + "pool_name": "oxp_6d2b29c5-aa58-40b7-b630-88301f08e945" + } + } + }, + { + "id": "ff8cce49-a721-4ad1-b09e-2bd6e4e42d00", + "underlay_address": "fd00:1122:3344:121::24", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::24]:32345", + "dataset": { + "pool_name": "oxp_2b0da7bc-2afc-4a16-95e1-5126bcb9b6b1" + } + } + } + ] + } + }, + "a6634f64-f6fb-426c-b00b-9b30aed9f53a": { + "time_collected": "2024-03-01T19:18:13.507881Z", + "source": "http://[fd00:1122:3344:102::1]:12345", + "sled_id": "a6634f64-f6fb-426c-b00b-9b30aed9f53a", + "zones": { + "generation": 5, + "zones": [ + { + "id": "0ebd2b5b-2dec-4d17-8e23-e468c04dacb6", + "underlay_address": "fd00:1122:3344:102::11", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::11]:32345", + "dataset": { + "pool_name": "oxp_0c07bcc1-7666-4754-881d-4135992ff561" + } + } + }, + { + "id": "18b99404-d8b2-4463-bc99-638d9b678ab3", + "underlay_address": "fd00:1122:3344:102::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::e]:32345", + "dataset": { + "pool_name": "oxp_f9a037da-7d15-40d9-957d-7e42aadb333c" + } + } + }, + { + "id": "479811e7-6409-45d4-b294-5333534e47ed", + "underlay_address": "fd00:1122:3344:102::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::9]:32345", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "54d1b693-bc4d-4c47-9009-c6f624073801", + "underlay_address": "fd00:1122:3344:102::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::a]:32345", + "dataset": { + "pool_name": 
"oxp_02cc2369-9924-41c7-aa74-79d6d548f2dd" + } + } + }, + { + "id": "5b743072-9369-45d7-b7c9-dbdf736301f8", + "underlay_address": "fd00:1122:3344:2::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + }, + "dns_address": "[fd00:1122:3344:2::1]:53", + "gz_address": "fd00:1122:3344:2::2", + "gz_address_index": 1, + "http_address": "[fd00:1122:3344:2::1]:5353" + } + }, + { + "id": "5c4163fa-d6a6-4a0c-9476-3dda135bc9a4", + "underlay_address": "fd00:1122:3344:102::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:102::7]:17000" + } + }, + { + "id": "61a39188-dd82-43f6-a87c-ebb4c2d54ac4", + "underlay_address": "fd00:1122:3344:102::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::8]:32345", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "658076ed-fb43-49e7-a813-df3d23497848", + "underlay_address": "fd00:1122:3344:102::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::10]:32345", + "dataset": { + "pool_name": "oxp_7929b5fb-4ef2-4791-92e6-1a46c7f608b9" + } + } + }, + { + "id": "6edb836e-b100-4080-bb03-738d6441d571", + "underlay_address": "fd00:1122:3344:102::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::c]:32345", + "dataset": { + "pool_name": "oxp_fe74ae4d-d7bc-4293-9ec9-69132aca4aba" + } + } + }, + { + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b", + "underlay_address": "fd00:1122:3344:102::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.3", + "external_tls": true, + "internal_address": "[fd00:1122:3344:102::5]:12221", + "nic": { + "id": "3771a19c-cc40-4fd5-b99d-ed10071018a7", + "ip": "172.30.2.6", + "kind": { + "type": "service", + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b" + }, + "mac": "A8:40:25:FF:B2:AF", + "name": "nexus-7f83d784-9c5c-498f-b559-4176bbafbc4b", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "8052938f-73bc-4c54-8ed0-c62f6d77b9fd", + "underlay_address": "fd00:1122:3344:102::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::d]:32345", + "dataset": { + "pool_name": "oxp_93e4e338-0ad1-4ae1-a120-2cfb59b4df42" + } + } + }, + { + "id": "86bc0a2a-e24a-4bc1-8222-89207a0937f9", + "underlay_address": "fd00:1122:3344:102::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::4]:32221", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "9241b9da-8188-4aae-a9dc-62acb453618f", + "underlay_address": "fd00:1122:3344:102::6", + "zone_type": { + "type": "clickhouse", + "address": "[fd00:1122:3344:102::6]:8123", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "a781a349-62b2-41f0-9dd7-c64d290aef82", + "underlay_address": "fd00:1122:3344:102::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::f]:32345", + "dataset": { + "pool_name": "oxp_4e92cbcf-f194-4ffc-97e3-fdf3df7cc9ae" + } + } + }, + { + "id": "c67814bf-9972-4bb0-a666-24778ca70a16", + "underlay_address": "fd00:1122:3344:102::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::3]:32221", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "underlay_address": "fd00:1122:3344:102::12", + 
"zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:102::12]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9f03c82c-3192-498d-978d-a175049ac389", + "ip": "172.30.3.6", + "kind": { + "type": "service", + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909" + }, + "mac": "A8:40:25:FF:E4:11", + "name": "ntp-c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.6", + "first_port": 16384, + "last_port": 32767 + } + } + }, + { + "id": "dfc27183-ed85-44c3-ac3c-4427c4cb03e3", + "underlay_address": "fd00:1122:3344:102::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::b]:32345", + "dataset": { + "pool_name": "oxp_742699b5-4ded-406d-9a08-24648d3b2efb" + } + } + } + ] + } + }, + "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89": { + "time_collected": "2024-03-01T19:18:13.547769Z", + "source": "http://[fd00:1122:3344:103::1]:12345", + "sled_id": "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89", + "zones": { + "generation": 5, + "zones": [ + { + "id": "185e6b00-7e18-456c-9dfc-edd0194ac207", + "underlay_address": "fd00:1122:3344:103::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::a]:32345", + "dataset": { + "pool_name": "oxp_6aa59516-2018-4494-af6b-46704b20f6dc" + } + } + }, + { + "id": "1fc78bc7-feca-4a10-9beb-58b53fda5210", + "underlay_address": "fd00:1122:3344:103::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::10]:32345", + "dataset": { + "pool_name": "oxp_07d66b6b-8059-4d07-a0b3-8ce41e2a8c98" + } + } + }, + { + "id": "45dccfd6-4674-43c6-a0e7-4dde5f24d90d", + "underlay_address": "fd00:1122:3344:3::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "[fd00:1122:3344:3::1]:53", + "gz_address": "fd00:1122:3344:3::2", + "gz_address_index": 2, + "http_address": "[fd00:1122:3344:3::1]:5353" + } + }, + { + "id": "4b74c418-0a08-4489-b652-e97267c1c220", + "underlay_address": "fd00:1122:3344:103::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::7]:32345", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee", + "underlay_address": "fd00:1122:3344:103::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.4", + "external_tls": true, + "internal_address": "[fd00:1122:3344:103::5]:12221", + "nic": { + "id": "1f55dc2b-18ee-485c-8831-b603a8df78e6", + "ip": "172.30.2.7", + "kind": { + "type": "service", + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee" + }, + "mac": "A8:40:25:FF:96:05", + "name": "nexus-54c947d2-6355-453c-80fc-8f49cc2129ee", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "60d64b70-89f5-4774-9e41-2c79c4173911", + "underlay_address": "fd00:1122:3344:103::6", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:103::6]:17000" + } + }, + { + "id": "921958c0-7761-4294-945d-2a078327bf2c", + "underlay_address": "fd00:1122:3344:103::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::8]:32345", + "dataset": { + "pool_name": "oxp_7eabbdcc-28a3-4af9-bc4a-8d9b745d4df3" + } + } + }, + { + "id": "9df22034-59f6-431f-82e7-258fe983879b", + "underlay_address": "fd00:1122:3344:103::4", + 
"zone_type": { + "type": "external_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "172.20.28.1:53", + "http_address": "[fd00:1122:3344:103::4]:5353", + "nic": { + "id": "0376a963-e3c1-44bc-9cbf-9e8cca58410b", + "ip": "172.30.1.5", + "kind": { + "type": "service", + "id": "9df22034-59f6-431f-82e7-258fe983879b" + }, + "mac": "A8:40:25:FF:90:12", + "name": "external-dns-9df22034-59f6-431f-82e7-258fe983879b", + "primary": true, + "slot": 0, + "subnet": "172.30.1.0/24", + "vni": 100 + } + } + }, + { + "id": "bb21c0c4-c43f-4913-9bf9-707bd7f1769a", + "underlay_address": "fd00:1122:3344:103::11", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:103::11]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + }, + { + "id": "ccb0d612-9d7c-41ba-bd51-661c3c75ec91", + "underlay_address": "fd00:1122:3344:103::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::9]:32345", + "dataset": { + "pool_name": "oxp_c8feed09-6556-4409-af8e-563c97d556eb" + } + } + }, + { + "id": "ccef3e95-cae5-4072-9b7d-49e56fa9dff4", + "underlay_address": "fd00:1122:3344:103::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::d]:32345", + "dataset": { + "pool_name": "oxp_5b8b10a4-d527-4176-a049-1e9c2a10edea" + } + } + }, + { + "id": "d42fc874-8416-4212-b5a0-febba4c6b0ac", + "underlay_address": "fd00:1122:3344:103::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::b]:32345", + "dataset": { + "pool_name": "oxp_83a60822-1299-471c-bc79-3be0f87cf7c3" + } + } + }, + { + "id": "d5ba7d45-26d7-45ca-8c0a-a560f243b5fa", + "underlay_address": "fd00:1122:3344:103::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::e]:32345", + "dataset": { + "pool_name": "oxp_4ffc6336-04d1-4f21-9c27-a95aa640f960" + } + } + }, + { + "id": "da27ee6a-9841-46aa-acda-7cce3e04cb60", + "underlay_address": "fd00:1122:3344:103::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:103::3]:32221", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "e09525cf-9c87-4d05-8860-1356dd0dee8a", + "underlay_address": "fd00:1122:3344:103::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::f]:32345", + "dataset": { + "pool_name": "oxp_e2244950-dda0-4dfa-8b63-5ffac16c5a62" + } + } + }, + { + "id": "f7264a3e-e8e7-4476-8df3-76c9d00a383b", + "underlay_address": "fd00:1122:3344:103::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::c]:32345", + "dataset": { + "pool_name": "oxp_f6813b0c-ab5a-4fd0-8e24-95de9b65280d" + } + } + } + ] + } + } + } + } + ], + "blueprints": [ + { + "id": "486de160-c8f3-4600-acca-b0c78e33aca4", + "omicron_zones": { + "1762267a-a0cb-4c9a-88b4-8ce828cbc021": { + "generation": 5, + "zones": [ + { + "id": "0706860a-3801-42a7-8000-fff741f7665b", + "underlay_address": "fd00:1122:3344:101::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::c]:32345", + "dataset": { + "pool_name": "oxp_1d864940-c723-4f58-82e8-b03772b42356" + } + } + }, + { + "id": "34a279a2-a025-41ff-a392-1addac400f38", + "underlay_address": "fd00:1122:3344:101::3", + "zone_type": { + "type": "cockroach_db", + "address": 
"[fd00:1122:3344:101::3]:32221", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "3d344935-6f48-405f-8794-2c6e8fa4b1c2", + "underlay_address": "fd00:1122:3344:1::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + }, + "dns_address": "[fd00:1122:3344:1::1]:53", + "gz_address": "fd00:1122:3344:1::2", + "gz_address_index": 0, + "http_address": "[fd00:1122:3344:1::1]:5353" + } + }, + { + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "underlay_address": "fd00:1122:3344:101::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.2", + "external_tls": true, + "internal_address": "[fd00:1122:3344:101::5]:12221", + "nic": { + "id": "81a0385e-86b9-4109-b63d-61c1407c50d1", + "ip": "172.30.2.5", + "kind": { + "type": "service", + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629" + }, + "mac": "A8:40:25:FF:E1:17", + "name": "nexus-59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "67852e14-ce7f-4e50-a61d-4059d0c057b9", + "underlay_address": "fd00:1122:3344:101::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::10]:32345", + "dataset": { + "pool_name": "oxp_eae1515c-cd4a-4d7b-9a99-dbc24cf2e337" + } + } + }, + { + "id": "6ace1a69-d55a-45f0-a127-99c90f1b070c", + "underlay_address": "fd00:1122:3344:101::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::f]:32345", + "dataset": { + "pool_name": "oxp_466351db-2ced-45fa-9431-22de1c4bd480" + } + } + }, + { + "id": "84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe", + "underlay_address": "fd00:1122:3344:101::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::9]:32345", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "854c1544-d0ea-4d65-9cf4-776e91a4fcb5", + "underlay_address": "fd00:1122:3344:101::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::8]:32345", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "86ee7359-aa35-41d9-8a76-54961bc516a1", + "underlay_address": "fd00:1122:3344:101::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::4]:32221", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "925ea436-fad4-4fff-8ae0-24edd725a2b0", + "underlay_address": "fd00:1122:3344:101::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::d]:32345", + "dataset": { + "pool_name": "oxp_88ddd98f-3181-4797-864a-7e8d5b028657" + } + } + }, + { + "id": "be272d1e-4d34-4478-94b4-a76f5fcbef36", + "underlay_address": "fd00:1122:3344:101::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::a]:32345", + "dataset": { + "pool_name": "oxp_9041d780-b5ee-4159-a1db-8c4075e2176d" + } + } + }, + { + "id": "bebd754d-d06c-45f4-99bf-e1aab6253ab8", + "underlay_address": "fd00:1122:3344:101::6", + "zone_type": { + "type": "oximeter", + "address": "[fd00:1122:3344:101::6]:12223" + } + }, + { + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc", + "underlay_address": "fd00:1122:3344:101::11", + "zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:101::11]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9741f03f-f150-4229-9f8d-e29a69c87883", + "ip": "172.30.3.5", + 
"kind": { + "type": "service", + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc" + }, + "mac": "A8:40:25:FF:E7:A8", + "name": "ntp-d630cca7-e3f7-47a8-aee8-ad0790895abc", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.5", + "first_port": 0, + "last_port": 16383 + } + } + }, + { + "id": "d8c51c79-46ff-44bd-80be-08d6283f4e21", + "underlay_address": "fd00:1122:3344:101::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::e]:32345", + "dataset": { + "pool_name": "oxp_3f95c036-d5d8-4d08-8c78-c0635ca34698" + } + } + }, + { + "id": "dbbc0422-2b23-4d9e-b1db-8003cfc29ad0", + "underlay_address": "fd00:1122:3344:101::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::b]:32345", + "dataset": { + "pool_name": "oxp_1a4447be-5c84-4dad-b22c-d35e5e1911b0" + } + } + }, + { + "id": "f33a2375-212d-42e4-b539-6079aeb4e5b7", + "underlay_address": "fd00:1122:3344:101::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:101::7]:17000" + } + } + ] + }, + "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f": { + "generation": 1, + "zones": [] + }, + "a6634f64-f6fb-426c-b00b-9b30aed9f53a": { + "generation": 5, + "zones": [ + { + "id": "0ebd2b5b-2dec-4d17-8e23-e468c04dacb6", + "underlay_address": "fd00:1122:3344:102::11", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::11]:32345", + "dataset": { + "pool_name": "oxp_0c07bcc1-7666-4754-881d-4135992ff561" + } + } + }, + { + "id": "18b99404-d8b2-4463-bc99-638d9b678ab3", + "underlay_address": "fd00:1122:3344:102::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::e]:32345", + "dataset": { + "pool_name": "oxp_f9a037da-7d15-40d9-957d-7e42aadb333c" + } + } + }, + { + "id": "479811e7-6409-45d4-b294-5333534e47ed", + "underlay_address": "fd00:1122:3344:102::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::9]:32345", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "54d1b693-bc4d-4c47-9009-c6f624073801", + "underlay_address": "fd00:1122:3344:102::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::a]:32345", + "dataset": { + "pool_name": "oxp_02cc2369-9924-41c7-aa74-79d6d548f2dd" + } + } + }, + { + "id": "5b743072-9369-45d7-b7c9-dbdf736301f8", + "underlay_address": "fd00:1122:3344:2::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + }, + "dns_address": "[fd00:1122:3344:2::1]:53", + "gz_address": "fd00:1122:3344:2::2", + "gz_address_index": 1, + "http_address": "[fd00:1122:3344:2::1]:5353" + } + }, + { + "id": "5c4163fa-d6a6-4a0c-9476-3dda135bc9a4", + "underlay_address": "fd00:1122:3344:102::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:102::7]:17000" + } + }, + { + "id": "61a39188-dd82-43f6-a87c-ebb4c2d54ac4", + "underlay_address": "fd00:1122:3344:102::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::8]:32345", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "658076ed-fb43-49e7-a813-df3d23497848", + "underlay_address": "fd00:1122:3344:102::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::10]:32345", + "dataset": { + "pool_name": "oxp_7929b5fb-4ef2-4791-92e6-1a46c7f608b9" + } + } + }, + { + "id": "6edb836e-b100-4080-bb03-738d6441d571", + 
"underlay_address": "fd00:1122:3344:102::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::c]:32345", + "dataset": { + "pool_name": "oxp_fe74ae4d-d7bc-4293-9ec9-69132aca4aba" + } + } + }, + { + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b", + "underlay_address": "fd00:1122:3344:102::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.3", + "external_tls": true, + "internal_address": "[fd00:1122:3344:102::5]:12221", + "nic": { + "id": "3771a19c-cc40-4fd5-b99d-ed10071018a7", + "ip": "172.30.2.6", + "kind": { + "type": "service", + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b" + }, + "mac": "A8:40:25:FF:B2:AF", + "name": "nexus-7f83d784-9c5c-498f-b559-4176bbafbc4b", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "8052938f-73bc-4c54-8ed0-c62f6d77b9fd", + "underlay_address": "fd00:1122:3344:102::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::d]:32345", + "dataset": { + "pool_name": "oxp_93e4e338-0ad1-4ae1-a120-2cfb59b4df42" + } + } + }, + { + "id": "86bc0a2a-e24a-4bc1-8222-89207a0937f9", + "underlay_address": "fd00:1122:3344:102::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::4]:32221", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "9241b9da-8188-4aae-a9dc-62acb453618f", + "underlay_address": "fd00:1122:3344:102::6", + "zone_type": { + "type": "clickhouse", + "address": "[fd00:1122:3344:102::6]:8123", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "a781a349-62b2-41f0-9dd7-c64d290aef82", + "underlay_address": "fd00:1122:3344:102::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::f]:32345", + "dataset": { + "pool_name": "oxp_4e92cbcf-f194-4ffc-97e3-fdf3df7cc9ae" + } + } + }, + { + "id": "c67814bf-9972-4bb0-a666-24778ca70a16", + "underlay_address": "fd00:1122:3344:102::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::3]:32221", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "underlay_address": "fd00:1122:3344:102::12", + "zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:102::12]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9f03c82c-3192-498d-978d-a175049ac389", + "ip": "172.30.3.6", + "kind": { + "type": "service", + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909" + }, + "mac": "A8:40:25:FF:E4:11", + "name": "ntp-c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.6", + "first_port": 16384, + "last_port": 32767 + } + } + }, + { + "id": "dfc27183-ed85-44c3-ac3c-4427c4cb03e3", + "underlay_address": "fd00:1122:3344:102::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::b]:32345", + "dataset": { + "pool_name": "oxp_742699b5-4ded-406d-9a08-24648d3b2efb" + } + } + } + ] + }, + "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89": { + "generation": 5, + "zones": [ + { + "id": "185e6b00-7e18-456c-9dfc-edd0194ac207", + "underlay_address": "fd00:1122:3344:103::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::a]:32345", + "dataset": { + "pool_name": "oxp_6aa59516-2018-4494-af6b-46704b20f6dc" + } + } + }, 
+ { + "id": "1fc78bc7-feca-4a10-9beb-58b53fda5210", + "underlay_address": "fd00:1122:3344:103::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::10]:32345", + "dataset": { + "pool_name": "oxp_07d66b6b-8059-4d07-a0b3-8ce41e2a8c98" + } + } + }, + { + "id": "45dccfd6-4674-43c6-a0e7-4dde5f24d90d", + "underlay_address": "fd00:1122:3344:3::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "[fd00:1122:3344:3::1]:53", + "gz_address": "fd00:1122:3344:3::2", + "gz_address_index": 2, + "http_address": "[fd00:1122:3344:3::1]:5353" + } + }, + { + "id": "4b74c418-0a08-4489-b652-e97267c1c220", + "underlay_address": "fd00:1122:3344:103::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::7]:32345", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee", + "underlay_address": "fd00:1122:3344:103::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.4", + "external_tls": true, + "internal_address": "[fd00:1122:3344:103::5]:12221", + "nic": { + "id": "1f55dc2b-18ee-485c-8831-b603a8df78e6", + "ip": "172.30.2.7", + "kind": { + "type": "service", + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee" + }, + "mac": "A8:40:25:FF:96:05", + "name": "nexus-54c947d2-6355-453c-80fc-8f49cc2129ee", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "60d64b70-89f5-4774-9e41-2c79c4173911", + "underlay_address": "fd00:1122:3344:103::6", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:103::6]:17000" + } + }, + { + "id": "921958c0-7761-4294-945d-2a078327bf2c", + "underlay_address": "fd00:1122:3344:103::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::8]:32345", + "dataset": { + "pool_name": "oxp_7eabbdcc-28a3-4af9-bc4a-8d9b745d4df3" + } + } + }, + { + "id": "9df22034-59f6-431f-82e7-258fe983879b", + "underlay_address": "fd00:1122:3344:103::4", + "zone_type": { + "type": "external_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "172.20.28.1:53", + "http_address": "[fd00:1122:3344:103::4]:5353", + "nic": { + "id": "0376a963-e3c1-44bc-9cbf-9e8cca58410b", + "ip": "172.30.1.5", + "kind": { + "type": "service", + "id": "9df22034-59f6-431f-82e7-258fe983879b" + }, + "mac": "A8:40:25:FF:90:12", + "name": "external-dns-9df22034-59f6-431f-82e7-258fe983879b", + "primary": true, + "slot": 0, + "subnet": "172.30.1.0/24", + "vni": 100 + } + } + }, + { + "id": "bb21c0c4-c43f-4913-9bf9-707bd7f1769a", + "underlay_address": "fd00:1122:3344:103::11", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:103::11]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + }, + { + "id": "ccb0d612-9d7c-41ba-bd51-661c3c75ec91", + "underlay_address": "fd00:1122:3344:103::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::9]:32345", + "dataset": { + "pool_name": "oxp_c8feed09-6556-4409-af8e-563c97d556eb" + } + } + }, + { + "id": "ccef3e95-cae5-4072-9b7d-49e56fa9dff4", + "underlay_address": "fd00:1122:3344:103::d", + "zone_type": { + "type": 
"crucible", + "address": "[fd00:1122:3344:103::d]:32345", + "dataset": { + "pool_name": "oxp_5b8b10a4-d527-4176-a049-1e9c2a10edea" + } + } + }, + { + "id": "d42fc874-8416-4212-b5a0-febba4c6b0ac", + "underlay_address": "fd00:1122:3344:103::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::b]:32345", + "dataset": { + "pool_name": "oxp_83a60822-1299-471c-bc79-3be0f87cf7c3" + } + } + }, + { + "id": "d5ba7d45-26d7-45ca-8c0a-a560f243b5fa", + "underlay_address": "fd00:1122:3344:103::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::e]:32345", + "dataset": { + "pool_name": "oxp_4ffc6336-04d1-4f21-9c27-a95aa640f960" + } + } + }, + { + "id": "da27ee6a-9841-46aa-acda-7cce3e04cb60", + "underlay_address": "fd00:1122:3344:103::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:103::3]:32221", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "e09525cf-9c87-4d05-8860-1356dd0dee8a", + "underlay_address": "fd00:1122:3344:103::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::f]:32345", + "dataset": { + "pool_name": "oxp_e2244950-dda0-4dfa-8b63-5ffac16c5a62" + } + } + }, + { + "id": "f7264a3e-e8e7-4476-8df3-76c9d00a383b", + "underlay_address": "fd00:1122:3344:103::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::c]:32345", + "dataset": { + "pool_name": "oxp_f6813b0c-ab5a-4fd0-8e24-95de9b65280d" + } + } + } + ] + } + }, + "zones_in_service": [ + "0706860a-3801-42a7-8000-fff741f7665b", + "0ebd2b5b-2dec-4d17-8e23-e468c04dacb6", + "185e6b00-7e18-456c-9dfc-edd0194ac207", + "18b99404-d8b2-4463-bc99-638d9b678ab3", + "1fc78bc7-feca-4a10-9beb-58b53fda5210", + "34a279a2-a025-41ff-a392-1addac400f38", + "3d344935-6f48-405f-8794-2c6e8fa4b1c2", + "45dccfd6-4674-43c6-a0e7-4dde5f24d90d", + "479811e7-6409-45d4-b294-5333534e47ed", + "4b74c418-0a08-4489-b652-e97267c1c220", + "54c947d2-6355-453c-80fc-8f49cc2129ee", + "54d1b693-bc4d-4c47-9009-c6f624073801", + "59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "5b743072-9369-45d7-b7c9-dbdf736301f8", + "5c4163fa-d6a6-4a0c-9476-3dda135bc9a4", + "60d64b70-89f5-4774-9e41-2c79c4173911", + "61a39188-dd82-43f6-a87c-ebb4c2d54ac4", + "658076ed-fb43-49e7-a813-df3d23497848", + "67852e14-ce7f-4e50-a61d-4059d0c057b9", + "6ace1a69-d55a-45f0-a127-99c90f1b070c", + "6edb836e-b100-4080-bb03-738d6441d571", + "7f83d784-9c5c-498f-b559-4176bbafbc4b", + "8052938f-73bc-4c54-8ed0-c62f6d77b9fd", + "84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe", + "854c1544-d0ea-4d65-9cf4-776e91a4fcb5", + "86bc0a2a-e24a-4bc1-8222-89207a0937f9", + "86ee7359-aa35-41d9-8a76-54961bc516a1", + "921958c0-7761-4294-945d-2a078327bf2c", + "9241b9da-8188-4aae-a9dc-62acb453618f", + "925ea436-fad4-4fff-8ae0-24edd725a2b0", + "9df22034-59f6-431f-82e7-258fe983879b", + "a781a349-62b2-41f0-9dd7-c64d290aef82", + "bb21c0c4-c43f-4913-9bf9-707bd7f1769a", + "be272d1e-4d34-4478-94b4-a76f5fcbef36", + "bebd754d-d06c-45f4-99bf-e1aab6253ab8", + "c67814bf-9972-4bb0-a666-24778ca70a16", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "ccb0d612-9d7c-41ba-bd51-661c3c75ec91", + "ccef3e95-cae5-4072-9b7d-49e56fa9dff4", + "d42fc874-8416-4212-b5a0-febba4c6b0ac", + "d5ba7d45-26d7-45ca-8c0a-a560f243b5fa", + "d630cca7-e3f7-47a8-aee8-ad0790895abc", + "d8c51c79-46ff-44bd-80be-08d6283f4e21", + "da27ee6a-9841-46aa-acda-7cce3e04cb60", + "dbbc0422-2b23-4d9e-b1db-8003cfc29ad0", + "dfc27183-ed85-44c3-ac3c-4427c4cb03e3", + "e09525cf-9c87-4d05-8860-1356dd0dee8a", + "f33a2375-212d-42e4-b539-6079aeb4e5b7", + 
"f7264a3e-e8e7-4476-8df3-76c9d00a383b" + ], + "parent_blueprint_id": null, + "internal_dns_version": 1, + "time_created": "2024-03-01T19:06:56.467313Z", + "creator": "54c947d2-6355-453c-80fc-8f49cc2129ee", + "comment": "from collection df8caafd-c444-4f65-a304-b9ceb62a96c2" + }, + { + "id": "6c127695-ba15-408d-a992-325a1a888380", + "omicron_zones": { + "1762267a-a0cb-4c9a-88b4-8ce828cbc021": { + "generation": 5, + "zones": [ + { + "id": "0706860a-3801-42a7-8000-fff741f7665b", + "underlay_address": "fd00:1122:3344:101::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::c]:32345", + "dataset": { + "pool_name": "oxp_1d864940-c723-4f58-82e8-b03772b42356" + } + } + }, + { + "id": "34a279a2-a025-41ff-a392-1addac400f38", + "underlay_address": "fd00:1122:3344:101::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::3]:32221", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "3d344935-6f48-405f-8794-2c6e8fa4b1c2", + "underlay_address": "fd00:1122:3344:1::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + }, + "dns_address": "[fd00:1122:3344:1::1]:53", + "gz_address": "fd00:1122:3344:1::2", + "gz_address_index": 0, + "http_address": "[fd00:1122:3344:1::1]:5353" + } + }, + { + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "underlay_address": "fd00:1122:3344:101::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.2", + "external_tls": true, + "internal_address": "[fd00:1122:3344:101::5]:12221", + "nic": { + "id": "81a0385e-86b9-4109-b63d-61c1407c50d1", + "ip": "172.30.2.5", + "kind": { + "type": "service", + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629" + }, + "mac": "A8:40:25:FF:E1:17", + "name": "nexus-59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "67852e14-ce7f-4e50-a61d-4059d0c057b9", + "underlay_address": "fd00:1122:3344:101::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::10]:32345", + "dataset": { + "pool_name": "oxp_eae1515c-cd4a-4d7b-9a99-dbc24cf2e337" + } + } + }, + { + "id": "6ace1a69-d55a-45f0-a127-99c90f1b070c", + "underlay_address": "fd00:1122:3344:101::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::f]:32345", + "dataset": { + "pool_name": "oxp_466351db-2ced-45fa-9431-22de1c4bd480" + } + } + }, + { + "id": "84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe", + "underlay_address": "fd00:1122:3344:101::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::9]:32345", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "854c1544-d0ea-4d65-9cf4-776e91a4fcb5", + "underlay_address": "fd00:1122:3344:101::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::8]:32345", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "86ee7359-aa35-41d9-8a76-54961bc516a1", + "underlay_address": "fd00:1122:3344:101::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::4]:32221", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "925ea436-fad4-4fff-8ae0-24edd725a2b0", + "underlay_address": "fd00:1122:3344:101::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::d]:32345", + "dataset": { + 
"pool_name": "oxp_88ddd98f-3181-4797-864a-7e8d5b028657" + } + } + }, + { + "id": "be272d1e-4d34-4478-94b4-a76f5fcbef36", + "underlay_address": "fd00:1122:3344:101::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::a]:32345", + "dataset": { + "pool_name": "oxp_9041d780-b5ee-4159-a1db-8c4075e2176d" + } + } + }, + { + "id": "bebd754d-d06c-45f4-99bf-e1aab6253ab8", + "underlay_address": "fd00:1122:3344:101::6", + "zone_type": { + "type": "oximeter", + "address": "[fd00:1122:3344:101::6]:12223" + } + }, + { + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc", + "underlay_address": "fd00:1122:3344:101::11", + "zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:101::11]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9741f03f-f150-4229-9f8d-e29a69c87883", + "ip": "172.30.3.5", + "kind": { + "type": "service", + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc" + }, + "mac": "A8:40:25:FF:E7:A8", + "name": "ntp-d630cca7-e3f7-47a8-aee8-ad0790895abc", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.5", + "first_port": 0, + "last_port": 16383 + } + } + }, + { + "id": "d8c51c79-46ff-44bd-80be-08d6283f4e21", + "underlay_address": "fd00:1122:3344:101::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::e]:32345", + "dataset": { + "pool_name": "oxp_3f95c036-d5d8-4d08-8c78-c0635ca34698" + } + } + }, + { + "id": "dbbc0422-2b23-4d9e-b1db-8003cfc29ad0", + "underlay_address": "fd00:1122:3344:101::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::b]:32345", + "dataset": { + "pool_name": "oxp_1a4447be-5c84-4dad-b22c-d35e5e1911b0" + } + } + }, + { + "id": "f33a2375-212d-42e4-b539-6079aeb4e5b7", + "underlay_address": "fd00:1122:3344:101::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:101::7]:17000" + } + } + ] + }, + "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f": { + "generation": 2, + "zones": [ + { + "id": "2d5c29f7-4915-4876-82f8-e388b15c5067", + "underlay_address": "fd00:1122:3344:121::21", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:121::21]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + } + ] + }, + "a6634f64-f6fb-426c-b00b-9b30aed9f53a": { + "generation": 5, + "zones": [ + { + "id": "0ebd2b5b-2dec-4d17-8e23-e468c04dacb6", + "underlay_address": "fd00:1122:3344:102::11", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::11]:32345", + "dataset": { + "pool_name": "oxp_0c07bcc1-7666-4754-881d-4135992ff561" + } + } + }, + { + "id": "18b99404-d8b2-4463-bc99-638d9b678ab3", + "underlay_address": "fd00:1122:3344:102::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::e]:32345", + "dataset": { + "pool_name": "oxp_f9a037da-7d15-40d9-957d-7e42aadb333c" + } + } + }, + { + "id": "479811e7-6409-45d4-b294-5333534e47ed", + "underlay_address": "fd00:1122:3344:102::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::9]:32345", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "54d1b693-bc4d-4c47-9009-c6f624073801", + "underlay_address": "fd00:1122:3344:102::a", + "zone_type": { + "type": 
"crucible", + "address": "[fd00:1122:3344:102::a]:32345", + "dataset": { + "pool_name": "oxp_02cc2369-9924-41c7-aa74-79d6d548f2dd" + } + } + }, + { + "id": "5b743072-9369-45d7-b7c9-dbdf736301f8", + "underlay_address": "fd00:1122:3344:2::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + }, + "dns_address": "[fd00:1122:3344:2::1]:53", + "gz_address": "fd00:1122:3344:2::2", + "gz_address_index": 1, + "http_address": "[fd00:1122:3344:2::1]:5353" + } + }, + { + "id": "5c4163fa-d6a6-4a0c-9476-3dda135bc9a4", + "underlay_address": "fd00:1122:3344:102::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:102::7]:17000" + } + }, + { + "id": "61a39188-dd82-43f6-a87c-ebb4c2d54ac4", + "underlay_address": "fd00:1122:3344:102::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::8]:32345", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "658076ed-fb43-49e7-a813-df3d23497848", + "underlay_address": "fd00:1122:3344:102::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::10]:32345", + "dataset": { + "pool_name": "oxp_7929b5fb-4ef2-4791-92e6-1a46c7f608b9" + } + } + }, + { + "id": "6edb836e-b100-4080-bb03-738d6441d571", + "underlay_address": "fd00:1122:3344:102::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::c]:32345", + "dataset": { + "pool_name": "oxp_fe74ae4d-d7bc-4293-9ec9-69132aca4aba" + } + } + }, + { + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b", + "underlay_address": "fd00:1122:3344:102::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.3", + "external_tls": true, + "internal_address": "[fd00:1122:3344:102::5]:12221", + "nic": { + "id": "3771a19c-cc40-4fd5-b99d-ed10071018a7", + "ip": "172.30.2.6", + "kind": { + "type": "service", + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b" + }, + "mac": "A8:40:25:FF:B2:AF", + "name": "nexus-7f83d784-9c5c-498f-b559-4176bbafbc4b", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "8052938f-73bc-4c54-8ed0-c62f6d77b9fd", + "underlay_address": "fd00:1122:3344:102::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::d]:32345", + "dataset": { + "pool_name": "oxp_93e4e338-0ad1-4ae1-a120-2cfb59b4df42" + } + } + }, + { + "id": "86bc0a2a-e24a-4bc1-8222-89207a0937f9", + "underlay_address": "fd00:1122:3344:102::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::4]:32221", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "9241b9da-8188-4aae-a9dc-62acb453618f", + "underlay_address": "fd00:1122:3344:102::6", + "zone_type": { + "type": "clickhouse", + "address": "[fd00:1122:3344:102::6]:8123", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "a781a349-62b2-41f0-9dd7-c64d290aef82", + "underlay_address": "fd00:1122:3344:102::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::f]:32345", + "dataset": { + "pool_name": "oxp_4e92cbcf-f194-4ffc-97e3-fdf3df7cc9ae" + } + } + }, + { + "id": "c67814bf-9972-4bb0-a666-24778ca70a16", + "underlay_address": "fd00:1122:3344:102::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::3]:32221", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": 
"c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "underlay_address": "fd00:1122:3344:102::12", + "zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:102::12]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9f03c82c-3192-498d-978d-a175049ac389", + "ip": "172.30.3.6", + "kind": { + "type": "service", + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909" + }, + "mac": "A8:40:25:FF:E4:11", + "name": "ntp-c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.6", + "first_port": 16384, + "last_port": 32767 + } + } + }, + { + "id": "dfc27183-ed85-44c3-ac3c-4427c4cb03e3", + "underlay_address": "fd00:1122:3344:102::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::b]:32345", + "dataset": { + "pool_name": "oxp_742699b5-4ded-406d-9a08-24648d3b2efb" + } + } + } + ] + }, + "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89": { + "generation": 5, + "zones": [ + { + "id": "185e6b00-7e18-456c-9dfc-edd0194ac207", + "underlay_address": "fd00:1122:3344:103::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::a]:32345", + "dataset": { + "pool_name": "oxp_6aa59516-2018-4494-af6b-46704b20f6dc" + } + } + }, + { + "id": "1fc78bc7-feca-4a10-9beb-58b53fda5210", + "underlay_address": "fd00:1122:3344:103::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::10]:32345", + "dataset": { + "pool_name": "oxp_07d66b6b-8059-4d07-a0b3-8ce41e2a8c98" + } + } + }, + { + "id": "45dccfd6-4674-43c6-a0e7-4dde5f24d90d", + "underlay_address": "fd00:1122:3344:3::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "[fd00:1122:3344:3::1]:53", + "gz_address": "fd00:1122:3344:3::2", + "gz_address_index": 2, + "http_address": "[fd00:1122:3344:3::1]:5353" + } + }, + { + "id": "4b74c418-0a08-4489-b652-e97267c1c220", + "underlay_address": "fd00:1122:3344:103::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::7]:32345", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee", + "underlay_address": "fd00:1122:3344:103::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.4", + "external_tls": true, + "internal_address": "[fd00:1122:3344:103::5]:12221", + "nic": { + "id": "1f55dc2b-18ee-485c-8831-b603a8df78e6", + "ip": "172.30.2.7", + "kind": { + "type": "service", + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee" + }, + "mac": "A8:40:25:FF:96:05", + "name": "nexus-54c947d2-6355-453c-80fc-8f49cc2129ee", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "60d64b70-89f5-4774-9e41-2c79c4173911", + "underlay_address": "fd00:1122:3344:103::6", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:103::6]:17000" + } + }, + { + "id": "921958c0-7761-4294-945d-2a078327bf2c", + "underlay_address": "fd00:1122:3344:103::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::8]:32345", + "dataset": { + "pool_name": "oxp_7eabbdcc-28a3-4af9-bc4a-8d9b745d4df3" + } + } + }, + { + "id": "9df22034-59f6-431f-82e7-258fe983879b", + "underlay_address": "fd00:1122:3344:103::4", + "zone_type": { + "type": "external_dns", + "dataset": { + "pool_name": 
"oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "172.20.28.1:53", + "http_address": "[fd00:1122:3344:103::4]:5353", + "nic": { + "id": "0376a963-e3c1-44bc-9cbf-9e8cca58410b", + "ip": "172.30.1.5", + "kind": { + "type": "service", + "id": "9df22034-59f6-431f-82e7-258fe983879b" + }, + "mac": "A8:40:25:FF:90:12", + "name": "external-dns-9df22034-59f6-431f-82e7-258fe983879b", + "primary": true, + "slot": 0, + "subnet": "172.30.1.0/24", + "vni": 100 + } + } + }, + { + "id": "bb21c0c4-c43f-4913-9bf9-707bd7f1769a", + "underlay_address": "fd00:1122:3344:103::11", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:103::11]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + }, + { + "id": "ccb0d612-9d7c-41ba-bd51-661c3c75ec91", + "underlay_address": "fd00:1122:3344:103::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::9]:32345", + "dataset": { + "pool_name": "oxp_c8feed09-6556-4409-af8e-563c97d556eb" + } + } + }, + { + "id": "ccef3e95-cae5-4072-9b7d-49e56fa9dff4", + "underlay_address": "fd00:1122:3344:103::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::d]:32345", + "dataset": { + "pool_name": "oxp_5b8b10a4-d527-4176-a049-1e9c2a10edea" + } + } + }, + { + "id": "d42fc874-8416-4212-b5a0-febba4c6b0ac", + "underlay_address": "fd00:1122:3344:103::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::b]:32345", + "dataset": { + "pool_name": "oxp_83a60822-1299-471c-bc79-3be0f87cf7c3" + } + } + }, + { + "id": "d5ba7d45-26d7-45ca-8c0a-a560f243b5fa", + "underlay_address": "fd00:1122:3344:103::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::e]:32345", + "dataset": { + "pool_name": "oxp_4ffc6336-04d1-4f21-9c27-a95aa640f960" + } + } + }, + { + "id": "da27ee6a-9841-46aa-acda-7cce3e04cb60", + "underlay_address": "fd00:1122:3344:103::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:103::3]:32221", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "e09525cf-9c87-4d05-8860-1356dd0dee8a", + "underlay_address": "fd00:1122:3344:103::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::f]:32345", + "dataset": { + "pool_name": "oxp_e2244950-dda0-4dfa-8b63-5ffac16c5a62" + } + } + }, + { + "id": "f7264a3e-e8e7-4476-8df3-76c9d00a383b", + "underlay_address": "fd00:1122:3344:103::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::c]:32345", + "dataset": { + "pool_name": "oxp_f6813b0c-ab5a-4fd0-8e24-95de9b65280d" + } + } + } + ] + } + }, + "zones_in_service": [ + "0706860a-3801-42a7-8000-fff741f7665b", + "0ebd2b5b-2dec-4d17-8e23-e468c04dacb6", + "185e6b00-7e18-456c-9dfc-edd0194ac207", + "18b99404-d8b2-4463-bc99-638d9b678ab3", + "1fc78bc7-feca-4a10-9beb-58b53fda5210", + "2d5c29f7-4915-4876-82f8-e388b15c5067", + "34a279a2-a025-41ff-a392-1addac400f38", + "3d344935-6f48-405f-8794-2c6e8fa4b1c2", + "45dccfd6-4674-43c6-a0e7-4dde5f24d90d", + "479811e7-6409-45d4-b294-5333534e47ed", + "4b74c418-0a08-4489-b652-e97267c1c220", + "54c947d2-6355-453c-80fc-8f49cc2129ee", + "54d1b693-bc4d-4c47-9009-c6f624073801", + "59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "5b743072-9369-45d7-b7c9-dbdf736301f8", + "5c4163fa-d6a6-4a0c-9476-3dda135bc9a4", + 
"60d64b70-89f5-4774-9e41-2c79c4173911", + "61a39188-dd82-43f6-a87c-ebb4c2d54ac4", + "658076ed-fb43-49e7-a813-df3d23497848", + "67852e14-ce7f-4e50-a61d-4059d0c057b9", + "6ace1a69-d55a-45f0-a127-99c90f1b070c", + "6edb836e-b100-4080-bb03-738d6441d571", + "7f83d784-9c5c-498f-b559-4176bbafbc4b", + "8052938f-73bc-4c54-8ed0-c62f6d77b9fd", + "84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe", + "854c1544-d0ea-4d65-9cf4-776e91a4fcb5", + "86bc0a2a-e24a-4bc1-8222-89207a0937f9", + "86ee7359-aa35-41d9-8a76-54961bc516a1", + "921958c0-7761-4294-945d-2a078327bf2c", + "9241b9da-8188-4aae-a9dc-62acb453618f", + "925ea436-fad4-4fff-8ae0-24edd725a2b0", + "9df22034-59f6-431f-82e7-258fe983879b", + "a781a349-62b2-41f0-9dd7-c64d290aef82", + "bb21c0c4-c43f-4913-9bf9-707bd7f1769a", + "be272d1e-4d34-4478-94b4-a76f5fcbef36", + "bebd754d-d06c-45f4-99bf-e1aab6253ab8", + "c67814bf-9972-4bb0-a666-24778ca70a16", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "ccb0d612-9d7c-41ba-bd51-661c3c75ec91", + "ccef3e95-cae5-4072-9b7d-49e56fa9dff4", + "d42fc874-8416-4212-b5a0-febba4c6b0ac", + "d5ba7d45-26d7-45ca-8c0a-a560f243b5fa", + "d630cca7-e3f7-47a8-aee8-ad0790895abc", + "d8c51c79-46ff-44bd-80be-08d6283f4e21", + "da27ee6a-9841-46aa-acda-7cce3e04cb60", + "dbbc0422-2b23-4d9e-b1db-8003cfc29ad0", + "dfc27183-ed85-44c3-ac3c-4427c4cb03e3", + "e09525cf-9c87-4d05-8860-1356dd0dee8a", + "f33a2375-212d-42e4-b539-6079aeb4e5b7", + "f7264a3e-e8e7-4476-8df3-76c9d00a383b" + ], + "parent_blueprint_id": "486de160-c8f3-4600-acca-b0c78e33aca4", + "internal_dns_version": 1, + "time_created": "2024-03-01T19:07:58.105708Z", + "creator": "54c947d2-6355-453c-80fc-8f49cc2129ee", + "comment": "sled a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f: add NTP zone" + }, + { + "id": "cfa03594-f438-4736-a416-6307f7bf8f2e", + "omicron_zones": { + "1762267a-a0cb-4c9a-88b4-8ce828cbc021": { + "generation": 5, + "zones": [ + { + "id": "0706860a-3801-42a7-8000-fff741f7665b", + "underlay_address": "fd00:1122:3344:101::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::c]:32345", + "dataset": { + "pool_name": "oxp_1d864940-c723-4f58-82e8-b03772b42356" + } + } + }, + { + "id": "34a279a2-a025-41ff-a392-1addac400f38", + "underlay_address": "fd00:1122:3344:101::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::3]:32221", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "3d344935-6f48-405f-8794-2c6e8fa4b1c2", + "underlay_address": "fd00:1122:3344:1::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + }, + "dns_address": "[fd00:1122:3344:1::1]:53", + "gz_address": "fd00:1122:3344:1::2", + "gz_address_index": 0, + "http_address": "[fd00:1122:3344:1::1]:5353" + } + }, + { + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "underlay_address": "fd00:1122:3344:101::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.2", + "external_tls": true, + "internal_address": "[fd00:1122:3344:101::5]:12221", + "nic": { + "id": "81a0385e-86b9-4109-b63d-61c1407c50d1", + "ip": "172.30.2.5", + "kind": { + "type": "service", + "id": "59c3ed4f-3670-44b1-9ea0-9a6162a08629" + }, + "mac": "A8:40:25:FF:E1:17", + "name": "nexus-59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "67852e14-ce7f-4e50-a61d-4059d0c057b9", + "underlay_address": "fd00:1122:3344:101::10", + "zone_type": { + "type": 
"crucible", + "address": "[fd00:1122:3344:101::10]:32345", + "dataset": { + "pool_name": "oxp_eae1515c-cd4a-4d7b-9a99-dbc24cf2e337" + } + } + }, + { + "id": "6ace1a69-d55a-45f0-a127-99c90f1b070c", + "underlay_address": "fd00:1122:3344:101::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::f]:32345", + "dataset": { + "pool_name": "oxp_466351db-2ced-45fa-9431-22de1c4bd480" + } + } + }, + { + "id": "84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe", + "underlay_address": "fd00:1122:3344:101::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::9]:32345", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "854c1544-d0ea-4d65-9cf4-776e91a4fcb5", + "underlay_address": "fd00:1122:3344:101::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::8]:32345", + "dataset": { + "pool_name": "oxp_88d39560-b091-4fad-bad0-b0b1c514033b" + } + } + }, + { + "id": "86ee7359-aa35-41d9-8a76-54961bc516a1", + "underlay_address": "fd00:1122:3344:101::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:101::4]:32221", + "dataset": { + "pool_name": "oxp_e8d6b9a2-bad4-43f0-b5c0-0618555a7f8c" + } + } + }, + { + "id": "925ea436-fad4-4fff-8ae0-24edd725a2b0", + "underlay_address": "fd00:1122:3344:101::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::d]:32345", + "dataset": { + "pool_name": "oxp_88ddd98f-3181-4797-864a-7e8d5b028657" + } + } + }, + { + "id": "be272d1e-4d34-4478-94b4-a76f5fcbef36", + "underlay_address": "fd00:1122:3344:101::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::a]:32345", + "dataset": { + "pool_name": "oxp_9041d780-b5ee-4159-a1db-8c4075e2176d" + } + } + }, + { + "id": "bebd754d-d06c-45f4-99bf-e1aab6253ab8", + "underlay_address": "fd00:1122:3344:101::6", + "zone_type": { + "type": "oximeter", + "address": "[fd00:1122:3344:101::6]:12223" + } + }, + { + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc", + "underlay_address": "fd00:1122:3344:101::11", + "zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:101::11]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9741f03f-f150-4229-9f8d-e29a69c87883", + "ip": "172.30.3.5", + "kind": { + "type": "service", + "id": "d630cca7-e3f7-47a8-aee8-ad0790895abc" + }, + "mac": "A8:40:25:FF:E7:A8", + "name": "ntp-d630cca7-e3f7-47a8-aee8-ad0790895abc", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.5", + "first_port": 0, + "last_port": 16383 + } + } + }, + { + "id": "d8c51c79-46ff-44bd-80be-08d6283f4e21", + "underlay_address": "fd00:1122:3344:101::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::e]:32345", + "dataset": { + "pool_name": "oxp_3f95c036-d5d8-4d08-8c78-c0635ca34698" + } + } + }, + { + "id": "dbbc0422-2b23-4d9e-b1db-8003cfc29ad0", + "underlay_address": "fd00:1122:3344:101::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::b]:32345", + "dataset": { + "pool_name": "oxp_1a4447be-5c84-4dad-b22c-d35e5e1911b0" + } + } + }, + { + "id": "f33a2375-212d-42e4-b539-6079aeb4e5b7", + "underlay_address": "fd00:1122:3344:101::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:101::7]:17000" + } + } + ] + }, + "a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f": { + "generation": 3, + "zones": [ + { + "id": "0eca39c6-62e2-4a1f-8ea2-e9332a774a26", + 
"underlay_address": "fd00:1122:3344:121::28", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::28]:32345", + "dataset": { + "pool_name": "oxp_cc36b0cc-818a-4c7f-b50d-057ff7e7e8e3" + } + } + }, + { + "id": "12431d3e-1516-4822-98b3-3711ff912e69", + "underlay_address": "fd00:1122:3344:121::22", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::22]:32345", + "dataset": { + "pool_name": "oxp_0622b2cf-297e-4356-b94c-d74309f443ba" + } + } + }, + { + "id": "2d5c29f7-4915-4876-82f8-e388b15c5067", + "underlay_address": "fd00:1122:3344:121::21", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:121::21]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + }, + { + "id": "56414d76-9b8c-4e15-acf1-cd269c2d8638", + "underlay_address": "fd00:1122:3344:121::25", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::25]:32345", + "dataset": { + "pool_name": "oxp_574728ca-1559-48f3-a46c-1e608966e6a0" + } + } + }, + { + "id": "891cb9da-301d-40cc-b93d-5fcb350fa968", + "underlay_address": "fd00:1122:3344:121::29", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::29]:32345", + "dataset": { + "pool_name": "oxp_e4a4a09a-43b8-460c-bc43-1f935600b681" + } + } + }, + { + "id": "d9014907-24c0-4c33-a475-db15942f4f8f", + "underlay_address": "fd00:1122:3344:121::23", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::23]:32345", + "dataset": { + "pool_name": "oxp_276822e2-d7ce-4eae-b5a4-38d2e8586a37" + } + } + }, + { + "id": "e950de25-7306-4cf7-a6f4-6db11adaa310", + "underlay_address": "fd00:1122:3344:121::27", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::27]:32345", + "dataset": { + "pool_name": "oxp_8c7a7d84-c269-4178-9dbb-37f363e7958c" + } + } + }, + { + "id": "fe3dc092-ea23-42d7-9f1d-36eed72b539c", + "underlay_address": "fd00:1122:3344:121::26", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::26]:32345", + "dataset": { + "pool_name": "oxp_6d2b29c5-aa58-40b7-b630-88301f08e945" + } + } + }, + { + "id": "ff8cce49-a721-4ad1-b09e-2bd6e4e42d00", + "underlay_address": "fd00:1122:3344:121::24", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:121::24]:32345", + "dataset": { + "pool_name": "oxp_2b0da7bc-2afc-4a16-95e1-5126bcb9b6b1" + } + } + } + ] + }, + "a6634f64-f6fb-426c-b00b-9b30aed9f53a": { + "generation": 5, + "zones": [ + { + "id": "0ebd2b5b-2dec-4d17-8e23-e468c04dacb6", + "underlay_address": "fd00:1122:3344:102::11", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::11]:32345", + "dataset": { + "pool_name": "oxp_0c07bcc1-7666-4754-881d-4135992ff561" + } + } + }, + { + "id": "18b99404-d8b2-4463-bc99-638d9b678ab3", + "underlay_address": "fd00:1122:3344:102::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::e]:32345", + "dataset": { + "pool_name": "oxp_f9a037da-7d15-40d9-957d-7e42aadb333c" + } + } + }, + { + "id": "479811e7-6409-45d4-b294-5333534e47ed", + "underlay_address": "fd00:1122:3344:102::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::9]:32345", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "54d1b693-bc4d-4c47-9009-c6f624073801", + 
"underlay_address": "fd00:1122:3344:102::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::a]:32345", + "dataset": { + "pool_name": "oxp_02cc2369-9924-41c7-aa74-79d6d548f2dd" + } + } + }, + { + "id": "5b743072-9369-45d7-b7c9-dbdf736301f8", + "underlay_address": "fd00:1122:3344:2::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + }, + "dns_address": "[fd00:1122:3344:2::1]:53", + "gz_address": "fd00:1122:3344:2::2", + "gz_address_index": 1, + "http_address": "[fd00:1122:3344:2::1]:5353" + } + }, + { + "id": "5c4163fa-d6a6-4a0c-9476-3dda135bc9a4", + "underlay_address": "fd00:1122:3344:102::7", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:102::7]:17000" + } + }, + { + "id": "61a39188-dd82-43f6-a87c-ebb4c2d54ac4", + "underlay_address": "fd00:1122:3344:102::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::8]:32345", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "658076ed-fb43-49e7-a813-df3d23497848", + "underlay_address": "fd00:1122:3344:102::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::10]:32345", + "dataset": { + "pool_name": "oxp_7929b5fb-4ef2-4791-92e6-1a46c7f608b9" + } + } + }, + { + "id": "6edb836e-b100-4080-bb03-738d6441d571", + "underlay_address": "fd00:1122:3344:102::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::c]:32345", + "dataset": { + "pool_name": "oxp_fe74ae4d-d7bc-4293-9ec9-69132aca4aba" + } + } + }, + { + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b", + "underlay_address": "fd00:1122:3344:102::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.3", + "external_tls": true, + "internal_address": "[fd00:1122:3344:102::5]:12221", + "nic": { + "id": "3771a19c-cc40-4fd5-b99d-ed10071018a7", + "ip": "172.30.2.6", + "kind": { + "type": "service", + "id": "7f83d784-9c5c-498f-b559-4176bbafbc4b" + }, + "mac": "A8:40:25:FF:B2:AF", + "name": "nexus-7f83d784-9c5c-498f-b559-4176bbafbc4b", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "8052938f-73bc-4c54-8ed0-c62f6d77b9fd", + "underlay_address": "fd00:1122:3344:102::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::d]:32345", + "dataset": { + "pool_name": "oxp_93e4e338-0ad1-4ae1-a120-2cfb59b4df42" + } + } + }, + { + "id": "86bc0a2a-e24a-4bc1-8222-89207a0937f9", + "underlay_address": "fd00:1122:3344:102::4", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::4]:32221", + "dataset": { + "pool_name": "oxp_1fb80902-d979-461d-9c6b-a6541a5359c9" + } + } + }, + { + "id": "9241b9da-8188-4aae-a9dc-62acb453618f", + "underlay_address": "fd00:1122:3344:102::6", + "zone_type": { + "type": "clickhouse", + "address": "[fd00:1122:3344:102::6]:8123", + "dataset": { + "pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "a781a349-62b2-41f0-9dd7-c64d290aef82", + "underlay_address": "fd00:1122:3344:102::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::f]:32345", + "dataset": { + "pool_name": "oxp_4e92cbcf-f194-4ffc-97e3-fdf3df7cc9ae" + } + } + }, + { + "id": "c67814bf-9972-4bb0-a666-24778ca70a16", + "underlay_address": "fd00:1122:3344:102::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::3]:32221", + "dataset": { + 
"pool_name": "oxp_6a275151-d85f-488b-81a6-e558fdede658" + } + } + }, + { + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "underlay_address": "fd00:1122:3344:102::12", + "zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:102::12]:123", + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "nic": { + "id": "9f03c82c-3192-498d-978d-a175049ac389", + "ip": "172.30.3.6", + "kind": { + "type": "service", + "id": "c74ba7d7-3d5f-4a29-aadc-effe33a90909" + }, + "mac": "A8:40:25:FF:E4:11", + "name": "ntp-c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "primary": true, + "slot": 0, + "subnet": "172.30.3.0/24", + "vni": 100 + }, + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "snat_cfg": { + "ip": "172.20.28.6", + "first_port": 16384, + "last_port": 32767 + } + } + }, + { + "id": "dfc27183-ed85-44c3-ac3c-4427c4cb03e3", + "underlay_address": "fd00:1122:3344:102::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::b]:32345", + "dataset": { + "pool_name": "oxp_742699b5-4ded-406d-9a08-24648d3b2efb" + } + } + } + ] + }, + "fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89": { + "generation": 5, + "zones": [ + { + "id": "185e6b00-7e18-456c-9dfc-edd0194ac207", + "underlay_address": "fd00:1122:3344:103::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::a]:32345", + "dataset": { + "pool_name": "oxp_6aa59516-2018-4494-af6b-46704b20f6dc" + } + } + }, + { + "id": "1fc78bc7-feca-4a10-9beb-58b53fda5210", + "underlay_address": "fd00:1122:3344:103::10", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::10]:32345", + "dataset": { + "pool_name": "oxp_07d66b6b-8059-4d07-a0b3-8ce41e2a8c98" + } + } + }, + { + "id": "45dccfd6-4674-43c6-a0e7-4dde5f24d90d", + "underlay_address": "fd00:1122:3344:3::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "[fd00:1122:3344:3::1]:53", + "gz_address": "fd00:1122:3344:3::2", + "gz_address_index": 2, + "http_address": "[fd00:1122:3344:3::1]:5353" + } + }, + { + "id": "4b74c418-0a08-4489-b652-e97267c1c220", + "underlay_address": "fd00:1122:3344:103::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::7]:32345", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee", + "underlay_address": "fd00:1122:3344:103::5", + "zone_type": { + "type": "nexus", + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "external_ip": "172.20.28.4", + "external_tls": true, + "internal_address": "[fd00:1122:3344:103::5]:12221", + "nic": { + "id": "1f55dc2b-18ee-485c-8831-b603a8df78e6", + "ip": "172.30.2.7", + "kind": { + "type": "service", + "id": "54c947d2-6355-453c-80fc-8f49cc2129ee" + }, + "mac": "A8:40:25:FF:96:05", + "name": "nexus-54c947d2-6355-453c-80fc-8f49cc2129ee", + "primary": true, + "slot": 0, + "subnet": "172.30.2.0/24", + "vni": 100 + } + } + }, + { + "id": "60d64b70-89f5-4774-9e41-2c79c4173911", + "underlay_address": "fd00:1122:3344:103::6", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:103::6]:17000" + } + }, + { + "id": "921958c0-7761-4294-945d-2a078327bf2c", + "underlay_address": "fd00:1122:3344:103::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::8]:32345", + "dataset": { + "pool_name": "oxp_7eabbdcc-28a3-4af9-bc4a-8d9b745d4df3" + } + } + }, + { + "id": "9df22034-59f6-431f-82e7-258fe983879b", + "underlay_address": "fd00:1122:3344:103::4", + 
"zone_type": { + "type": "external_dns", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + }, + "dns_address": "172.20.28.1:53", + "http_address": "[fd00:1122:3344:103::4]:5353", + "nic": { + "id": "0376a963-e3c1-44bc-9cbf-9e8cca58410b", + "ip": "172.30.1.5", + "kind": { + "type": "service", + "id": "9df22034-59f6-431f-82e7-258fe983879b" + }, + "mac": "A8:40:25:FF:90:12", + "name": "external-dns-9df22034-59f6-431f-82e7-258fe983879b", + "primary": true, + "slot": 0, + "subnet": "172.30.1.0/24", + "vni": 100 + } + } + }, + { + "id": "bb21c0c4-c43f-4913-9bf9-707bd7f1769a", + "underlay_address": "fd00:1122:3344:103::11", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:103::11]:123", + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "ntp_servers": [ + "d630cca7-e3f7-47a8-aee8-ad0790895abc.host.control-plane.oxide.internal", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909.host.control-plane.oxide.internal" + ] + } + }, + { + "id": "ccb0d612-9d7c-41ba-bd51-661c3c75ec91", + "underlay_address": "fd00:1122:3344:103::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::9]:32345", + "dataset": { + "pool_name": "oxp_c8feed09-6556-4409-af8e-563c97d556eb" + } + } + }, + { + "id": "ccef3e95-cae5-4072-9b7d-49e56fa9dff4", + "underlay_address": "fd00:1122:3344:103::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::d]:32345", + "dataset": { + "pool_name": "oxp_5b8b10a4-d527-4176-a049-1e9c2a10edea" + } + } + }, + { + "id": "d42fc874-8416-4212-b5a0-febba4c6b0ac", + "underlay_address": "fd00:1122:3344:103::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::b]:32345", + "dataset": { + "pool_name": "oxp_83a60822-1299-471c-bc79-3be0f87cf7c3" + } + } + }, + { + "id": "d5ba7d45-26d7-45ca-8c0a-a560f243b5fa", + "underlay_address": "fd00:1122:3344:103::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::e]:32345", + "dataset": { + "pool_name": "oxp_4ffc6336-04d1-4f21-9c27-a95aa640f960" + } + } + }, + { + "id": "da27ee6a-9841-46aa-acda-7cce3e04cb60", + "underlay_address": "fd00:1122:3344:103::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:103::3]:32221", + "dataset": { + "pool_name": "oxp_d4e08191-c3c4-43f7-9818-1cb1f1e1285a" + } + } + }, + { + "id": "e09525cf-9c87-4d05-8860-1356dd0dee8a", + "underlay_address": "fd00:1122:3344:103::f", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::f]:32345", + "dataset": { + "pool_name": "oxp_e2244950-dda0-4dfa-8b63-5ffac16c5a62" + } + } + }, + { + "id": "f7264a3e-e8e7-4476-8df3-76c9d00a383b", + "underlay_address": "fd00:1122:3344:103::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::c]:32345", + "dataset": { + "pool_name": "oxp_f6813b0c-ab5a-4fd0-8e24-95de9b65280d" + } + } + } + ] + } + }, + "zones_in_service": [ + "0706860a-3801-42a7-8000-fff741f7665b", + "0ebd2b5b-2dec-4d17-8e23-e468c04dacb6", + "0eca39c6-62e2-4a1f-8ea2-e9332a774a26", + "12431d3e-1516-4822-98b3-3711ff912e69", + "185e6b00-7e18-456c-9dfc-edd0194ac207", + "18b99404-d8b2-4463-bc99-638d9b678ab3", + "1fc78bc7-feca-4a10-9beb-58b53fda5210", + "2d5c29f7-4915-4876-82f8-e388b15c5067", + "34a279a2-a025-41ff-a392-1addac400f38", + "3d344935-6f48-405f-8794-2c6e8fa4b1c2", + "45dccfd6-4674-43c6-a0e7-4dde5f24d90d", + "479811e7-6409-45d4-b294-5333534e47ed", + "4b74c418-0a08-4489-b652-e97267c1c220", + "54c947d2-6355-453c-80fc-8f49cc2129ee", + 
"54d1b693-bc4d-4c47-9009-c6f624073801", + "56414d76-9b8c-4e15-acf1-cd269c2d8638", + "59c3ed4f-3670-44b1-9ea0-9a6162a08629", + "5b743072-9369-45d7-b7c9-dbdf736301f8", + "5c4163fa-d6a6-4a0c-9476-3dda135bc9a4", + "60d64b70-89f5-4774-9e41-2c79c4173911", + "61a39188-dd82-43f6-a87c-ebb4c2d54ac4", + "658076ed-fb43-49e7-a813-df3d23497848", + "67852e14-ce7f-4e50-a61d-4059d0c057b9", + "6ace1a69-d55a-45f0-a127-99c90f1b070c", + "6edb836e-b100-4080-bb03-738d6441d571", + "7f83d784-9c5c-498f-b559-4176bbafbc4b", + "8052938f-73bc-4c54-8ed0-c62f6d77b9fd", + "84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe", + "854c1544-d0ea-4d65-9cf4-776e91a4fcb5", + "86bc0a2a-e24a-4bc1-8222-89207a0937f9", + "86ee7359-aa35-41d9-8a76-54961bc516a1", + "891cb9da-301d-40cc-b93d-5fcb350fa968", + "921958c0-7761-4294-945d-2a078327bf2c", + "9241b9da-8188-4aae-a9dc-62acb453618f", + "925ea436-fad4-4fff-8ae0-24edd725a2b0", + "9df22034-59f6-431f-82e7-258fe983879b", + "a781a349-62b2-41f0-9dd7-c64d290aef82", + "bb21c0c4-c43f-4913-9bf9-707bd7f1769a", + "be272d1e-4d34-4478-94b4-a76f5fcbef36", + "bebd754d-d06c-45f4-99bf-e1aab6253ab8", + "c67814bf-9972-4bb0-a666-24778ca70a16", + "c74ba7d7-3d5f-4a29-aadc-effe33a90909", + "ccb0d612-9d7c-41ba-bd51-661c3c75ec91", + "ccef3e95-cae5-4072-9b7d-49e56fa9dff4", + "d42fc874-8416-4212-b5a0-febba4c6b0ac", + "d5ba7d45-26d7-45ca-8c0a-a560f243b5fa", + "d630cca7-e3f7-47a8-aee8-ad0790895abc", + "d8c51c79-46ff-44bd-80be-08d6283f4e21", + "d9014907-24c0-4c33-a475-db15942f4f8f", + "da27ee6a-9841-46aa-acda-7cce3e04cb60", + "dbbc0422-2b23-4d9e-b1db-8003cfc29ad0", + "dfc27183-ed85-44c3-ac3c-4427c4cb03e3", + "e09525cf-9c87-4d05-8860-1356dd0dee8a", + "e950de25-7306-4cf7-a6f4-6db11adaa310", + "f33a2375-212d-42e4-b539-6079aeb4e5b7", + "f7264a3e-e8e7-4476-8df3-76c9d00a383b", + "fe3dc092-ea23-42d7-9f1d-36eed72b539c", + "ff8cce49-a721-4ad1-b09e-2bd6e4e42d00" + ], + "parent_blueprint_id": "6c127695-ba15-408d-a992-325a1a888380", + "internal_dns_version": 2, + "time_created": "2024-03-01T19:08:52.730520Z", + "creator": "54c947d2-6355-453c-80fc-8f49cc2129ee", + "comment": "sled a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f: add zones" + } + ] +} \ No newline at end of file diff --git a/dev-tools/reconfigurator-cli/tests/output/cmd-complex-stdout b/dev-tools/reconfigurator-cli/tests/output/cmd-complex-stdout new file mode 100644 index 0000000000..0cc5fe01ba --- /dev/null +++ b/dev-tools/reconfigurator-cli/tests/output/cmd-complex-stdout @@ -0,0 +1,385 @@ +> sled-list +ID NZPOOLS SUBNET + +> + +> inventory-list +ID NERRORS TIME_DONE + +> + +> blueprint-list +ID + +> + +> file-contents tests/input/complex.json +sled: 1762267a-a0cb-4c9a-88b4-8ce828cbc021 (subnet: fd00:1122:3344:101::/64, zpools: 9) +sled: a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f (subnet: fd00:1122:3344:121::/64, zpools: 8) +sled: a6634f64-f6fb-426c-b00b-9b30aed9f53a (subnet: fd00:1122:3344:102::/64, zpools: 10) +sled: fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89 (subnet: fd00:1122:3344:103::/64, zpools: 10) +collection: 83ed3949-d221-44f3-a901-02e9ff16d2a5 (errors: 8, completed at: 2024-03-01T19:17:10.594Z) +collection: 0a8dc8ff-9013-43aa-9f06-9540b24ea902 (errors: 8, completed at: 2024-03-01T19:17:11.153Z) +collection: cd3547d7-f172-47d2-9bbf-9730e88b0559 (errors: 8, completed at: 2024-03-01T19:18:07.486Z) +collection: 623f7e48-6c47-4e17-8aa3-cebe98ca4287 (errors: 8, completed at: 2024-03-01T19:18:12.110Z) +collection: 4e9fbfc2-eb1d-4d45-bdb9-64d965a490c5 (errors: 8, completed at: 2024-03-01T19:18:13.548Z) +blueprint: 486de160-c8f3-4600-acca-b0c78e33aca4 (created at: 2024-03-01 19:06:56.467313 
UTC) +blueprint: 6c127695-ba15-408d-a992-325a1a888380 (created at: 2024-03-01 19:07:58.105708 UTC) +blueprint: cfa03594-f438-4736-a416-6307f7bf8f2e (created at: 2024-03-01 19:08:52.730520 UTC) + + +> + +> load tests/input/complex.json 83ed3949-d221-44f3-a901-02e9ff16d2a5 +using collection 83ed3949-d221-44f3-a901-02e9ff16d2a5 as source of sled inventory data +sled 1762267a-a0cb-4c9a-88b4-8ce828cbc021 loaded +sled a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f loaded +sled a6634f64-f6fb-426c-b00b-9b30aed9f53a loaded +sled fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89 loaded +collection 83ed3949-d221-44f3-a901-02e9ff16d2a5 loaded +collection 0a8dc8ff-9013-43aa-9f06-9540b24ea902 loaded +collection cd3547d7-f172-47d2-9bbf-9730e88b0559 loaded +collection 623f7e48-6c47-4e17-8aa3-cebe98ca4287 loaded +collection 4e9fbfc2-eb1d-4d45-bdb9-64d965a490c5 loaded +blueprint 486de160-c8f3-4600-acca-b0c78e33aca4 loaded +blueprint 6c127695-ba15-408d-a992-325a1a888380 loaded +blueprint cfa03594-f438-4736-a416-6307f7bf8f2e loaded +loaded data from "tests/input/complex.json" + + +> + +> sled-list +ID NZPOOLS SUBNET +1762267a-a0cb-4c9a-88b4-8ce828cbc021 9 fd00:1122:3344:101::/64 +a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f 8 fd00:1122:3344:121::/64 +a6634f64-f6fb-426c-b00b-9b30aed9f53a 10 fd00:1122:3344:102::/64 +fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89 10 fd00:1122:3344:103::/64 + +> + +> inventory-list +ID NERRORS TIME_DONE +83ed3949-d221-44f3-a901-02e9ff16d2a5 8 2024-03-01T19:17:10.594Z +0a8dc8ff-9013-43aa-9f06-9540b24ea902 8 2024-03-01T19:17:11.153Z +cd3547d7-f172-47d2-9bbf-9730e88b0559 8 2024-03-01T19:18:07.486Z +623f7e48-6c47-4e17-8aa3-cebe98ca4287 8 2024-03-01T19:18:12.110Z +4e9fbfc2-eb1d-4d45-bdb9-64d965a490c5 8 2024-03-01T19:18:13.548Z + +> + +> blueprint-list +ID +486de160-c8f3-4600-acca-b0c78e33aca4 +6c127695-ba15-408d-a992-325a1a888380 +cfa03594-f438-4736-a416-6307f7bf8f2e + +> + +> blueprint-show cfa03594-f438-4736-a416-6307f7bf8f2e +blueprint cfa03594-f438-4736-a416-6307f7bf8f2e +parent: 6c127695-ba15-408d-a992-325a1a888380 +created by 54c947d2-6355-453c-80fc-8f49cc2129ee (likely a Nexus instance) +created at 2024-03-01T19:08:52.730Z +internal DNS version: 2 +comment: sled a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f: add zones +zones: + + sled 1762267a-a0cb-4c9a-88b4-8ce828cbc021: Omicron zones at generation 5 + 0706860a-3801-42a7-8000-fff741f7665b in service crucible + 34a279a2-a025-41ff-a392-1addac400f38 in service cockroach_db + 3d344935-6f48-405f-8794-2c6e8fa4b1c2 in service internal_dns + 59c3ed4f-3670-44b1-9ea0-9a6162a08629 in service nexus + 67852e14-ce7f-4e50-a61d-4059d0c057b9 in service crucible + 6ace1a69-d55a-45f0-a127-99c90f1b070c in service crucible + 84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe in service crucible + 854c1544-d0ea-4d65-9cf4-776e91a4fcb5 in service crucible + 86ee7359-aa35-41d9-8a76-54961bc516a1 in service cockroach_db + 925ea436-fad4-4fff-8ae0-24edd725a2b0 in service crucible + be272d1e-4d34-4478-94b4-a76f5fcbef36 in service crucible + bebd754d-d06c-45f4-99bf-e1aab6253ab8 in service oximeter + d630cca7-e3f7-47a8-aee8-ad0790895abc in service boundary_ntp + d8c51c79-46ff-44bd-80be-08d6283f4e21 in service crucible + dbbc0422-2b23-4d9e-b1db-8003cfc29ad0 in service crucible + f33a2375-212d-42e4-b539-6079aeb4e5b7 in service crucible_pantry + sled a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f: Omicron zones at generation 3 + 0eca39c6-62e2-4a1f-8ea2-e9332a774a26 in service crucible + 12431d3e-1516-4822-98b3-3711ff912e69 in service crucible + 2d5c29f7-4915-4876-82f8-e388b15c5067 in service internal_ntp + 
56414d76-9b8c-4e15-acf1-cd269c2d8638 in service crucible + 891cb9da-301d-40cc-b93d-5fcb350fa968 in service crucible + d9014907-24c0-4c33-a475-db15942f4f8f in service crucible + e950de25-7306-4cf7-a6f4-6db11adaa310 in service crucible + fe3dc092-ea23-42d7-9f1d-36eed72b539c in service crucible + ff8cce49-a721-4ad1-b09e-2bd6e4e42d00 in service crucible + sled a6634f64-f6fb-426c-b00b-9b30aed9f53a: Omicron zones at generation 5 + 0ebd2b5b-2dec-4d17-8e23-e468c04dacb6 in service crucible + 18b99404-d8b2-4463-bc99-638d9b678ab3 in service crucible + 479811e7-6409-45d4-b294-5333534e47ed in service crucible + 54d1b693-bc4d-4c47-9009-c6f624073801 in service crucible + 5b743072-9369-45d7-b7c9-dbdf736301f8 in service internal_dns + 5c4163fa-d6a6-4a0c-9476-3dda135bc9a4 in service crucible_pantry + 61a39188-dd82-43f6-a87c-ebb4c2d54ac4 in service crucible + 658076ed-fb43-49e7-a813-df3d23497848 in service crucible + 6edb836e-b100-4080-bb03-738d6441d571 in service crucible + 7f83d784-9c5c-498f-b559-4176bbafbc4b in service nexus + 8052938f-73bc-4c54-8ed0-c62f6d77b9fd in service crucible + 86bc0a2a-e24a-4bc1-8222-89207a0937f9 in service cockroach_db + 9241b9da-8188-4aae-a9dc-62acb453618f in service clickhouse + a781a349-62b2-41f0-9dd7-c64d290aef82 in service crucible + c67814bf-9972-4bb0-a666-24778ca70a16 in service cockroach_db + c74ba7d7-3d5f-4a29-aadc-effe33a90909 in service boundary_ntp + dfc27183-ed85-44c3-ac3c-4427c4cb03e3 in service crucible + sled fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89: Omicron zones at generation 5 + 185e6b00-7e18-456c-9dfc-edd0194ac207 in service crucible + 1fc78bc7-feca-4a10-9beb-58b53fda5210 in service crucible + 45dccfd6-4674-43c6-a0e7-4dde5f24d90d in service internal_dns + 4b74c418-0a08-4489-b652-e97267c1c220 in service crucible + 54c947d2-6355-453c-80fc-8f49cc2129ee in service nexus + 60d64b70-89f5-4774-9e41-2c79c4173911 in service crucible_pantry + 921958c0-7761-4294-945d-2a078327bf2c in service crucible + 9df22034-59f6-431f-82e7-258fe983879b in service external_dns + bb21c0c4-c43f-4913-9bf9-707bd7f1769a in service internal_ntp + ccb0d612-9d7c-41ba-bd51-661c3c75ec91 in service crucible + ccef3e95-cae5-4072-9b7d-49e56fa9dff4 in service crucible + d42fc874-8416-4212-b5a0-febba4c6b0ac in service crucible + d5ba7d45-26d7-45ca-8c0a-a560f243b5fa in service crucible + da27ee6a-9841-46aa-acda-7cce3e04cb60 in service cockroach_db + e09525cf-9c87-4d05-8860-1356dd0dee8a in service crucible + f7264a3e-e8e7-4476-8df3-76c9d00a383b in service crucible + + +> + +> blueprint-diff-inventory 83ed3949-d221-44f3-a901-02e9ff16d2a5 cfa03594-f438-4736-a416-6307f7bf8f2e +diff collection 83ed3949-d221-44f3-a901-02e9ff16d2a5 blueprint cfa03594-f438-4736-a416-6307f7bf8f2e +--- collection 83ed3949-d221-44f3-a901-02e9ff16d2a5 ++++ blueprint cfa03594-f438-4736-a416-6307f7bf8f2e + sled 1762267a-a0cb-4c9a-88b4-8ce828cbc021 + zone config generation 5 + zone 0706860a-3801-42a7-8000-fff741f7665b type crucible underlay IP fd00:1122:3344:101::c (unchanged) + zone 34a279a2-a025-41ff-a392-1addac400f38 type cockroach_db underlay IP fd00:1122:3344:101::3 (unchanged) + zone 3d344935-6f48-405f-8794-2c6e8fa4b1c2 type internal_dns underlay IP fd00:1122:3344:1::1 (unchanged) + zone 59c3ed4f-3670-44b1-9ea0-9a6162a08629 type nexus underlay IP fd00:1122:3344:101::5 (unchanged) + zone 67852e14-ce7f-4e50-a61d-4059d0c057b9 type crucible underlay IP fd00:1122:3344:101::10 (unchanged) + zone 6ace1a69-d55a-45f0-a127-99c90f1b070c type crucible underlay IP fd00:1122:3344:101::f (unchanged) + zone 84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe 
type crucible underlay IP fd00:1122:3344:101::9 (unchanged) + zone 854c1544-d0ea-4d65-9cf4-776e91a4fcb5 type crucible underlay IP fd00:1122:3344:101::8 (unchanged) + zone 86ee7359-aa35-41d9-8a76-54961bc516a1 type cockroach_db underlay IP fd00:1122:3344:101::4 (unchanged) + zone 925ea436-fad4-4fff-8ae0-24edd725a2b0 type crucible underlay IP fd00:1122:3344:101::d (unchanged) + zone be272d1e-4d34-4478-94b4-a76f5fcbef36 type crucible underlay IP fd00:1122:3344:101::a (unchanged) + zone bebd754d-d06c-45f4-99bf-e1aab6253ab8 type oximeter underlay IP fd00:1122:3344:101::6 (unchanged) + zone d630cca7-e3f7-47a8-aee8-ad0790895abc type boundary_ntp underlay IP fd00:1122:3344:101::11 (unchanged) + zone d8c51c79-46ff-44bd-80be-08d6283f4e21 type crucible underlay IP fd00:1122:3344:101::e (unchanged) + zone dbbc0422-2b23-4d9e-b1db-8003cfc29ad0 type crucible underlay IP fd00:1122:3344:101::b (unchanged) + zone f33a2375-212d-42e4-b539-6079aeb4e5b7 type crucible_pantry underlay IP fd00:1122:3344:101::7 (unchanged) + sled a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f + zone config generation 3 + zone 0eca39c6-62e2-4a1f-8ea2-e9332a774a26 type crucible underlay IP fd00:1122:3344:121::28 (unchanged) + zone 12431d3e-1516-4822-98b3-3711ff912e69 type crucible underlay IP fd00:1122:3344:121::22 (unchanged) + zone 2d5c29f7-4915-4876-82f8-e388b15c5067 type internal_ntp underlay IP fd00:1122:3344:121::21 (unchanged) + zone 56414d76-9b8c-4e15-acf1-cd269c2d8638 type crucible underlay IP fd00:1122:3344:121::25 (unchanged) + zone 891cb9da-301d-40cc-b93d-5fcb350fa968 type crucible underlay IP fd00:1122:3344:121::29 (unchanged) + zone d9014907-24c0-4c33-a475-db15942f4f8f type crucible underlay IP fd00:1122:3344:121::23 (unchanged) + zone e950de25-7306-4cf7-a6f4-6db11adaa310 type crucible underlay IP fd00:1122:3344:121::27 (unchanged) + zone fe3dc092-ea23-42d7-9f1d-36eed72b539c type crucible underlay IP fd00:1122:3344:121::26 (unchanged) + zone ff8cce49-a721-4ad1-b09e-2bd6e4e42d00 type crucible underlay IP fd00:1122:3344:121::24 (unchanged) + sled a6634f64-f6fb-426c-b00b-9b30aed9f53a + zone config generation 5 + zone 0ebd2b5b-2dec-4d17-8e23-e468c04dacb6 type crucible underlay IP fd00:1122:3344:102::11 (unchanged) + zone 18b99404-d8b2-4463-bc99-638d9b678ab3 type crucible underlay IP fd00:1122:3344:102::e (unchanged) + zone 479811e7-6409-45d4-b294-5333534e47ed type crucible underlay IP fd00:1122:3344:102::9 (unchanged) + zone 54d1b693-bc4d-4c47-9009-c6f624073801 type crucible underlay IP fd00:1122:3344:102::a (unchanged) + zone 5b743072-9369-45d7-b7c9-dbdf736301f8 type internal_dns underlay IP fd00:1122:3344:2::1 (unchanged) + zone 5c4163fa-d6a6-4a0c-9476-3dda135bc9a4 type crucible_pantry underlay IP fd00:1122:3344:102::7 (unchanged) + zone 61a39188-dd82-43f6-a87c-ebb4c2d54ac4 type crucible underlay IP fd00:1122:3344:102::8 (unchanged) + zone 658076ed-fb43-49e7-a813-df3d23497848 type crucible underlay IP fd00:1122:3344:102::10 (unchanged) + zone 6edb836e-b100-4080-bb03-738d6441d571 type crucible underlay IP fd00:1122:3344:102::c (unchanged) + zone 7f83d784-9c5c-498f-b559-4176bbafbc4b type nexus underlay IP fd00:1122:3344:102::5 (unchanged) + zone 8052938f-73bc-4c54-8ed0-c62f6d77b9fd type crucible underlay IP fd00:1122:3344:102::d (unchanged) + zone 86bc0a2a-e24a-4bc1-8222-89207a0937f9 type cockroach_db underlay IP fd00:1122:3344:102::4 (unchanged) + zone 9241b9da-8188-4aae-a9dc-62acb453618f type clickhouse underlay IP fd00:1122:3344:102::6 (unchanged) + zone a781a349-62b2-41f0-9dd7-c64d290aef82 type crucible underlay IP 
fd00:1122:3344:102::f (unchanged) + zone c67814bf-9972-4bb0-a666-24778ca70a16 type cockroach_db underlay IP fd00:1122:3344:102::3 (unchanged) + zone c74ba7d7-3d5f-4a29-aadc-effe33a90909 type boundary_ntp underlay IP fd00:1122:3344:102::12 (unchanged) + zone dfc27183-ed85-44c3-ac3c-4427c4cb03e3 type crucible underlay IP fd00:1122:3344:102::b (unchanged) + sled fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89 + zone config generation 5 + zone 185e6b00-7e18-456c-9dfc-edd0194ac207 type crucible underlay IP fd00:1122:3344:103::a (unchanged) + zone 1fc78bc7-feca-4a10-9beb-58b53fda5210 type crucible underlay IP fd00:1122:3344:103::10 (unchanged) + zone 45dccfd6-4674-43c6-a0e7-4dde5f24d90d type internal_dns underlay IP fd00:1122:3344:3::1 (unchanged) + zone 4b74c418-0a08-4489-b652-e97267c1c220 type crucible underlay IP fd00:1122:3344:103::7 (unchanged) + zone 54c947d2-6355-453c-80fc-8f49cc2129ee type nexus underlay IP fd00:1122:3344:103::5 (unchanged) + zone 60d64b70-89f5-4774-9e41-2c79c4173911 type crucible_pantry underlay IP fd00:1122:3344:103::6 (unchanged) + zone 921958c0-7761-4294-945d-2a078327bf2c type crucible underlay IP fd00:1122:3344:103::8 (unchanged) + zone 9df22034-59f6-431f-82e7-258fe983879b type external_dns underlay IP fd00:1122:3344:103::4 (unchanged) + zone bb21c0c4-c43f-4913-9bf9-707bd7f1769a type internal_ntp underlay IP fd00:1122:3344:103::11 (unchanged) + zone ccb0d612-9d7c-41ba-bd51-661c3c75ec91 type crucible underlay IP fd00:1122:3344:103::9 (unchanged) + zone ccef3e95-cae5-4072-9b7d-49e56fa9dff4 type crucible underlay IP fd00:1122:3344:103::d (unchanged) + zone d42fc874-8416-4212-b5a0-febba4c6b0ac type crucible underlay IP fd00:1122:3344:103::b (unchanged) + zone d5ba7d45-26d7-45ca-8c0a-a560f243b5fa type crucible underlay IP fd00:1122:3344:103::e (unchanged) + zone da27ee6a-9841-46aa-acda-7cce3e04cb60 type cockroach_db underlay IP fd00:1122:3344:103::3 (unchanged) + zone e09525cf-9c87-4d05-8860-1356dd0dee8a type crucible underlay IP fd00:1122:3344:103::f (unchanged) + zone f7264a3e-e8e7-4476-8df3-76c9d00a383b type crucible underlay IP fd00:1122:3344:103::c (unchanged) + + +> + +> blueprint-diff 6c127695-ba15-408d-a992-325a1a888380 cfa03594-f438-4736-a416-6307f7bf8f2e +diff blueprint 6c127695-ba15-408d-a992-325a1a888380 blueprint cfa03594-f438-4736-a416-6307f7bf8f2e +--- blueprint 6c127695-ba15-408d-a992-325a1a888380 ++++ blueprint cfa03594-f438-4736-a416-6307f7bf8f2e + sled 1762267a-a0cb-4c9a-88b4-8ce828cbc021 + zone config generation 5 + zone 0706860a-3801-42a7-8000-fff741f7665b type crucible underlay IP fd00:1122:3344:101::c (unchanged) + zone 34a279a2-a025-41ff-a392-1addac400f38 type cockroach_db underlay IP fd00:1122:3344:101::3 (unchanged) + zone 3d344935-6f48-405f-8794-2c6e8fa4b1c2 type internal_dns underlay IP fd00:1122:3344:1::1 (unchanged) + zone 59c3ed4f-3670-44b1-9ea0-9a6162a08629 type nexus underlay IP fd00:1122:3344:101::5 (unchanged) + zone 67852e14-ce7f-4e50-a61d-4059d0c057b9 type crucible underlay IP fd00:1122:3344:101::10 (unchanged) + zone 6ace1a69-d55a-45f0-a127-99c90f1b070c type crucible underlay IP fd00:1122:3344:101::f (unchanged) + zone 84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe type crucible underlay IP fd00:1122:3344:101::9 (unchanged) + zone 854c1544-d0ea-4d65-9cf4-776e91a4fcb5 type crucible underlay IP fd00:1122:3344:101::8 (unchanged) + zone 86ee7359-aa35-41d9-8a76-54961bc516a1 type cockroach_db underlay IP fd00:1122:3344:101::4 (unchanged) + zone 925ea436-fad4-4fff-8ae0-24edd725a2b0 type crucible underlay IP fd00:1122:3344:101::d (unchanged) + zone 
be272d1e-4d34-4478-94b4-a76f5fcbef36 type crucible underlay IP fd00:1122:3344:101::a (unchanged) + zone bebd754d-d06c-45f4-99bf-e1aab6253ab8 type oximeter underlay IP fd00:1122:3344:101::6 (unchanged) + zone d630cca7-e3f7-47a8-aee8-ad0790895abc type boundary_ntp underlay IP fd00:1122:3344:101::11 (unchanged) + zone d8c51c79-46ff-44bd-80be-08d6283f4e21 type crucible underlay IP fd00:1122:3344:101::e (unchanged) + zone dbbc0422-2b23-4d9e-b1db-8003cfc29ad0 type crucible underlay IP fd00:1122:3344:101::b (unchanged) + zone f33a2375-212d-42e4-b539-6079aeb4e5b7 type crucible_pantry underlay IP fd00:1122:3344:101::7 (unchanged) + sled a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f +- zone config generation 2 ++ zone config generation 3 + zone 2d5c29f7-4915-4876-82f8-e388b15c5067 type internal_ntp underlay IP fd00:1122:3344:121::21 (unchanged) ++ zone 0eca39c6-62e2-4a1f-8ea2-e9332a774a26 type crucible underlay IP fd00:1122:3344:121::28 (added) ++ zone 12431d3e-1516-4822-98b3-3711ff912e69 type crucible underlay IP fd00:1122:3344:121::22 (added) ++ zone 56414d76-9b8c-4e15-acf1-cd269c2d8638 type crucible underlay IP fd00:1122:3344:121::25 (added) ++ zone 891cb9da-301d-40cc-b93d-5fcb350fa968 type crucible underlay IP fd00:1122:3344:121::29 (added) ++ zone d9014907-24c0-4c33-a475-db15942f4f8f type crucible underlay IP fd00:1122:3344:121::23 (added) ++ zone e950de25-7306-4cf7-a6f4-6db11adaa310 type crucible underlay IP fd00:1122:3344:121::27 (added) ++ zone fe3dc092-ea23-42d7-9f1d-36eed72b539c type crucible underlay IP fd00:1122:3344:121::26 (added) ++ zone ff8cce49-a721-4ad1-b09e-2bd6e4e42d00 type crucible underlay IP fd00:1122:3344:121::24 (added) + sled a6634f64-f6fb-426c-b00b-9b30aed9f53a + zone config generation 5 + zone 0ebd2b5b-2dec-4d17-8e23-e468c04dacb6 type crucible underlay IP fd00:1122:3344:102::11 (unchanged) + zone 18b99404-d8b2-4463-bc99-638d9b678ab3 type crucible underlay IP fd00:1122:3344:102::e (unchanged) + zone 479811e7-6409-45d4-b294-5333534e47ed type crucible underlay IP fd00:1122:3344:102::9 (unchanged) + zone 54d1b693-bc4d-4c47-9009-c6f624073801 type crucible underlay IP fd00:1122:3344:102::a (unchanged) + zone 5b743072-9369-45d7-b7c9-dbdf736301f8 type internal_dns underlay IP fd00:1122:3344:2::1 (unchanged) + zone 5c4163fa-d6a6-4a0c-9476-3dda135bc9a4 type crucible_pantry underlay IP fd00:1122:3344:102::7 (unchanged) + zone 61a39188-dd82-43f6-a87c-ebb4c2d54ac4 type crucible underlay IP fd00:1122:3344:102::8 (unchanged) + zone 658076ed-fb43-49e7-a813-df3d23497848 type crucible underlay IP fd00:1122:3344:102::10 (unchanged) + zone 6edb836e-b100-4080-bb03-738d6441d571 type crucible underlay IP fd00:1122:3344:102::c (unchanged) + zone 7f83d784-9c5c-498f-b559-4176bbafbc4b type nexus underlay IP fd00:1122:3344:102::5 (unchanged) + zone 8052938f-73bc-4c54-8ed0-c62f6d77b9fd type crucible underlay IP fd00:1122:3344:102::d (unchanged) + zone 86bc0a2a-e24a-4bc1-8222-89207a0937f9 type cockroach_db underlay IP fd00:1122:3344:102::4 (unchanged) + zone 9241b9da-8188-4aae-a9dc-62acb453618f type clickhouse underlay IP fd00:1122:3344:102::6 (unchanged) + zone a781a349-62b2-41f0-9dd7-c64d290aef82 type crucible underlay IP fd00:1122:3344:102::f (unchanged) + zone c67814bf-9972-4bb0-a666-24778ca70a16 type cockroach_db underlay IP fd00:1122:3344:102::3 (unchanged) + zone c74ba7d7-3d5f-4a29-aadc-effe33a90909 type boundary_ntp underlay IP fd00:1122:3344:102::12 (unchanged) + zone dfc27183-ed85-44c3-ac3c-4427c4cb03e3 type crucible underlay IP fd00:1122:3344:102::b (unchanged) + sled 
fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89 + zone config generation 5 + zone 185e6b00-7e18-456c-9dfc-edd0194ac207 type crucible underlay IP fd00:1122:3344:103::a (unchanged) + zone 1fc78bc7-feca-4a10-9beb-58b53fda5210 type crucible underlay IP fd00:1122:3344:103::10 (unchanged) + zone 45dccfd6-4674-43c6-a0e7-4dde5f24d90d type internal_dns underlay IP fd00:1122:3344:3::1 (unchanged) + zone 4b74c418-0a08-4489-b652-e97267c1c220 type crucible underlay IP fd00:1122:3344:103::7 (unchanged) + zone 54c947d2-6355-453c-80fc-8f49cc2129ee type nexus underlay IP fd00:1122:3344:103::5 (unchanged) + zone 60d64b70-89f5-4774-9e41-2c79c4173911 type crucible_pantry underlay IP fd00:1122:3344:103::6 (unchanged) + zone 921958c0-7761-4294-945d-2a078327bf2c type crucible underlay IP fd00:1122:3344:103::8 (unchanged) + zone 9df22034-59f6-431f-82e7-258fe983879b type external_dns underlay IP fd00:1122:3344:103::4 (unchanged) + zone bb21c0c4-c43f-4913-9bf9-707bd7f1769a type internal_ntp underlay IP fd00:1122:3344:103::11 (unchanged) + zone ccb0d612-9d7c-41ba-bd51-661c3c75ec91 type crucible underlay IP fd00:1122:3344:103::9 (unchanged) + zone ccef3e95-cae5-4072-9b7d-49e56fa9dff4 type crucible underlay IP fd00:1122:3344:103::d (unchanged) + zone d42fc874-8416-4212-b5a0-febba4c6b0ac type crucible underlay IP fd00:1122:3344:103::b (unchanged) + zone d5ba7d45-26d7-45ca-8c0a-a560f243b5fa type crucible underlay IP fd00:1122:3344:103::e (unchanged) + zone da27ee6a-9841-46aa-acda-7cce3e04cb60 type cockroach_db underlay IP fd00:1122:3344:103::3 (unchanged) + zone e09525cf-9c87-4d05-8860-1356dd0dee8a type crucible underlay IP fd00:1122:3344:103::f (unchanged) + zone f7264a3e-e8e7-4476-8df3-76c9d00a383b type crucible underlay IP fd00:1122:3344:103::c (unchanged) + + +> + +> blueprint-diff cfa03594-f438-4736-a416-6307f7bf8f2e 6c127695-ba15-408d-a992-325a1a888380 +diff blueprint cfa03594-f438-4736-a416-6307f7bf8f2e blueprint 6c127695-ba15-408d-a992-325a1a888380 +--- blueprint cfa03594-f438-4736-a416-6307f7bf8f2e ++++ blueprint 6c127695-ba15-408d-a992-325a1a888380 + sled 1762267a-a0cb-4c9a-88b4-8ce828cbc021 + zone config generation 5 + zone 0706860a-3801-42a7-8000-fff741f7665b type crucible underlay IP fd00:1122:3344:101::c (unchanged) + zone 34a279a2-a025-41ff-a392-1addac400f38 type cockroach_db underlay IP fd00:1122:3344:101::3 (unchanged) + zone 3d344935-6f48-405f-8794-2c6e8fa4b1c2 type internal_dns underlay IP fd00:1122:3344:1::1 (unchanged) + zone 59c3ed4f-3670-44b1-9ea0-9a6162a08629 type nexus underlay IP fd00:1122:3344:101::5 (unchanged) + zone 67852e14-ce7f-4e50-a61d-4059d0c057b9 type crucible underlay IP fd00:1122:3344:101::10 (unchanged) + zone 6ace1a69-d55a-45f0-a127-99c90f1b070c type crucible underlay IP fd00:1122:3344:101::f (unchanged) + zone 84d6b7fc-b5c2-499f-8d17-0eb1ea5affbe type crucible underlay IP fd00:1122:3344:101::9 (unchanged) + zone 854c1544-d0ea-4d65-9cf4-776e91a4fcb5 type crucible underlay IP fd00:1122:3344:101::8 (unchanged) + zone 86ee7359-aa35-41d9-8a76-54961bc516a1 type cockroach_db underlay IP fd00:1122:3344:101::4 (unchanged) + zone 925ea436-fad4-4fff-8ae0-24edd725a2b0 type crucible underlay IP fd00:1122:3344:101::d (unchanged) + zone be272d1e-4d34-4478-94b4-a76f5fcbef36 type crucible underlay IP fd00:1122:3344:101::a (unchanged) + zone bebd754d-d06c-45f4-99bf-e1aab6253ab8 type oximeter underlay IP fd00:1122:3344:101::6 (unchanged) + zone d630cca7-e3f7-47a8-aee8-ad0790895abc type boundary_ntp underlay IP fd00:1122:3344:101::11 (unchanged) + zone d8c51c79-46ff-44bd-80be-08d6283f4e21 type crucible 
underlay IP fd00:1122:3344:101::e (unchanged) + zone dbbc0422-2b23-4d9e-b1db-8003cfc29ad0 type crucible underlay IP fd00:1122:3344:101::b (unchanged) + zone f33a2375-212d-42e4-b539-6079aeb4e5b7 type crucible_pantry underlay IP fd00:1122:3344:101::7 (unchanged) + sled a243c1d0-9051-4b94-ab3e-f2a93fd0ae4f +- zone config generation 3 ++ zone config generation 2 +- zone 0eca39c6-62e2-4a1f-8ea2-e9332a774a26 type crucible (removed) +- zone 12431d3e-1516-4822-98b3-3711ff912e69 type crucible (removed) +- zone 56414d76-9b8c-4e15-acf1-cd269c2d8638 type crucible (removed) +- zone 891cb9da-301d-40cc-b93d-5fcb350fa968 type crucible (removed) +- zone d9014907-24c0-4c33-a475-db15942f4f8f type crucible (removed) +- zone e950de25-7306-4cf7-a6f4-6db11adaa310 type crucible (removed) +- zone fe3dc092-ea23-42d7-9f1d-36eed72b539c type crucible (removed) +- zone ff8cce49-a721-4ad1-b09e-2bd6e4e42d00 type crucible (removed) + zone 2d5c29f7-4915-4876-82f8-e388b15c5067 type internal_ntp underlay IP fd00:1122:3344:121::21 (unchanged) + sled a6634f64-f6fb-426c-b00b-9b30aed9f53a + zone config generation 5 + zone 0ebd2b5b-2dec-4d17-8e23-e468c04dacb6 type crucible underlay IP fd00:1122:3344:102::11 (unchanged) + zone 18b99404-d8b2-4463-bc99-638d9b678ab3 type crucible underlay IP fd00:1122:3344:102::e (unchanged) + zone 479811e7-6409-45d4-b294-5333534e47ed type crucible underlay IP fd00:1122:3344:102::9 (unchanged) + zone 54d1b693-bc4d-4c47-9009-c6f624073801 type crucible underlay IP fd00:1122:3344:102::a (unchanged) + zone 5b743072-9369-45d7-b7c9-dbdf736301f8 type internal_dns underlay IP fd00:1122:3344:2::1 (unchanged) + zone 5c4163fa-d6a6-4a0c-9476-3dda135bc9a4 type crucible_pantry underlay IP fd00:1122:3344:102::7 (unchanged) + zone 61a39188-dd82-43f6-a87c-ebb4c2d54ac4 type crucible underlay IP fd00:1122:3344:102::8 (unchanged) + zone 658076ed-fb43-49e7-a813-df3d23497848 type crucible underlay IP fd00:1122:3344:102::10 (unchanged) + zone 6edb836e-b100-4080-bb03-738d6441d571 type crucible underlay IP fd00:1122:3344:102::c (unchanged) + zone 7f83d784-9c5c-498f-b559-4176bbafbc4b type nexus underlay IP fd00:1122:3344:102::5 (unchanged) + zone 8052938f-73bc-4c54-8ed0-c62f6d77b9fd type crucible underlay IP fd00:1122:3344:102::d (unchanged) + zone 86bc0a2a-e24a-4bc1-8222-89207a0937f9 type cockroach_db underlay IP fd00:1122:3344:102::4 (unchanged) + zone 9241b9da-8188-4aae-a9dc-62acb453618f type clickhouse underlay IP fd00:1122:3344:102::6 (unchanged) + zone a781a349-62b2-41f0-9dd7-c64d290aef82 type crucible underlay IP fd00:1122:3344:102::f (unchanged) + zone c67814bf-9972-4bb0-a666-24778ca70a16 type cockroach_db underlay IP fd00:1122:3344:102::3 (unchanged) + zone c74ba7d7-3d5f-4a29-aadc-effe33a90909 type boundary_ntp underlay IP fd00:1122:3344:102::12 (unchanged) + zone dfc27183-ed85-44c3-ac3c-4427c4cb03e3 type crucible underlay IP fd00:1122:3344:102::b (unchanged) + sled fa3c33bf-bc6c-4d29-8dcc-c16b9ab1ec89 + zone config generation 5 + zone 185e6b00-7e18-456c-9dfc-edd0194ac207 type crucible underlay IP fd00:1122:3344:103::a (unchanged) + zone 1fc78bc7-feca-4a10-9beb-58b53fda5210 type crucible underlay IP fd00:1122:3344:103::10 (unchanged) + zone 45dccfd6-4674-43c6-a0e7-4dde5f24d90d type internal_dns underlay IP fd00:1122:3344:3::1 (unchanged) + zone 4b74c418-0a08-4489-b652-e97267c1c220 type crucible underlay IP fd00:1122:3344:103::7 (unchanged) + zone 54c947d2-6355-453c-80fc-8f49cc2129ee type nexus underlay IP fd00:1122:3344:103::5 (unchanged) + zone 60d64b70-89f5-4774-9e41-2c79c4173911 type crucible_pantry underlay IP 
fd00:1122:3344:103::6 (unchanged) + zone 921958c0-7761-4294-945d-2a078327bf2c type crucible underlay IP fd00:1122:3344:103::8 (unchanged) + zone 9df22034-59f6-431f-82e7-258fe983879b type external_dns underlay IP fd00:1122:3344:103::4 (unchanged) + zone bb21c0c4-c43f-4913-9bf9-707bd7f1769a type internal_ntp underlay IP fd00:1122:3344:103::11 (unchanged) + zone ccb0d612-9d7c-41ba-bd51-661c3c75ec91 type crucible underlay IP fd00:1122:3344:103::9 (unchanged) + zone ccef3e95-cae5-4072-9b7d-49e56fa9dff4 type crucible underlay IP fd00:1122:3344:103::d (unchanged) + zone d42fc874-8416-4212-b5a0-febba4c6b0ac type crucible underlay IP fd00:1122:3344:103::b (unchanged) + zone d5ba7d45-26d7-45ca-8c0a-a560f243b5fa type crucible underlay IP fd00:1122:3344:103::e (unchanged) + zone da27ee6a-9841-46aa-acda-7cce3e04cb60 type cockroach_db underlay IP fd00:1122:3344:103::3 (unchanged) + zone e09525cf-9c87-4d05-8860-1356dd0dee8a type crucible underlay IP fd00:1122:3344:103::f (unchanged) + zone f7264a3e-e8e7-4476-8df3-76c9d00a383b type crucible underlay IP fd00:1122:3344:103::c (unchanged) + + +> + +> sled-add dde1c0e2-b10d-4621-b420-f179f7a7a00a +added sled + +> + +> blueprint-plan cfa03594-f438-4736-a416-6307f7bf8f2e 83ed3949-d221-44f3-a901-02e9ff16d2a5 +generated blueprint REDACTED_UUID based on parent blueprint cfa03594-f438-4736-a416-6307f7bf8f2e + diff --git a/dev-tools/reconfigurator-cli/tests/output/cmd-stderr b/dev-tools/reconfigurator-cli/tests/output/cmd-stderr new file mode 100644 index 0000000000..e69de29bb2 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmd-stdout b/dev-tools/reconfigurator-cli/tests/output/cmd-stdout new file mode 100644 index 0000000000..10b158f218 --- /dev/null +++ b/dev-tools/reconfigurator-cli/tests/output/cmd-stdout @@ -0,0 +1,58 @@ +> sled-list +ID NZPOOLS SUBNET + +> inventory-list +ID NERRORS TIME_DONE + +> blueprint-list +ID + +> + +> sled-show REDACTED_UUID_REDACTED_UUID_REDACTED +error: no sled with id REDACTED_UUID_REDACTED_UUID_REDACTED + +> sled-add REDACTED_UUID_REDACTED_UUID_REDACTED +added sled + +> sled-list +ID NZPOOLS SUBNET +REDACTED_UUID_REDACTED_UUID_REDACTED 10 fd00:1122:3344:101::/64 + +> sled-show REDACTED_UUID_REDACTED_UUID_REDACTED +sled REDACTED_UUID_REDACTED_UUID_REDACTED +subnet fd00:1122:3344:101::/64 +zpools (10): + ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") + ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") + ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") + ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") + ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") + ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") + ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") + ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") + ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") + ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") + + +> sled-add REDACTED_UUID_REDACTED_UUID_REDACTED +added sled + +> sled-add REDACTED_UUID_REDACTED_UUID_REDACTED +added sled + +> sled-list +ID NZPOOLS SUBNET +REDACTED_UUID_REDACTED_UUID_REDACTED 10 fd00:1122:3344:103::/64 +REDACTED_UUID_REDACTED_UUID_REDACTED 10 fd00:1122:3344:102::/64 +REDACTED_UUID_REDACTED_UUID_REDACTED 10 fd00:1122:3344:101::/64 + +> + +> inventory-generate +generated inventory collection REDACTED_UUID_REDACTED_UUID_REDACTED from configured sleds + +> inventory-list +ID NERRORS TIME_DONE +REDACTED_UUID_REDACTED_UUID_REDACTED 0 + diff --git a/dev-tools/reconfigurator-cli/tests/test_basic.rs b/dev-tools/reconfigurator-cli/tests/test_basic.rs new file mode 
100644 index 0000000000..f70fcb1366 --- /dev/null +++ b/dev-tools/reconfigurator-cli/tests/test_basic.rs @@ -0,0 +1,115 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use anyhow::Context; +use camino::Utf8Path; +use expectorate::assert_contents; +use nexus_types::deployment::UnstableReconfiguratorState; +use omicron_test_utils::dev::test_cmds::assert_exit_code; +use omicron_test_utils::dev::test_cmds::path_to_executable; +use omicron_test_utils::dev::test_cmds::redact_variable; +use omicron_test_utils::dev::test_cmds::run_command; +use omicron_test_utils::dev::test_cmds::EXIT_SUCCESS; +use std::path::PathBuf; +use subprocess::Exec; + +fn path_to_cli() -> PathBuf { + path_to_executable(env!("CARGO_BIN_EXE_reconfigurator-cli")) +} + +// Run a battery of simple commands and make sure things basically seem to work. +#[test] +fn test_basic() { + let exec = Exec::cmd(path_to_cli()).arg("tests/input/cmds.txt"); + let (exit_status, stdout_text, stderr_text) = run_command(exec); + assert_exit_code(exit_status, EXIT_SUCCESS, &stderr_text); + let stdout_text = redact_variable(&stdout_text, &[]); + assert_contents("tests/output/cmd-stdout", &stdout_text); + assert_contents("tests/output/cmd-stderr", &stderr_text); +} + +// To do more interesting testing, we need more interesting data than we can +// currently cons up in the REPL environment. This test uses a file saved from +// the "madrid" rig using `omdb db reconfigurator-save`. +// +// This test may be broken if the format of this file changes (see the +// UnstableReconfiguratorState struct). To fix it, generate a new file from a +// real system with at least one inventory collection and at least two +// blueprints. You will also need to set EXPECTORATE=overwrite when running +// this for the first time with the new input file in order to generate the new +// output file. Check it. +// +// If this test becomes too burdensome, we could drop it. But for now it's +// useful to have a smoke test for the command. +#[test] +fn test_complex() { + let input_path = Utf8Path::new("tests/input/complex.json"); + let input_file = std::fs::File::open(input_path) + .with_context(|| format!("open {:?}", input_path)) + .unwrap(); + let input_data: UnstableReconfiguratorState = + serde_json::from_reader(&input_file) + .with_context(|| format!("read {:?}", input_path)) + .unwrap(); + assert!( + input_data.collections.len() > 0, + "input file must have at least one inventory collection" + ); + assert!( + input_data.blueprints.len() > 1, + "input file must have at least two blueprints" + ); + + let collection = input_data.collections.iter().next().unwrap().id; + let mut blueprints = input_data.blueprints.iter().rev(); + let blueprint2 = blueprints.next().unwrap().id; + let blueprint1 = blueprints.next().unwrap().id; + let tmpdir = camino_tempfile::tempdir().expect("failed to create tmpdir"); + let tmpfile = tmpdir.path().join("cmds.txt"); + + // We construct the list of commands dynamically to avoid having to re-do it + // when the input file has to be regenerated. 
+ let input_cmds = [ + "sled-list", + "inventory-list", + "blueprint-list", + &format!("file-contents {}", input_path), + &format!("load {} {}", input_path, collection), + "sled-list", + "inventory-list", + "blueprint-list", + &format!("blueprint-show {}", blueprint2), + &format!("blueprint-diff-inventory {} {}", collection, blueprint2), + &format!("blueprint-diff {} {}", blueprint1, blueprint2), + &format!("blueprint-diff {} {}", blueprint2, blueprint1), + "sled-add dde1c0e2-b10d-4621-b420-f179f7a7a00a", + &format!("blueprint-plan {} {}", blueprint2, collection), + ] + .into_iter() + .map(|s| format!("{}\n", s)) + .collect::<Vec<_>>() + .join("\n"); + + println!("will execute commands:\n{}", input_cmds); + + std::fs::write(&tmpfile, &input_cmds) + .with_context(|| format!("write {:?}", &tmpfile)) + .unwrap(); + + let exec = Exec::cmd(path_to_cli()).arg(&tmpfile); + let (exit_status, stdout_text, stderr_text) = run_command(exec); + assert_exit_code(exit_status, EXIT_SUCCESS, &stderr_text); + + // This is a much lighter form of redaction than `redact_variable()` does. + let stdout_text = + regex::Regex::new(r"generated blueprint .* based on parent blueprint") + .unwrap() + .replace_all( + &stdout_text, + "generated blueprint REDACTED_UUID based on parent blueprint", + ) + .to_string(); + + assert_contents("tests/output/cmd-complex-stdout", &stdout_text); +} diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index de79f3429d..45d1c34382 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -84,6 +84,7 @@ nexus-db-queries.workspace = true nexus-inventory.workspace = true nexus-reconfigurator-execution.workspace = true nexus-reconfigurator-planning.workspace = true +nexus-reconfigurator-preparation.workspace = true nexus-types.workspace = true omicron-common.workspace = true omicron-passwords.workspace = true diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index a00318c9dc..1dae1030f0 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -7,6 +7,7 @@ use crate::authz; use crate::authz::ApiResource; use crate::context::OpContext; use crate::db; +use crate::db::datastore::SQL_BATCH_SIZE; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::pagination::paginated; @@ -49,18 +50,8 @@ use omicron_common::api::external::ResourceType; use omicron_common::bail_unless; use std::collections::BTreeMap; use std::collections::BTreeSet; -use std::num::NonZeroU32; use uuid::Uuid; -/// "limit" used in SQL queries that paginate through all sleds, omicron -/// zones, etc. -/// -/// While we always load an entire blueprint in one operation, we use a -/// [`Paginator`] to guard against single queries returning an unchecked number -/// of rows. -// unsafe: `new_unchecked` is only unsound if the argument is 0. -const SQL_BATCH_SIZE: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1000) }; - impl DataStore { /// List blueprints pub async fn blueprints_list(
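The two datastore files that follow (ip_pool.rs and zpool.rs) both add batched variants of existing paginated list methods, and both use the same loop: fetch one page, feed the last key seen back into the paginator, and stop when a batch comes back short. The sketch below is a self-contained toy mirroring that loop with plain u32 keys rather than the real omicron `Paginator` API (the function and its types are illustrative, not from the patch); like the real code, it assumes the data is sorted ascending by the pagination key:

fn list_all_batched(data: &[u32], batch_size: usize) -> Vec<u32> {
    let mut out = Vec::new();
    // The cursor is the last key seen; `None` means "start from the top".
    let mut cursor: Option<u32> = None;
    loop {
        // Analogue of `p.current_pagparams()`: up to `batch_size` items
        // strictly greater than the cursor, in ascending order.
        let batch: Vec<u32> = data
            .iter()
            .copied()
            .filter(|&x| cursor.map_or(true, |c| x > c))
            .take(batch_size)
            .collect();
        // Analogue of `p.found_batch(...)`: a short batch means we have
        // everything; otherwise remember the last key for the next page.
        let done = batch.len() < batch_size;
        cursor = batch.last().copied().or(cursor);
        out.extend(batch);
        if done {
            break;
        }
    }
    out
}

In the real methods below, each `while let Some(p) = paginator.next()` iteration is one trip around this loop, with `SQL_BATCH_SIZE` (1000 rows, per the constant moved out of deployment.rs above) as the page size and the pagination key supplied per table: `last_address` for IP pool ranges, `z.id()` for zpools.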
use super::DataStore; +use super::SQL_BATCH_SIZE; use crate::authz; use crate::context::OpContext; use crate::db; @@ -25,6 +26,7 @@ use crate::db::model::IpPoolResourceType; use crate::db::model::IpPoolUpdate; use crate::db::model::Name; use crate::db::pagination::paginated; +use crate::db::pagination::Paginator; use crate::db::pool::DbConnection; use crate::db::queries::ip_pool::FilterOverlappingIpRanges; use crate::db::TransactionError; @@ -669,6 +671,33 @@ impl DataStore { }) } + /// List all IP pool ranges for a given pool, making as many queries as + /// needed to get them all + /// + /// This should generally not be used in API handlers or other + /// latency-sensitive contexts, but it can make sense in saga actions or + /// background tasks. + pub async fn ip_pool_list_ranges_batched( + &self, + opctx: &OpContext, + authz_pool: &authz::IpPool, + ) -> ListResultVec<IpPoolRange> { + opctx.check_complex_operations_allowed()?; + let mut ip_ranges = Vec::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = self + .ip_pool_list_ranges(opctx, &authz_pool, &p.current_pagparams()) + .await?; + // The use of `last_address` here assumes `paginator` is sorting + // in Ascending order (which it does - see the implementation of + // `current_pagparams()`). + paginator = p.found_batch(&batch, &|r| r.last_address); + ip_ranges.extend(batch); + } + Ok(ip_ranges) + } + pub async fn ip_pool_add_range( &self, opctx: &OpContext, diff --git a/nexus/db-queries/src/db/datastore/zpool.rs b/nexus/db-queries/src/db/datastore/zpool.rs index 79e5f5a55a..5424c126eb 100644 --- a/nexus/db-queries/src/db/datastore/zpool.rs +++ b/nexus/db-queries/src/db/datastore/zpool.rs @@ -5,6 +5,7 @@ //! [`DataStore`] methods on [`Zpool`]s. use super::DataStore; +use super::SQL_BATCH_SIZE; use crate::authz; use crate::db; use crate::db::collection_insert::AsyncInsertError; @@ -16,6 +17,7 @@ use crate::db::identity::Asset; use crate::db::model::Sled; use crate::db::model::Zpool; use crate::db::pagination::paginated; +use crate::db::pagination::Paginator; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; @@ -66,8 +68,8 @@ impl DataStore { }) } - /// Paginates through all zpools on U.2 disks in all sleds - pub async fn zpool_list_all_external( + /// Fetches a page of the list of all zpools on U.2 disks in all sleds + async fn zpool_list_all_external( &self, opctx: &OpContext, pagparams: &DataPageParams<'_, Uuid>, @@ -90,4 +92,30 @@ impl DataStore { .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + + /// List all zpools on U.2 disks in all sleds, making as many queries as + /// needed to get them all + /// + /// This should generally not be used in API handlers or other + /// latency-sensitive contexts, but it can make sense in saga actions or + /// background tasks.
+ pub async fn zpool_list_all_external_batched( + &self, + opctx: &OpContext, + ) -> ListResultVec<Zpool> { + opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + opctx.check_complex_operations_allowed()?; + let mut zpools = Vec::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = self + .zpool_list_all_external(opctx, &p.current_pagparams()) + .await?; + paginator = + p.found_batch(&batch, &|z: &nexus_db_model::Zpool| z.id()); + zpools.extend(batch); + } + + Ok(zpools) + } } diff --git a/nexus/inventory/src/builder.rs b/nexus/inventory/src/builder.rs index 08a905143c..602890a440 100644 --- a/nexus/inventory/src/builder.rs +++ b/nexus/inventory/src/builder.rs @@ -93,11 +93,14 @@ impl CollectionBuilder { /// `collector` is an arbitrary string describing the agent that collected /// this data. It's generally a Nexus instance uuid but it can be anything. /// It's just for debugging. - pub fn new(collector: &str) -> Self { + pub fn new<S>(collector: S) -> Self + where + String: From<S>, + { CollectionBuilder { errors: vec![], time_started: now_db_precision(), - collector: collector.to_owned(), + collector: String::from(collector), baseboards: BTreeSet::new(), cabooses: BTreeSet::new(), rot_pages: BTreeSet::new(), diff --git a/nexus/reconfigurator/planning/Cargo.toml b/nexus/reconfigurator/planning/Cargo.toml index 3ec616168c..41ea17d56e 100644 --- a/nexus/reconfigurator/planning/Cargo.toml +++ b/nexus/reconfigurator/planning/Cargo.toml @@ -6,6 +6,8 @@ edition = "2021" [dependencies] anyhow.workspace = true chrono.workspace = true +gateway-client.workspace = true +indexmap.workspace = true internal-dns.workspace = true ipnet.workspace = true ipnetwork.workspace = true @@ -13,6 +15,7 @@ nexus-config.workspace = true nexus-inventory.workspace = true nexus-types.workspace = true omicron-common.workspace = true +sled-agent-client.workspace = true slog.workspace = true thiserror.workspace = true uuid.workspace = true diff --git a/nexus/reconfigurator/planning/src/blueprint_builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder.rs index ebb64c36f9..d541e112d5 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder.rs @@ -462,21 +462,6 @@ impl<'a> BlueprintBuilder<'a> { sled_id: Uuid, desired_zone_count: usize, ) -> Result<EnsureMultiple, Error> { - // How many Nexus zones do we need to add? - let nexus_count = self.sled_num_nexus_zones(sled_id); - let num_nexus_to_add = match desired_zone_count.checked_sub(nexus_count) - { - Some(0) => return Ok(EnsureMultiple::NotNeeded), - Some(n) => n, - None => { - return Err(Error::Planner(anyhow!( - "removing a Nexus zone not yet supported \ - (sled {sled_id} has {nexus_count}; \ - planner wants {desired_zone_count})" - ))); - } - }; - // Whether Nexus should use TLS and what the external DNS servers it // should use are currently provided at rack-setup time, and should be // consistent across all Nexus instances. We'll assume we can copy them @@ -503,6 +488,35 @@ impl<'a> BlueprintBuilder<'a> { }) }) .ok_or(Error::NoNexusZonesInParentBlueprint)?; + self.sled_ensure_zone_multiple_nexus_with_config( + sled_id, + desired_zone_count, + external_tls, + external_dns_servers, + ) + } + + pub fn sled_ensure_zone_multiple_nexus_with_config( + &mut self, + sled_id: Uuid, + desired_zone_count: usize, + external_tls: bool, + external_dns_servers: Vec<IpAddr>, + ) -> Result<EnsureMultiple, Error> { + // How many Nexus zones do we need to add?
+ let nexus_count = self.sled_num_nexus_zones(sled_id); + let num_nexus_to_add = match desired_zone_count.checked_sub(nexus_count) + { + Some(0) => return Ok(EnsureMultiple::NotNeeded), + Some(n) => n, + None => { + return Err(Error::Planner(anyhow!( + "removing a Nexus zone not yet supported \ + (sled {sled_id} has {nexus_count}; \ + planner wants {desired_zone_count})" + ))); + } + }; for _ in 0..num_nexus_to_add { let nexus_id = Uuid::new_v4(); @@ -736,203 +750,124 @@ impl<'a> BlueprintZones<'a> { #[cfg(test)] pub mod test { use super::*; - use nexus_types::external_api::views::SledPolicy; - use nexus_types::external_api::views::SledState; + use crate::system::SledBuilder; + use crate::system::SystemDescription; use omicron_common::address::IpRange; - use omicron_common::address::Ipv4Range; - use omicron_common::address::Ipv6Subnet; - use omicron_common::address::SLED_PREFIX; - use omicron_common::api::external::ByteCount; use omicron_test_utils::dev::test_setup_log; use sled_agent_client::types::{ - Baseboard, Inventory, OmicronZoneConfig, OmicronZoneDataset, - OmicronZoneType, OmicronZonesConfig, SledRole, + OmicronZoneConfig, OmicronZoneType, OmicronZonesConfig, }; - use std::str::FromStr; pub const DEFAULT_N_SLEDS: usize = 3; - /// Returns a collection and policy describing a pretty simple system. - /// - /// `n_sleds` is the number of sleds supported. Currently, this value can - /// be anywhere between 0 and 5. (More can be added in the future if - /// necessary.) - pub fn example(n_sleds: usize) -> (Collection, Policy) { - let mut builder = nexus_inventory::CollectionBuilder::new("test-suite"); - - if n_sleds > 5 { - panic!("example() only supports up to 5 sleds, but got {n_sleds}"); - } + pub struct ExampleSystem { + pub system: SystemDescription, + pub policy: Policy, + pub collection: Collection, + pub blueprint: Blueprint, + } - let sled_ids = [ - "72443b6c-b8bb-4ffa-ab3a-aeaa428ed79b", - "a5f3db3a-61aa-4f90-ad3e-02833c253bf5", - "0d168386-2551-44e8-98dd-ae7a7570f8a0", - "aaaaa1a1-0c3f-4928-aba7-6ec5c1db05f7", - "85e88acb-7b86-45ff-9c88-734e1da71c3d", - ]; - let mut policy = Policy { - sleds: BTreeMap::new(), - // IPs from TEST-NET-1 (RFC 5737) - service_ip_pool_ranges: vec![Ipv4Range::new( - "192.0.2.2".parse().unwrap(), - "192.0.2.20".parse().unwrap(), - ) - .unwrap() - .into()], - target_nexus_zone_count: 3, - }; - let mut service_ip_pool_range = policy.service_ip_pool_ranges[0].iter(); - let mut nexus_nic_ips = NEXUS_OPTE_IPV4_SUBNET - .iter() - .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES); - let mut nexus_nic_macs = { - let mut used = HashSet::new(); - std::iter::from_fn(move || { - let mut mac = MacAddr::random_system(); - while !used.insert(mac) { - mac = MacAddr::random_system(); - } - Some(mac) - }) - }; + impl ExampleSystem { + pub fn new(log: &slog::Logger, nsleds: usize) -> ExampleSystem { + let mut system = SystemDescription::new(); + let sled_ids: Vec<_> = + (0..nsleds).map(|_| Uuid::new_v4()).collect(); + for sled_id in &sled_ids { + let _ = system.sled(SledBuilder::new().id(*sled_id)).unwrap(); + } - for sled_id_str in sled_ids.iter().take(n_sleds) { - let sled_id: Uuid = sled_id_str.parse().unwrap(); - let sled_ip = policy_add_sled(&mut policy, sled_id); - let serial_number = format!("s{}", policy.sleds.len()); - builder - .found_sled_inventory( - "test-suite", - Inventory { - baseboard: Baseboard::Gimlet { - identifier: serial_number, - model: String::from("model1"), - revision: 0, + let policy = system.to_policy().expect("failed to make policy"); + let mut 
inventory_builder = system + .to_collection_builder() + .expect("failed to build collection"); + + // For each sled, have it report 0 zones in the initial inventory. + // This will enable us to build a blueprint from the initial + // inventory, which we can then use to build new blueprints. + for sled_id in &sled_ids { + inventory_builder + .found_sled_omicron_zones( + "fake sled agent", + *sled_id, + OmicronZonesConfig { + generation: Generation::new(), + zones: vec![], }, - reservoir_size: ByteCount::from(1024), - sled_role: SledRole::Gimlet, - sled_agent_address: SocketAddrV6::new( - sled_ip, 12345, 0, 0, - ) - .to_string(), - sled_id, - usable_hardware_threads: 10, - usable_physical_ram: ByteCount::from(1024 * 1024), - }, + ) + .expect("recording Omicron zones"); + } + + let empty_zone_inventory = inventory_builder.build(); + let initial_blueprint = + BlueprintBuilder::build_initial_from_collection( + &empty_zone_inventory, + Generation::new(), + &policy, + "test suite", ) .unwrap(); - let zpools = &policy.sleds.get(&sled_id).unwrap().zpools; - let mut sled_ips = - std::iter::successors(Some(sled_ip.saturating_add(1)), |ip| { - println!("sled_ips iterator: currently {ip:?}"); - Some(ip.saturating_add(1)) - }); - let zones: Vec<_> = std::iter::once({ - let ip = sled_ips.next().unwrap(); - OmicronZoneConfig { - id: Uuid::new_v4(), - underlay_address: ip, - zone_type: OmicronZoneType::InternalNtp { - address: SocketAddrV6::new(ip, 12345, 0, 0).to_string(), - dns_servers: vec![], - domain: None, - ntp_servers: vec![], - }, - } - }) - .chain(std::iter::once({ - let id = Uuid::new_v4(); - let ip = sled_ips.next().unwrap(); - let external_ip = - service_ip_pool_range.next().expect("no service IPs left"); - let nic_ip = - nexus_nic_ips.next().expect("no nexus nic IPs left"); - OmicronZoneConfig { - id, - underlay_address: ip, - zone_type: OmicronZoneType::Nexus { - internal_address: SocketAddrV6::new(ip, 12346, 0, 0) - .to_string(), - external_ip, - nic: NetworkInterface { - id: Uuid::new_v4(), - kind: NetworkInterfaceKind::Service(id), - name: format!("nexus-{id}").parse().unwrap(), - ip: nic_ip.into(), - mac: nexus_nic_macs - .next() - .expect("no nexus nic MACs left"), - subnet: IpNet::from(*NEXUS_OPTE_IPV4_SUBNET).into(), - vni: Vni::SERVICES_VNI, - primary: true, - slot: 0, - }, - external_tls: false, - external_dns_servers: Vec::new(), - }, - } - })) - .chain(zpools.iter().map(|zpool_name| { - let ip = sled_ips.next().unwrap(); - OmicronZoneConfig { - id: Uuid::new_v4(), - underlay_address: ip, - zone_type: OmicronZoneType::Crucible { - address: String::from("[::1]:12345"), - dataset: OmicronZoneDataset { - pool_name: zpool_name.clone(), - }, - }, + // Now make a blueprint and collection with some zones on each sled. 
+ let mut builder = BlueprintBuilder::new_based_on( + &log, + &initial_blueprint, + Generation::new(), + &policy, + "test suite", + ) + .unwrap(); + for (sled_id, sled_resources) in &policy.sleds { + let _ = builder.sled_ensure_zone_ntp(*sled_id).unwrap(); + let _ = builder + .sled_ensure_zone_multiple_nexus_with_config( + *sled_id, + 1, + false, + vec![], + ) + .unwrap(); + for pool_name in &sled_resources.zpools { + let _ = builder + .sled_ensure_zone_crucible(*sled_id, pool_name.clone()) + .unwrap(); } - })) - .collect(); + } - builder - .found_sled_omicron_zones( - "test-suite", - sled_id, - OmicronZonesConfig { - generation: Generation::new().next(), - zones, - }, - ) - .unwrap(); - } + let blueprint = builder.build(); + let mut builder = system + .to_collection_builder() + .expect("failed to build collection"); - let collection = builder.build(); + for sled_id in blueprint.sleds() { + let Some(zones) = blueprint.omicron_zones.get(&sled_id) else { + continue; + }; + builder + .found_sled_omicron_zones( + "fake sled agent", + sled_id, + zones.clone(), + ) + .unwrap(); + } - (collection, policy) + ExampleSystem { + system, + policy, + collection: builder.build(), + blueprint, + } + } } - pub fn policy_add_sled(policy: &mut Policy, sled_id: Uuid) -> Ipv6Addr { - let i = policy.sleds.len() + 1; - let sled_ip: Ipv6Addr = - format!("fd00:1122:3344:{}::1", i + 1).parse().unwrap(); - - let zpools: BTreeSet<ZpoolName> = [ - "oxp_be776cf5-4cba-4b7d-8109-3dfd020f22ee", - "oxp_aee23a17-b2ce-43f2-9302-c738d92cca28", - "oxp_f7940a6b-c865-41cf-ad61-1b831d594286", - ] - .iter() - .map(|name_str| { - ZpoolName::from_str(name_str).expect("not a valid zpool name") - }) - .collect(); - - let subnet = Ipv6Subnet::<SLED_PREFIX>::new(sled_ip); - policy.sleds.insert( - sled_id, - SledResources { - policy: SledPolicy::provisionable(), - state: SledState::Active, - zpools, - subnet, - }, - ); - sled_ip + /// Returns a collection and policy describing a pretty simple system. + /// + /// `n_sleds` is the number of sleds supported. Currently, this value can + /// be anywhere between 0 and 5. (More can be added in the future if + /// necessary.) + pub fn example(log: &slog::Logger, nsleds: usize) -> (Collection, Policy) { + let example = ExampleSystem::new(log, nsleds); + (example.collection, example.policy) } /// Checks various conditions that should be true for all blueprints @@ -959,7 +894,7 @@ pub mod test { // Test creating a blueprint from a collection and verifying that it // describes no changes.
let logctx = test_setup_log("blueprint_builder_test_initial"); - let (collection, policy) = example(DEFAULT_N_SLEDS); + let (collection, policy) = example(&logctx.log, DEFAULT_N_SLEDS); let blueprint_initial = BlueprintBuilder::build_initial_from_collection( &collection, @@ -1009,29 +944,23 @@ pub mod test { #[test] fn test_basic() { let logctx = test_setup_log("blueprint_builder_test_basic"); - let (collection, mut policy) = example(DEFAULT_N_SLEDS); - let blueprint1 = BlueprintBuilder::build_initial_from_collection( - &collection, - Generation::new(), - &policy, - "the_test", - ) - .expect("failed to create initial blueprint"); - verify_blueprint(&blueprint1); + let mut example = ExampleSystem::new(&logctx.log, DEFAULT_N_SLEDS); + let blueprint1 = &example.blueprint; + verify_blueprint(blueprint1); let mut builder = BlueprintBuilder::new_based_on( &logctx.log, - &blueprint1, + blueprint1, Generation::new(), - &policy, + &example.policy, "test_basic", ) .expect("failed to create builder"); - // The initial blueprint should have internal NTP zones on all the + // The example blueprint should have internal NTP zones on all the // existing sleds, plus Crucible zones on all pools. So if we ensure // all these zones exist, we should see no change. - for (sled_id, sled_resources) in &policy.sleds { + for (sled_id, sled_resources) in &example.policy.sleds { builder.sled_ensure_zone_ntp(*sled_id).unwrap(); for pool_name in &sled_resources.zpools { builder @@ -1053,7 +982,9 @@ pub mod test { // The next step is adding these zones to a new sled. let new_sled_id = Uuid::new_v4(); - let _ = policy_add_sled(&mut policy, new_sled_id); + let _ = + example.system.sled(SledBuilder::new().id(new_sled_id)).unwrap(); + let policy = example.system.to_policy().unwrap(); let mut builder = BlueprintBuilder::new_based_on( &logctx.log, &blueprint2, @@ -1135,7 +1066,7 @@ pub mod test { let logctx = test_setup_log( "blueprint_builder_test_add_nexus_with_no_existing_nexus_zones", ); - let (mut collection, policy) = example(DEFAULT_N_SLEDS); + let (mut collection, policy) = example(&logctx.log, DEFAULT_N_SLEDS); // We don't care about the internal DNS version here. let internal_dns_version = Generation::new(); @@ -1191,7 +1122,7 @@ pub mod test { fn test_add_nexus_error_cases() { let logctx = test_setup_log("blueprint_builder_test_add_nexus_error_cases"); - let (mut collection, policy) = example(DEFAULT_N_SLEDS); + let (mut collection, policy) = example(&logctx.log, DEFAULT_N_SLEDS); // We don't care about the internal DNS version here. let internal_dns_version = Generation::new(); @@ -1312,7 +1243,7 @@ pub mod test { "blueprint_builder_test_invalid_parent_blueprint_\ two_zones_with_same_external_ip", ); - let (mut collection, policy) = example(DEFAULT_N_SLEDS); + let (mut collection, policy) = example(&logctx.log, DEFAULT_N_SLEDS); // We should fail if the parent blueprint claims to contain two // zones with the same external IP. Skim through the zones, copy the @@ -1370,7 +1301,7 @@ pub mod test { "blueprint_builder_test_invalid_parent_blueprint_\ two_nexus_zones_with_same_nic_ip", ); - let (mut collection, policy) = example(DEFAULT_N_SLEDS); + let (mut collection, policy) = example(&logctx.log, DEFAULT_N_SLEDS); // We should fail if the parent blueprint claims to contain two // Nexus zones with the same NIC IP. 
Skim through the zones, copy @@ -1426,7 +1357,7 @@ pub mod test { "blueprint_builder_test_invalid_parent_blueprint_\ two_zones_with_same_vnic_mac", ); - let (mut collection, policy) = example(DEFAULT_N_SLEDS); + let (mut collection, policy) = example(&logctx.log, DEFAULT_N_SLEDS); // We should fail if the parent blueprint claims to contain two // zones with the same service vNIC MAC address. Skim through the diff --git a/nexus/reconfigurator/planning/src/lib.rs b/nexus/reconfigurator/planning/src/lib.rs index 546f2c1dc1..e0a61826f0 100644 --- a/nexus/reconfigurator/planning/src/lib.rs +++ b/nexus/reconfigurator/planning/src/lib.rs @@ -118,3 +118,4 @@ pub mod blueprint_builder; mod ip_allocator; pub mod planner; +pub mod system; diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 9f4b653507..bae3dfd68a 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -319,10 +319,11 @@ impl<'a> Planner<'a> { mod test { use super::Planner; use crate::blueprint_builder::test::example; - use crate::blueprint_builder::test::policy_add_sled; use crate::blueprint_builder::test::verify_blueprint; + use crate::blueprint_builder::test::ExampleSystem; use crate::blueprint_builder::test::DEFAULT_N_SLEDS; use crate::blueprint_builder::BlueprintBuilder; + use crate::system::SledBuilder; use nexus_inventory::now_db_precision; use nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledProvisionPolicy; @@ -341,14 +342,14 @@ mod test { let internal_dns_version = Generation::new(); // Use our example inventory collection. - let (mut collection, mut policy) = example(DEFAULT_N_SLEDS); + let mut example = ExampleSystem::new(&logctx.log, DEFAULT_N_SLEDS); // Build the initial blueprint. We don't bother verifying it here // because there's a separate test for that. let blueprint1 = BlueprintBuilder::build_initial_from_collection( - &collection, + &example.collection, internal_dns_version, - &policy, + &example.policy, "the_test", ) .expect("failed to create initial blueprint"); @@ -361,9 +362,9 @@ mod test { logctx.log.clone(), &blueprint1, internal_dns_version, - &policy, + &example.policy, "no-op?", - &collection, + &example.collection, ) .expect("failed to create planner") .plan() @@ -379,7 +380,9 @@ mod test { // Now add a new sled. let new_sled_id = "7097f5b3-5896-4fff-bd97-63a9a69563a9".parse().unwrap(); - let _ = policy_add_sled(&mut policy, new_sled_id); + let _ = + example.system.sled(SledBuilder::new().id(new_sled_id)).unwrap(); + let policy = example.system.to_policy().unwrap(); // Check that the first step is to add an NTP zone let blueprint3 = Planner::new_based_on( @@ -388,7 +391,7 @@ mod test { internal_dns_version, &policy, "test: add NTP?", - &collection, + &example.collection, ) .expect("failed to create planner") .plan() @@ -421,7 +424,7 @@ mod test { internal_dns_version, &policy, "test: add nothing more", - &collection, + &example.collection, ) .expect("failed to create planner") .plan() @@ -434,6 +437,7 @@ mod test { verify_blueprint(&blueprint4); // Now update the inventory to have the requested NTP zone. 
+ let mut collection = example.collection.clone(); assert!(collection .omicron_zones .insert( @@ -479,7 +483,7 @@ mod test { assert_eq!(sled_changes.zones_removed().count(), 0); assert_eq!(sled_changes.zones_changed().count(), 0); let zones = sled_changes.zones_added().collect::<Vec<_>>(); - assert_eq!(zones.len(), 3); + assert_eq!(zones.len(), 10); for zone in &zones { let OmicronZoneType::Crucible { .. } = zone.zone_type else { panic!("unexpectedly added a non-Crucible zone: {zone:?}"); @@ -522,7 +526,8 @@ mod test { // Use our example inventory collection as a starting point, but strip // it down to just one sled. let (sled_id, collection, mut policy) = { - let (mut collection, mut policy) = example(DEFAULT_N_SLEDS); + let (mut collection, mut policy) = + example(&logctx.log, DEFAULT_N_SLEDS); // Pick one sled ID to keep and remove the rest. let keep_sled_id = @@ -606,7 +611,7 @@ mod test { ); // Use our example inventory collection as a starting point. - let (collection, mut policy) = example(DEFAULT_N_SLEDS); + let (collection, mut policy) = example(&logctx.log, DEFAULT_N_SLEDS); // Build the initial blueprint. let blueprint1 = BlueprintBuilder::build_initial_from_collection( @@ -692,7 +697,7 @@ mod test { // and decommissioned sleds. (When we add more kinds of // non-provisionable states in the future, we'll have to add more // sleds.) - let (collection, mut policy) = example(5); + let (collection, mut policy) = example(&logctx.log, 5); // Build the initial blueprint. let blueprint1 = BlueprintBuilder::build_initial_from_collection( diff --git a/nexus/reconfigurator/planning/src/system.rs b/nexus/reconfigurator/planning/src/system.rs new file mode 100644 index 0000000000..5aff89df0d --- /dev/null +++ b/nexus/reconfigurator/planning/src/system.rs @@ -0,0 +1,578 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Builders for constructing descriptions of systems (real or synthetic) and +//! associated inventory collections and blueprints + +use anyhow::{anyhow, bail, ensure, Context}; +use gateway_client::types::RotState; +use gateway_client::types::SpState; +use indexmap::IndexMap; +use nexus_inventory::CollectionBuilder; +use nexus_types::deployment::Policy; +use nexus_types::deployment::SledResources; +use nexus_types::external_api::views::SledPolicy; +use nexus_types::external_api::views::SledProvisionPolicy; +use nexus_types::external_api::views::SledState; +use nexus_types::inventory::BaseboardId; +use nexus_types::inventory::PowerState; +use nexus_types::inventory::RotSlot; +use nexus_types::inventory::SledRole; +use nexus_types::inventory::SpType; +use nexus_types::inventory::ZpoolName; +use omicron_common::address::get_sled_address; +use omicron_common::address::IpRange; +use omicron_common::address::Ipv6Subnet; +use omicron_common::address::NEXUS_REDUNDANCY; +use omicron_common::address::RACK_PREFIX; +use omicron_common::address::SLED_PREFIX; +use omicron_common::api::external::ByteCount; +use std::collections::BTreeSet; +use std::fmt::Debug; +use std::net::Ipv4Addr; +use std::net::Ipv6Addr; +use uuid::Uuid; + +trait SubnetIterator: Iterator<Item = Ipv6Subnet<SLED_PREFIX>> + Debug {} +impl<T> SubnetIterator for T where + T: Iterator<Item = Ipv6Subnet<SLED_PREFIX>> + Debug +{ +} + +/// Describes an actual or synthetic Oxide rack for planning and testing +/// +/// From this description, you can extract a `Policy` or inventory `Collection`.
+/// There are a few intended purposes here: +/// +/// 1. to easily construct fake racks in automated tests for the Planner and +/// other parts of Reconfigurator +/// +/// 2. to explore the Planner's behavior via the `reconfigurator-cli` tool +/// +/// 3. eventually: to commonize code between Reconfigurator and RSS. This is +/// more speculative at this point, but the idea here is that RSS itself +/// could construct a `SystemDescription` and then use the facilities here to +/// assign subnets and maybe even lay out the initial set of zones (which +/// does not exist here yet). This way Reconfigurator and RSS are using the +/// same code to do this. +#[derive(Debug)] +pub struct SystemDescription { + collector: Option<String>, + sleds: IndexMap<Uuid, Sled>, + sled_subnets: Box<dyn SubnetIterator>, + available_non_scrimlet_slots: BTreeSet<u16>, + available_scrimlet_slots: BTreeSet<u16>, + target_nexus_zone_count: usize, + service_ip_pool_ranges: Vec<IpRange>, +} + +impl SystemDescription { + pub fn new() -> Self { + // Prepare sets of available slots (cubby numbers) for (1) all + // non-Scrimlet sleds, and (2) Scrimlets in particular. These do not + // overlap. + // + // We will use these in two places: + // + // (1) when the caller specifies what slot a sled should go into, to + // validate that the slot is available and make sure we don't use it + // again + // + // (2) when the caller adds a sled but leaves the slot unspecified, so + // that we can assign it a slot number + // + // We use `BTreeSet` because it efficiently expresses what we want, + // though the set sizes are small enough that it doesn't much matter. + let available_scrimlet_slots: BTreeSet<u16> = BTreeSet::from([14, 16]); + let available_non_scrimlet_slots: BTreeSet<u16> = (0..=31) + .collect::<BTreeSet<u16>>() + .difference(&available_scrimlet_slots) + .copied() + .collect(); + + // Prepare an iterator to allow us to assign sled subnets. + let rack_subnet_base: Ipv6Addr = + "fd00:1122:3344:0100::".parse().unwrap(); + let rack_subnet = + ipnet::Ipv6Net::new(rack_subnet_base, RACK_PREFIX).unwrap(); + // Skip the initial DNS subnet. + // (The same behavior is replicated in RSS in `Plan::create()` in + // sled-agent/src/rack_setup/plan/sled.rs.)
+ let sled_subnets = Box::new( + rack_subnet + .subnets(SLED_PREFIX) + .unwrap() + .skip(1) + .map(|s| Ipv6Subnet::new(s.network())), + ); + + // Policy defaults + let target_nexus_zone_count = NEXUS_REDUNDANCY; + // IPs from TEST-NET-1 (RFC 5737) + let service_ip_pool_ranges = vec![IpRange::try_from(( + "192.0.2.2".parse::<Ipv4Addr>().unwrap(), + "192.0.2.20".parse::<Ipv4Addr>().unwrap(), + )) + .unwrap()]; + + SystemDescription { + sleds: IndexMap::new(), + collector: None, + sled_subnets, + available_non_scrimlet_slots, + available_scrimlet_slots, + target_nexus_zone_count, + service_ip_pool_ranges, + } + } + + /// Returns a complete system deployed on a single Sled + pub fn single_sled() -> anyhow::Result<Self> { + let mut builder = SystemDescription::new(); + let sled = SledBuilder::new(); + builder.sled(sled)?; + Ok(builder) + } + + /// Returns a complete system resembling a full rack + pub fn full_rack() -> anyhow::Result<Self> { + let mut builder = SystemDescription::new(); + for slot_number in 1..32 { + let mut sled = SledBuilder::new(); + if slot_number == 14 || slot_number == 16 { + sled = sled.sled_role(SledRole::Scrimlet); + } + builder.sled(sled)?; + } + Ok(builder) + } + + pub fn collector_label<S>(&mut self, collector_label: S) -> &mut Self + where + String: From<S>, + { + self.collector = Some(String::from(collector_label)); + self + } + + pub fn target_nexus_zone_count(&mut self, count: usize) -> &mut Self { + self.target_nexus_zone_count = count; + self + } + + pub fn service_ip_pool_ranges( + &mut self, + ranges: Vec<IpRange>, + ) -> &mut Self { + self.service_ip_pool_ranges = ranges; + self + } + + /// Add a sled to the system, as described by a SledBuilder + pub fn sled(&mut self, sled: SledBuilder) -> anyhow::Result<&mut Self> { + let sled_id = sled.id.unwrap_or_else(Uuid::new_v4); + ensure!( + !self.sleds.contains_key(&sled_id), + "attempted to add sled with the same id as an existing one: {}", + sled_id + ); + let sled_subnet = self + .sled_subnets + .next() + .ok_or_else(|| anyhow!("ran out of IPv6 subnets for sleds"))?; + let hardware_slot = if let Some(slot) = sled.hardware_slot { + // If the caller specified a slot number, use that. + // Make sure it's still available, though. + if !self.available_scrimlet_slots.remove(&slot) + && !self.available_non_scrimlet_slots.remove(&slot) + { + bail!("sled slot {} was used twice", slot); + } + slot + } else if sled.sled_role == SledRole::Scrimlet { + // Otherwise, if this is a Scrimlet, it must be in one of the + // allowed Scrimlet slots. + self.available_scrimlet_slots + .pop_first() + .ok_or_else(|| anyhow!("ran out of slots for Scrimlets"))? + } else { + // Otherwise, prefer a non-Scrimlet slot, but allow a Scrimlet slot + // to be used if we run out of non-Scrimlet slots. + self.available_non_scrimlet_slots + .pop_first() + .or_else(|| self.available_scrimlet_slots.pop_first()) + .ok_or_else(|| anyhow!("ran out of slots for non-Scrimlets"))?
+ }; + + let sled = Sled::new_simulated( + sled_id, + sled_subnet, + sled.sled_role, + sled.unique, + sled.hardware, + hardware_slot, + sled.npools, + ); + self.sleds.insert(sled_id, sled); + Ok(self) + } + + /// Add a sled to the system based on information that came from the + /// database of an existing system + pub fn sled_full( + &mut self, + sled_id: Uuid, + sled_resources: SledResources, + inventory_sp: Option<SledHwInventory<'_>>, + inventory_sled_agent: &nexus_types::inventory::SledAgent, + ) -> anyhow::Result<&mut Self> { + ensure!( + !self.sleds.contains_key(&sled_id), + "attempted to add sled with the same id as an existing one: {}", + sled_id + ); + self.sleds.insert( + sled_id, + Sled::new_full( + sled_id, + sled_resources, + inventory_sp, + inventory_sled_agent, + ), + ); + Ok(self) + } + + pub fn to_collection_builder(&self) -> anyhow::Result<CollectionBuilder> { + let collector_label = self + .collector + .as_ref() + .cloned() + .unwrap_or_else(|| String::from("example")); + let mut builder = CollectionBuilder::new(collector_label); + + for s in self.sleds.values() { + if let Some((slot, sp_state)) = s.sp_state() { + builder + .found_sp_state( + "fake MGS 1", + SpType::Sled, + u32::from(*slot), + sp_state.clone(), + ) + .context("recording SP state")?; + } + + builder + .found_sled_inventory( + "fake sled agent", + s.sled_agent_inventory().clone(), + ) + .context("recording sled agent")?; + } + + Ok(builder) + } + + pub fn to_policy(&self) -> anyhow::Result<Policy> { + let sleds = self + .sleds + .values() + .map(|sled| { + let sled_resources = SledResources { + policy: sled.policy, + state: SledState::Active, + zpools: sled.zpools.iter().cloned().collect(), + subnet: sled.sled_subnet, + }; + (sled.sled_id, sled_resources) + }) + .collect(); + + Ok(Policy { + sleds, + service_ip_pool_ranges: self.service_ip_pool_ranges.clone(), + target_nexus_zone_count: self.target_nexus_zone_count, + }) + } +} + +#[derive(Clone, Debug)] +pub enum SledHardware { + Gimlet, + Pc, + Unknown, + Empty, +} + +#[derive(Clone, Debug)] +pub struct SledBuilder { + id: Option<Uuid>, + unique: Option<String>, + hardware: SledHardware, + hardware_slot: Option<u16>, + sled_role: SledRole, + npools: u8, +} + +impl SledBuilder { + /// Begin describing a sled to be added to a `SystemDescription` + pub fn new() -> Self { + SledBuilder { + id: None, + unique: None, + hardware: SledHardware::Gimlet, + hardware_slot: None, + sled_role: SledRole::Gimlet, + npools: 10, + } + } + + /// Set the id of the sled + /// + /// Default: randomly generated + pub fn id(mut self, id: Uuid) -> Self { + self.id = Some(id); + self + } + + /// Set a unique string used to generate the serial number and other + /// identifiers + /// + /// Default: randomly generated + pub fn unique<S>(mut self, unique: S) -> Self + where + String: From<S>, + { + self.unique = Some(String::from(unique)); + self + } + + /// Set the number of U.2 (external) pools this sled should have + /// + /// Default is currently `10` based on the typical value for a Gimlet + pub fn npools(mut self, npools: u8) -> Self { + self.npools = npools; + self + } + + /// Sets what type of hardware this sled uses + /// + /// Default: `SledHardware::Gimlet` + pub fn hardware(mut self, hardware: SledHardware) -> Self { + self.hardware = hardware; + self + } + + /// Sets which cubby in the rack the sled is in + /// + /// Default: determined based on sled role and unused slots + pub fn hardware_slot(mut self, hardware_slot: u16) -> Self { + self.hardware_slot = Some(hardware_slot); + self + } + + /// Sets whether this sled is attached to a
switch (`SledRole::Scrimlet`) or + /// not (`SledRole::Gimlet`) + pub fn sled_role(mut self, sled_role: SledRole) -> Self { + self.sled_role = sled_role; + self + } +} + +/// Convenience structure summarizing `Sled` inputs that come from inventory +#[derive(Debug)] +pub struct SledHwInventory<'a> { + pub baseboard_id: &'a BaseboardId, + pub sp: &'a nexus_types::inventory::ServiceProcessor, + pub rot: &'a nexus_types::inventory::RotState, +} + +/// Our abstract description of a `Sled` +/// +/// This needs to be rich enough to generate a Policy and inventory Collection. +#[derive(Clone, Debug)] +struct Sled { + sled_id: Uuid, + sled_subnet: Ipv6Subnet<SLED_PREFIX>, + inventory_sp: Option<(u16, SpState)>, + inventory_sled_agent: sled_agent_client::types::Inventory, + zpools: Vec<ZpoolName>, + policy: SledPolicy, +} + +impl Sled { + /// Create a `Sled` using faked-up information based on a `SledBuilder` + fn new_simulated( + sled_id: Uuid, + sled_subnet: Ipv6Subnet<SLED_PREFIX>, + sled_role: SledRole, + unique: Option<String>, + hardware: SledHardware, + hardware_slot: u16, + nzpools: u8, + ) -> Sled { + let unique = unique.unwrap_or_else(|| hardware_slot.to_string()); + let model = format!("model{}", unique); + let serial = format!("serial{}", unique); + let revision = 0; + let zpools = (0..nzpools) + .map(|_| format!("oxp_{}", Uuid::new_v4()).parse().unwrap()) + .collect(); + let inventory_sp = match hardware { + SledHardware::Empty => None, + SledHardware::Gimlet | SledHardware::Pc | SledHardware::Unknown => { + Some(( + hardware_slot, + SpState { + base_mac_address: [0; 6], + hubris_archive_id: format!("hubris{}", unique), + model: model.clone(), + power_state: PowerState::A2, + revision, + rot: RotState::Enabled { + active: RotSlot::A, + pending_persistent_boot_preference: None, + persistent_boot_preference: RotSlot::A, + slot_a_sha3_256_digest: Some(String::from( + "slotAdigest1", + )), + slot_b_sha3_256_digest: Some(String::from( + "slotBdigest1", + )), + transient_boot_preference: None, + }, + serial_number: serial.clone(), + }, + )) + } + }; + + let inventory_sled_agent = { + let baseboard = match hardware { + SledHardware::Gimlet => { + sled_agent_client::types::Baseboard::Gimlet { + identifier: serial.clone(), + model: model.clone(), + revision: i64::from(revision), + } + } + SledHardware::Pc => sled_agent_client::types::Baseboard::Pc { + identifier: serial.clone(), + model: model.clone(), + }, + SledHardware::Unknown | SledHardware::Empty => { + sled_agent_client::types::Baseboard::Unknown + } + }; + let sled_agent_address = get_sled_address(sled_subnet).to_string(); + sled_agent_client::types::Inventory { + baseboard, + reservoir_size: ByteCount::from(1024), + sled_role, + sled_agent_address, + sled_id: sled_id, + usable_hardware_threads: 10, + usable_physical_ram: ByteCount::from(1024 * 1024), + } + }; + + Sled { + sled_id, + sled_subnet, + inventory_sp, + inventory_sled_agent, + zpools, + policy: SledPolicy::InService { + provision_policy: SledProvisionPolicy::Provisionable, + }, + } + } + + /// Create a `Sled` based on real information from another `Policy` and + /// inventory `Collection` + fn new_full( + sled_id: Uuid, + sled_resources: SledResources, + inventory_sp: Option<SledHwInventory<'_>>, + inv_sled_agent: &nexus_types::inventory::SledAgent, + ) -> Sled { + // Elsewhere, the user gives us some rough parameters (like a unique + // string) that we use to construct fake `sled_agent_client` types that + // we can provide to the inventory builder so that _it_ can construct + // the corresponding inventory types.
Here, we're working backwards, + // which is a little weird: we're given inventory types and we construct + // the fake `sled_agent_client` types, again so that we can later pass + // them to the inventory builder so that it can construct the same + // inventory types again. This is a little goofy. + let baseboard = inventory_sp + .as_ref() + .map(|sledhw| sled_agent_client::types::Baseboard::Gimlet { + identifier: sledhw.baseboard_id.serial_number.clone(), + model: sledhw.baseboard_id.part_number.clone(), + revision: i64::from(sledhw.sp.baseboard_revision), + }) + .unwrap_or(sled_agent_client::types::Baseboard::Unknown); + + let inventory_sp = inventory_sp.map(|sledhw| { + let sp_state = SpState { + base_mac_address: [0; 6], + hubris_archive_id: sledhw.sp.hubris_archive.clone(), + model: sledhw.baseboard_id.part_number.clone(), + power_state: sledhw.sp.power_state, + revision: sledhw.sp.baseboard_revision, + rot: RotState::Enabled { + active: sledhw.rot.active_slot, + pending_persistent_boot_preference: sledhw + .rot + .pending_persistent_boot_preference, + persistent_boot_preference: sledhw + .rot + .persistent_boot_preference, + slot_a_sha3_256_digest: sledhw + .rot + .slot_a_sha3_256_digest + .clone(), + slot_b_sha3_256_digest: sledhw + .rot + .slot_b_sha3_256_digest + .clone(), + transient_boot_preference: sledhw + .rot + .transient_boot_preference, + }, + serial_number: sledhw.baseboard_id.serial_number.clone(), + }; + + (sledhw.sp.sp_slot, sp_state) + }); + + let inventory_sled_agent = sled_agent_client::types::Inventory { + baseboard, + reservoir_size: inv_sled_agent.reservoir_size, + sled_role: inv_sled_agent.sled_role, + sled_agent_address: inv_sled_agent.sled_agent_address.to_string(), + sled_id, + usable_hardware_threads: inv_sled_agent.usable_hardware_threads, + usable_physical_ram: inv_sled_agent.usable_physical_ram, + }; + + Sled { + sled_id, + sled_subnet: sled_resources.subnet, + zpools: sled_resources.zpools.into_iter().collect(), + inventory_sp, + inventory_sled_agent, + policy: sled_resources.policy, + } + } + + fn sp_state(&self) -> Option<&(u16, SpState)> { + self.inventory_sp.as_ref() + } + + fn sled_agent_inventory(&self) -> &sled_agent_client::types::Inventory { + &self.inventory_sled_agent + } +} diff --git a/nexus/reconfigurator/preparation/Cargo.toml b/nexus/reconfigurator/preparation/Cargo.toml new file mode 100644 index 0000000000..f95f9c4afe --- /dev/null +++ b/nexus/reconfigurator/preparation/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "nexus-reconfigurator-preparation" +version = "0.1.0" +edition = "2021" + +[dependencies] +illumos-utils.workspace = true +nexus-db-model.workspace = true +nexus-types.workspace = true +omicron-common.workspace = true + +omicron-workspace-hack.workspace = true diff --git a/nexus/reconfigurator/preparation/src/lib.rs b/nexus/reconfigurator/preparation/src/lib.rs new file mode 100644 index 0000000000..77d4532023 --- /dev/null +++ b/nexus/reconfigurator/preparation/src/lib.rs @@ -0,0 +1,74 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
Common facilities for assembling inputs to the planner + +use nexus_types::deployment::Policy; +use nexus_types::deployment::SledResources; +use nexus_types::deployment::ZpoolName; +use nexus_types::identity::Asset; +use omicron_common::address::IpRange; +use omicron_common::address::Ipv6Subnet; +use omicron_common::address::SLED_PREFIX; +use omicron_common::api::external::Error; +use std::collections::BTreeMap; +use std::collections::BTreeSet; +use std::str::FromStr; + +/// Given various pieces of database state that go into the blueprint planning +/// process, produce a `Policy` object encapsulating what the planner needs to +/// generate a blueprint +pub fn policy_from_db( + sled_rows: &[nexus_db_model::Sled], + zpool_rows: &[nexus_db_model::Zpool], + ip_pool_range_rows: &[nexus_db_model::IpPoolRange], + target_nexus_zone_count: usize, +) -> Result<Policy, Error> { + let mut zpools_by_sled_id = { + let mut zpools = BTreeMap::new(); + for z in zpool_rows { + let sled_zpool_names = + zpools.entry(z.sled_id).or_insert_with(BTreeSet::new); + // It's unfortunate that Nexus knows how Sled Agent + // constructs zpool names, but there's not currently an + // alternative. + let zpool_name_generated = + illumos_utils::zpool::ZpoolName::new_external(z.id()) + .to_string(); + let zpool_name = ZpoolName::from_str(&zpool_name_generated) + .map_err(|e| { + Error::internal_error(&format!( + "unexpectedly failed to parse generated \ + zpool name: {}: {}", + zpool_name_generated, e + )) + })?; + sled_zpool_names.insert(zpool_name); + } + zpools + }; + + let sleds = sled_rows + .into_iter() + .map(|sled_row| { + let sled_id = sled_row.id(); + let subnet = Ipv6Subnet::<SLED_PREFIX>::new(sled_row.ip()); + let zpools = zpools_by_sled_id + .remove(&sled_id) + .unwrap_or_else(BTreeSet::new); + let sled_info = SledResources { + policy: sled_row.policy(), + state: sled_row.state().into(), + subnet, + zpools, + }; + (sled_id, sled_info) + }) + .collect(); + + let service_ip_pool_ranges = + ip_pool_range_rows.iter().map(IpRange::from).collect(); + + Ok(Policy { sleds, service_ip_pool_ranges, target_nexus_zone_count }) +} diff --git a/nexus/src/app/deployment.rs b/nexus/src/app/deployment.rs index 31ba9fe065..778e768878 100644 --- a/nexus/src/app/deployment.rs +++ b/nexus/src/app/deployment.rs @@ -7,23 +7,16 @@ use nexus_db_model::DnsGroup; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; -use nexus_db_queries::db::datastore::SQL_BATCH_SIZE; -use nexus_db_queries::db::pagination::Paginator; use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::planner::Planner; +use nexus_reconfigurator_preparation::policy_from_db; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintMetadata; use nexus_types::deployment::BlueprintTarget; use nexus_types::deployment::BlueprintTargetSet; use nexus_types::deployment::Policy; -use nexus_types::deployment::SledResources; -use nexus_types::deployment::ZpoolName; -use nexus_types::identity::Asset; use nexus_types::inventory::Collection; -use omicron_common::address::IpRange; -use omicron_common::address::Ipv6Subnet; use omicron_common::address::NEXUS_REDUNDANCY; -use omicron_common::address::SLED_PREFIX; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; @@ -34,9 +27,6 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; use
slog_error_chain::InlineErrorChain; -use std::collections::BTreeMap; -use std::collections::BTreeSet; -use std::str::FromStr; use uuid::Uuid; /// Common structure for collecting information that the planner needs @@ -118,81 +108,23 @@ impl super::Nexus { let datastore = self.datastore(); let sled_rows = datastore.sled_list_all_batched(opctx).await?; - - let mut zpools_by_sled_id = { - let mut zpools = BTreeMap::new(); - let mut paginator = Paginator::new(SQL_BATCH_SIZE); - while let Some(p) = paginator.next() { - let batch = datastore - .zpool_list_all_external(opctx, &p.current_pagparams()) - .await?; - paginator = - p.found_batch(&batch, &|z: &nexus_db_model::Zpool| z.id()); - for z in batch { - let sled_zpool_names = - zpools.entry(z.sled_id).or_insert_with(BTreeSet::new); - // It's unfortunate that Nexus knows how Sled Agent - // constructs zpool names, but there's not currently an - // alternative. - let zpool_name_generated = - illumos_utils::zpool::ZpoolName::new_external(z.id()) - .to_string(); - let zpool_name = ZpoolName::from_str(&zpool_name_generated) - .map_err(|e| { - Error::internal_error(&format!( - "unexpectedly failed to parse generated \ - zpool name: {}: {}", - zpool_name_generated, e - )) - })?; - sled_zpool_names.insert(zpool_name); - } - } - zpools - }; - - let sleds = sled_rows - .into_iter() - .map(|sled_row| { - let sled_id = sled_row.id(); - let subnet = Ipv6Subnet::<SLED_PREFIX>::new(sled_row.ip()); - let zpools = zpools_by_sled_id - .remove(&sled_id) - .unwrap_or_else(BTreeSet::new); - let sled_info = SledResources { - policy: sled_row.policy(), - state: sled_row.state().into(), - subnet, - zpools, - }; - (sled_id, sled_info) - }) - .collect(); - - let service_ip_pool_ranges = { + let zpool_rows = + datastore.zpool_list_all_external_batched(opctx).await?; + let ip_pool_range_rows = { let (authz_service_ip_pool, _) = datastore.ip_pools_service_lookup(opctx).await?; - - let mut ip_ranges = Vec::new(); - let mut paginator = Paginator::new(SQL_BATCH_SIZE); - while let Some(p) = paginator.next() { - let batch = datastore - .ip_pool_list_ranges( - opctx, - &authz_service_ip_pool, - &p.current_pagparams(), - ) - .await?; - // The use of `last_address` here assumes `paginator` is sorting - // in Ascending order (which it does - see the implementation of - // `current_pagparams()`). - paginator = p.found_batch(&batch, &|r| r.last_address); - ip_ranges.extend(batch.iter().map(IpRange::from)); - } - - ip_ranges + datastore + .ip_pool_list_ranges_batched(opctx, &authz_service_ip_pool) + .await? }; + let policy = policy_from_db( + &sled_rows, + &zpool_rows, + &ip_pool_range_rows, + NEXUS_REDUNDANCY, + )?; + // The choice of which inventory collection to use here is not // necessarily trivial. Inventory collections may be incomplete due to // transient (or even persistent) errors.
It's not yet clear what @@ -223,11 +155,7 @@ impl super::Nexus { Ok(PlanningContext { creator, - policy: Policy { - sleds, - service_ip_pool_ranges, - target_nexus_zone_count: NEXUS_REDUNDANCY, - }, + policy, inventory, internal_dns_version: *dns_version.version, }) diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index 93716d4804..ecc180b6db 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -9,12 +9,14 @@ anyhow.workspace = true chrono.workspace = true base64.workspace = true futures.workspace = true +humantime.workspace = true omicron-uuid-kinds.workspace = true openssl.workspace = true parse-display.workspace = true schemars = { workspace = true, features = ["chrono", "uuid1"] } serde.workspace = true serde_json.workspace = true +serde_with.workspace = true steno.workspace = true strum.workspace = true thiserror.workspace = true diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index ef3c03a302..ac950d2ca3 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -32,6 +32,7 @@ use serde::Deserialize; use serde::Serialize; use std::collections::BTreeMap; use std::collections::BTreeSet; +use std::fmt; use uuid::Uuid; /// Fleet-wide deployment policy @@ -49,7 +50,7 @@ use uuid::Uuid; /// /// The current policy is pretty limited. It's aimed primarily at supporting /// the add/remove sled use case. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct Policy { /// set of sleds that are supposed to be part of the control plane, along /// with information about resources available to the planner @@ -64,7 +65,7 @@ pub struct Policy { } /// Describes the resources available on each sled for the planner -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct SledResources { /// current sled policy pub policy: SledPolicy, @@ -134,7 +135,7 @@ impl SledResources { // zones deployed on each host and some supporting configuration (e.g., DNS). // This is aimed at supporting add/remove sleds. The plan is to grow this to // include more of the system as we support more use cases. 
-#[derive(Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize)] +#[derive(Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize)] pub struct Blueprint { /// unique identifier for this blueprint pub id: Uuid, @@ -225,6 +226,59 @@ impl Blueprint { } } +impl fmt::Debug for Blueprint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "blueprint {}", self.id)?; + writeln!( + f, + "parent: {}", + self.parent_blueprint_id + .map(|u| u.to_string()) + .unwrap_or_else(|| String::from("<none>")) + )?; + writeln!( + f, + "created by {}{}", + self.creator, + if self.creator.parse::<Uuid>().is_ok() { + " (likely a Nexus instance)" + } else { + "" + } + )?; + writeln!( + f, + "created at {}", + humantime::format_rfc3339_millis(self.time_created.into(),) + )?; + writeln!(f, "internal DNS version: {}", self.internal_dns_version)?; + writeln!(f, "comment: {}", self.comment)?; + writeln!(f, "zones:\n")?; + for (sled_id, sled_zones) in &self.omicron_zones { + writeln!( + f, + " sled {}: Omicron zones at generation {}", + sled_id, sled_zones.generation + )?; + for z in &sled_zones.zones { + writeln!( + f, + " {} {} {}", + z.id, + if self.zones_in_service.contains(&z.id) { + "in service " + } else { + "not in service" + }, + z.zone_type.label(), + )?; + } + } + + Ok(()) + } +} + /// Describe high-level metadata about a blueprint // These fields are a subset of [`Blueprint`], and include only the data we can // quickly fetch from the main blueprint table (e.g., when listing all @@ -494,12 +548,12 @@ impl<'a> OmicronZonesDiff<'a> { fn print_whole_sled( &self, - f: &mut std::fmt::Formatter<'_>, + f: &mut fmt::Formatter<'_>, prefix: char, label: &str, bbsledzones: &OmicronZonesConfig, sled_id: Uuid, - ) -> std::fmt::Result { + ) -> fmt::Result { writeln!(f, "{} sled {} ({})", prefix, sled_id, label)?; writeln!( f, @@ -523,8 +577,8 @@ impl<'a> OmicronZonesDiff<'a> { } /// Implements diff(1)-like output for diff'ing two blueprints -impl<'a> std::fmt::Display for OmicronZonesDiff<'a> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl<'a> fmt::Display for OmicronZonesDiff<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "diff {} {}", self.before_label, self.after_label)?; writeln!(f, "--- {}", self.before_label)?; writeln!(f, "+++ {}", self.after_label)?; @@ -655,3 +709,17 @@ impl<'a> std::fmt::Display for OmicronZonesDiff<'a> { Ok(()) } } + +/// Encapsulates Reconfigurator state +/// +/// This serialized form is intended for saving state from hand-constructed or +/// real, deployed systems and loading it back into a simulator or test suite +/// +/// **This format is not stable. 
It may change at any time without +/// backwards-compatibility guarantees.** +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UnstableReconfiguratorState { + pub policy: Policy, + pub collections: Vec<Collection>, + pub blueprints: Vec<Blueprint>, +} diff --git a/nexus/types/src/inventory.rs b/nexus/types/src/inventory.rs index 71e8e64d97..57fc7bd647 100644 --- a/nexus/types/src/inventory.rs +++ b/nexus/types/src/inventory.rs @@ -18,6 +18,8 @@ pub use gateway_client::types::RotSlot; pub use gateway_client::types::SpType; use omicron_common::api::external::ByteCount; pub use omicron_common::api::internal::shared::SourceNatConfig; +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; pub use sled_agent_client::types::NetworkInterface; pub use sled_agent_client::types::NetworkInterfaceKind; pub use sled_agent_client::types::OmicronZoneConfig; @@ -48,7 +50,8 @@ use uuid::Uuid; /// database. /// /// See the documentation in the database schema for more background. -#[derive(Debug, Eq, PartialEq, Clone)] +#[serde_as] +#[derive(Debug, Eq, PartialEq, Clone, Serialize, Deserialize)] pub struct Collection { /// unique identifier for this collection pub id: Uuid, @@ -79,16 +82,19 @@ pub struct Collection { /// /// In practice, these will be inserted into the `inv_service_processor` /// table. + #[serde_as(as = "Vec<(_, _)>")] pub sps: BTreeMap<Arc<BaseboardId>, ServiceProcessor>, /// all roots of trust, keyed by baseboard id /// /// In practice, these will be inserted into the `inv_root_of_trust` table. + #[serde_as(as = "Vec<(_, _)>")] pub rots: BTreeMap<Arc<BaseboardId>, RotState>, /// all caboose contents found, keyed first by the kind of caboose /// (`CabooseWhich`), then the baseboard id of the sled where they were /// found /// /// In practice, these will be inserted into the `inv_caboose` table. + #[serde_as(as = "BTreeMap<_, Vec<(_, _)>>")] pub cabooses_found: BTreeMap<CabooseWhich, BTreeMap<Arc<BaseboardId>, CabooseFound>>, /// all root of trust page contents found, keyed first by the kind of page /// ... /// /// In practice, these will be inserted into the `inv_root_of_trust_page` /// table. + #[serde_as(as = "BTreeMap<_, Vec<(_, _)>>")] pub rot_pages_found: BTreeMap<RotPageWhich, BTreeMap<Arc<BaseboardId>, RotPageFound>>, @@ -157,7 +164,9 @@ impl Collection { /// number. We do not include that here. If we ever did find a baseboard with /// the same part number and serial number but a new revision number, we'd want /// to treat that as the same baseboard as one with a different revision number. -#[derive(Clone, Debug, Ord, Eq, PartialOrd, PartialEq)] +#[derive( + Clone, Debug, Ord, Eq, PartialOrd, PartialEq, Deserialize, Serialize, +)] pub struct BaseboardId { /// Oxide Part Number pub part_number: String, @@ -181,7 +190,9 @@ impl From<gateway_client::types::SpState> for BaseboardId { /// /// These are normalized in the database. Each distinct `Caboose` is assigned a /// uuid and shared across many possible collections that reference it.
-#[derive(Clone, Debug, Ord, Eq, PartialOrd, PartialEq)] +#[derive( + Clone, Debug, Ord, Eq, PartialOrd, PartialEq, Deserialize, Serialize, +)] pub struct Caboose { pub board: String, pub git_commit: String, @@ -202,7 +213,9 @@ impl From<gateway_client::types::SpComponentCaboose> for Caboose { /// Indicates that a particular `Caboose` was found (at a particular time from a /// particular source, but these are only for debugging) -#[derive(Clone, Debug, Ord, Eq, PartialOrd, PartialEq)] +#[derive( + Clone, Debug, Ord, Eq, PartialOrd, PartialEq, Deserialize, Serialize, +)] pub struct CabooseFound { pub time_collected: DateTime<Utc>, pub source: String, @@ -210,7 +223,9 @@ pub struct CabooseFound { } /// Describes a service processor found during collection -#[derive(Clone, Debug, Ord, Eq, PartialOrd, PartialEq)] +#[derive( + Clone, Debug, Ord, Eq, PartialOrd, PartialEq, Deserialize, Serialize, +)] pub struct ServiceProcessor { pub time_collected: DateTime<Utc>, pub source: String, @@ -225,7 +240,9 @@ pub struct ServiceProcessor { /// Describes the root of trust state found (from a service processor) during /// collection -#[derive(Clone, Debug, Ord, Eq, PartialOrd, PartialEq)] +#[derive( + Clone, Debug, Ord, Eq, PartialOrd, PartialEq, Deserialize, Serialize, +)] pub struct RotState { pub time_collected: DateTime<Utc>, pub source: String, @@ -239,7 +256,18 @@ pub struct RotState { } /// Describes which caboose this is (which component, which slot) -#[derive(Clone, Copy, Debug, EnumIter, PartialEq, Eq, PartialOrd, Ord)] +#[derive( + Clone, + Copy, + Debug, + EnumIter, + PartialEq, + Eq, + PartialOrd, + Ord, + Deserialize, + Serialize, +)] pub enum CabooseWhich { SpSlot0, SpSlot1, @@ -251,14 +279,18 @@ /// /// These are normalized in the database. Each distinct `RotPage` is assigned a /// uuid and shared across many possible collections that reference it. -#[derive(Clone, Debug, Ord, Eq, PartialOrd, PartialEq)] +#[derive( + Clone, Debug, Ord, Eq, PartialOrd, PartialEq, Deserialize, Serialize, +)] pub struct RotPage { pub data_base64: String, } /// Indicates that a particular `RotPage` was found (at a particular time from a /// particular source, but these are only for debugging) -#[derive(Clone, Debug, Ord, Eq, PartialOrd, PartialEq)] +#[derive( + Clone, Debug, Ord, Eq, PartialOrd, PartialEq, Deserialize, Serialize, +)] pub struct RotPageFound { pub time_collected: DateTime<Utc>, pub source: String, @@ -266,7 +298,18 @@ pub struct RotPageFound { } /// Describes which root of trust page this is -#[derive(Clone, Copy, Debug, EnumIter, PartialEq, Eq, PartialOrd, Ord)] +#[derive( + Clone, + Copy, + Debug, + EnumIter, + PartialEq, + Eq, + PartialOrd, + Ord, + Deserialize, + Serialize, +)] pub enum RotPageWhich { Cmpa, CfpaActive, @@ -307,7 +350,7 @@ impl IntoRotPage for gateway_client::types::RotCfpa { /// A sled may be on a PC (in dev/test environments) and have no associated /// baseboard. There might also be baseboards with no associated sled (if /// they have not been formally added to the control plane). 
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
 pub struct SledAgent {
     pub time_collected: DateTime<Utc>,
     pub source: String,
@@ -320,7 +363,7 @@ pub struct SledAgent {
     pub reservoir_size: ByteCount,
 }

-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
 pub struct OmicronZonesFound {
     pub time_collected: DateTime<Utc>,
     pub source: String,
diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml
index 0a607d5115..49bdc030c7 100644
--- a/test-utils/Cargo.toml
+++ b/test-utils/Cargo.toml
@@ -18,6 +18,7 @@ libc.workspace = true
 nexus-config.workspace = true
 omicron-common.workspace = true
 pem.workspace = true
+regex.workspace = true
 ring.workspace = true
 rustls.workspace = true
 slog.workspace = true
@@ -29,7 +30,6 @@ tokio = { workspace = true, features = ["full"] }
 tokio-postgres.workspace = true
 usdt.workspace = true
 rcgen.workspace = true
-regex.workspace = true
 reqwest.workspace = true
 walkdir.workspace = true
 omicron-workspace-hack.workspace = true
diff --git a/test-utils/src/dev/test_cmds.rs b/test-utils/src/dev/test_cmds.rs
index 15c94554c8..89bf022369 100644
--- a/test-utils/src/dev/test_cmds.rs
+++ b/test-utils/src/dev/test_cmds.rs
@@ -120,3 +120,73 @@ pub fn temp_file_path(label: &str) -> PathBuf {
 pub fn error_for_enoent() -> String {
     io::Error::from_raw_os_error(libc::ENOENT).to_string()
 }
+
+/// Redacts text from a string (usually stdout/stderr) that may change from
+/// invocation to invocation (e.g., assigned TCP port numbers, timestamps)
+///
+/// This allows us to use expectorate to verify the shape of the CLI output.
+pub fn redact_variable(input: &str, extra_redactions: &[&str]) -> String {
+    // Replace TCP port numbers. We include the localhost characters to avoid
+    // catching any random sequence of numbers.
+    let s = regex::Regex::new(r"\[::1\]:\d{4,5}")
+        .unwrap()
+        .replace_all(input, "[::1]:REDACTED_PORT")
+        .to_string();
+    let s = regex::Regex::new(r"\[::ffff:127.0.0.1\]:\d{4,5}")
+        .unwrap()
+        .replace_all(&s, "[::ffff:127.0.0.1]:REDACTED_PORT")
+        .to_string();
+    let s = regex::Regex::new(r"127\.0\.0\.1:\d{4,5}")
+        .unwrap()
+        .replace_all(&s, "127.0.0.1:REDACTED_PORT")
+        .to_string();
+
+    // Replace uuids.
+    let s = regex::Regex::new(
+        "[a-zA-Z0-9]{8}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}-\
+        [a-zA-Z0-9]{4}-[a-zA-Z0-9]{12}",
+    )
+    .unwrap()
+    .replace_all(&s, "REDACTED_UUID_REDACTED_UUID_REDACTED")
+    .to_string();
+
+    // Replace timestamps.
+    let s = regex::Regex::new(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z")
+        .unwrap()
+        .replace_all(&s, "<REDACTED_TIMESTAMP>")
+        .to_string();
+
+    let s = regex::Regex::new(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z")
+        .unwrap()
+        .replace_all(&s, "<REDACTED_TIMESTAMP>")
+        .to_string();
+
+    // Replace formatted durations. These are pretty specific to the background
+    // task output.
+    let s = regex::Regex::new(r"\d+s ago")
+        .unwrap()
+        .replace_all(&s, "<REDACTED DURATION>s ago")
+        .to_string();
+
+    let s = regex::Regex::new(r"\d+ms")
+        .unwrap()
+        .replace_all(&s, "<REDACTED DURATION>ms")
+        .to_string();
+
+    let mut s = regex::Regex::new(
+        r"note: database schema version matches expected \(\d+\.\d+\.\d+\)",
+    )
+    .unwrap()
+    .replace_all(
+        &s,
+        "note: database schema version matches expected \
+        (<redacted database version>)",
+    )
+    .to_string();
+
+    for r in extra_redactions {
+        s = s.replace(r, "<REDACTED>");
+    }
+
+    s
+}

From 358eec408b1df996ccf1ea115ea1770de80637a0 Mon Sep 17 00:00:00 2001
From: David Crespo
Date: Thu, 7 Mar 2024 16:00:23 -0600
Subject: [PATCH 090/157] Unquarantine clickhouse binary on mac (#5223)

After #5127 (I assume, because this problem came up immediately after that)
macOS Gatekeeper is putting the `clickhouse` binary in quarantine after
download, which means we can't execute it unless we take it out of quarantine.
This is apparently a new thing, as [this
doc](https://github.com/ClickHouse/clickhouse-docs/blob/08d7a329d/knowledgebase/fix-developer-verification-error-in-macos.md)
about how to do that was only [added Jan
9](https://github.com/ClickHouse/clickhouse-docs/pull/1835).

Here I am doing it in the simplest way possible: if mac, then remove the
quarantine attribute. If anyone has a better way, I'm all ears.
---
 tools/ci_download_clickhouse | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/tools/ci_download_clickhouse b/tools/ci_download_clickhouse
index 93201a772b..c5f1cae914 100755
--- a/tools/ci_download_clickhouse
+++ b/tools/ci_download_clickhouse
@@ -84,6 +84,12 @@ function main
 	# Unpack the tarball into a local directory
 	do_untar "$TARBALL_FILE" "$DEST_DIR"

+	# on macOS, we need to take the binary out of quarantine after download
+	# https://github.com/ClickHouse/clickhouse-docs/blob/08d7a329d/knowledgebase/fix-developer-verification-error-in-macos.md
+	if [[ $CIDL_OS == darwin* ]]; then
+		xattr -d com.apple.quarantine "$DEST_DIR/clickhouse"
+	fi
+
 	# Run the binary as a sanity-check.
 	"$DEST_DIR/clickhouse" server --version
 }

From 19ad11128b40fccfe4a19fc22171ef388f94781d Mon Sep 17 00:00:00 2001
From: John Gallagher
Date: Thu, 7 Mar 2024 17:45:53 -0500
Subject: [PATCH 091/157] `omdb nexus blueprints target set`: take an
 `enabled` setting (#5224)

I made this a required argument because I'm nervous about defaulting it to
`Enabled` when we've talked so much about manually setting a disabled
blueprint. If folks feel strongly this should be an optional
`--enabled={true,false,inherit}` defaulting to `true` instead, I wouldn't put
up a fight.

Addresses the first part of #5210.
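For concreteness, the new argument collapses to the boolean Nexus expects
roughly as follows (an editor's sketch using the names from the diff below;
the authoritative version is in the patch itself):

    // Collapsing the tri-state CLI argument to the `enabled` bool sent to Nexus.
    fn resolve_enabled(
        arg: BlueprintTargetSetEnabled,
        current_target_enabled: bool,
    ) -> bool {
        match arg {
            BlueprintTargetSetEnabled::Enabled => true,
            BlueprintTargetSetEnabled::Disabled => false,
            // "inherit" copies the bit from the current target blueprint.
            BlueprintTargetSetEnabled::Inherit => current_target_enabled,
        }
    }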
--- dev-tools/omdb/src/bin/omdb/nexus.rs | 49 +++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 9 deletions(-) diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs index 692931db51..4705da2a32 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus.rs @@ -11,6 +11,7 @@ use chrono::SecondsFormat; use chrono::Utc; use clap::Args; use clap::Subcommand; +use clap::ValueEnum; use futures::TryStreamExt; use nexus_client::types::ActivationReason; use nexus_client::types::BackgroundTask; @@ -77,7 +78,7 @@ enum BlueprintsCommands { Diff(BlueprintIdsArgs), /// Delete a blueprint Delete(BlueprintIdArgs), - /// Set the current target blueprint + /// Interact with the current target blueprint Target(BlueprintsTargetArgs), /// Generate an initial blueprint from a specific inventory collection GenerateFromCollection(CollectionIdArgs), @@ -116,7 +117,25 @@ enum BlueprintTargetCommands { /// Show the current target blueprint Show, /// Change the current target blueprint - Set(BlueprintIdArgs), + Set(BlueprintTargetSetArgs), +} + +#[derive(Debug, Args)] +struct BlueprintTargetSetArgs { + /// id of blueprint to make target + blueprint_id: Uuid, + /// whether this blueprint should be enabled + enabled: BlueprintTargetSetEnabled, +} + +#[derive(Debug, Clone, Copy, ValueEnum)] +enum BlueprintTargetSetEnabled { + /// set the new current target as enabled + Enabled, + /// set the new current target as disabled + Disabled, + /// use the enabled setting from the parent blueprint + Inherit, } #[derive(Debug, Args)] @@ -914,14 +933,26 @@ async fn cmd_nexus_blueprints_target_show( async fn cmd_nexus_blueprints_target_set( client: &nexus_client::Client, - args: &BlueprintIdArgs, + args: &BlueprintTargetSetArgs, ) -> Result<(), anyhow::Error> { - // Try to preserve the value of "enabled", if possible. - let enabled = client - .blueprint_target_view() - .await - .map(|current| current.into_inner().enabled) - .unwrap_or(true); + let enabled = match args.enabled { + BlueprintTargetSetEnabled::Enabled => true, + BlueprintTargetSetEnabled::Disabled => false, + // There's a small TOCTOU race with "inherit": What if the user wants to + // inherit the parent blueprint enabled bit but the current target + // blueprint enabled bit is flipped or the current target blueprint is + // changed? We expect neither of these to be problematic in practice: + // the only way for the `enable` bit to be set to anything at all is via + // `omdb`, so the user would have to be racing with another `omdb` + // operator. (In the case of the current target blueprint being changed + // entirely, that will result in a failure to set the current target + // below, because its parent will no longer be the current target.) + BlueprintTargetSetEnabled::Inherit => client + .blueprint_target_view() + .await + .map(|current| current.into_inner().enabled) + .context("failed to fetch current target blueprint")?, + }; client .blueprint_target_set(&nexus_client::types::BlueprintTargetSet { target_id: args.blueprint_id, From 87c9b13037e8e9d9ec12abc6d997336674993760 Mon Sep 17 00:00:00 2001 From: "Andrew J. 
Stone" Date: Thu, 7 Mar 2024 17:57:24 -0500 Subject: [PATCH 092/157] Setup the VMM reservoir in a background task (#5124) Fixes the most immediate problem of #5121 --- Cargo.lock | 1 + clients/nexus-client/src/lib.rs | 5 + nexus/db-model/src/schema.rs | 3 +- nexus/db-model/src/sled.rs | 49 +- nexus/db-queries/src/db/datastore/dataset.rs | 8 +- nexus/db-queries/src/db/datastore/mod.rs | 11 +- .../src/db/datastore/physical_disk.rs | 3 +- nexus/db-queries/src/db/datastore/rack.rs | 4 +- nexus/db-queries/src/db/datastore/sled.rs | 178 ++-- .../reconfigurator/execution/src/datasets.rs | 8 +- .../src/app/background/blueprint_execution.rs | 4 +- .../app/background/inventory_collection.rs | 4 +- nexus/src/app/sled.rs | 23 +- nexus/src/internal_api/http_entrypoints.rs | 26 +- nexus/tests/integration_tests/rack.rs | 7 +- nexus/types/src/internal_api/params.rs | 14 +- openapi/nexus-internal.json | 53 +- schema/crdb/39.0.0/up.sql | 4 + schema/crdb/dbinit.sql | 5 +- sled-agent/Cargo.toml | 1 + sled-agent/src/fakes/nexus.rs | 61 +- sled-agent/src/hardware_monitor.rs | 12 +- sled-agent/src/instance_manager.rs | 113 +-- sled-agent/src/lib.rs | 1 + sled-agent/src/nexus.rs | 894 +++++++++++++++++- sled-agent/src/sim/server.rs | 5 +- sled-agent/src/sled_agent.rs | 184 +--- sled-agent/src/vmm_reservoir.rs | 272 ++++++ 28 files changed, 1525 insertions(+), 428 deletions(-) create mode 100644 schema/crdb/39.0.0/up.sql create mode 100644 sled-agent/src/vmm_reservoir.rs diff --git a/Cargo.lock b/Cargo.lock index fd9155bb8e..416766b9cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5297,6 +5297,7 @@ dependencies = [ "dropshot", "expectorate", "flate2", + "flume", "futures", "gateway-client", "glob", diff --git a/clients/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs index 17fb5aa367..55bdf3d0aa 100644 --- a/clients/nexus-client/src/lib.rs +++ b/clients/nexus-client/src/lib.rs @@ -33,6 +33,11 @@ progenitor::generate_api!( MacAddr = omicron_common::api::external::MacAddr, Name = omicron_common::api::external::Name, NewPasswordHash = omicron_passwords::NewPasswordHash, + }, + patch = { + SledAgentInfo = { derives = [PartialEq, Eq] }, + ByteCount = { derives = [PartialEq, Eq] }, + Baseboard = { derives = [PartialEq, Eq] } } ); diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 55d3e9b43f..d70b0a267e 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -13,7 +13,7 @@ use omicron_common::api::external::SemverVersion; /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(38, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(39, 0, 0); table! { disk (id) { @@ -827,6 +827,7 @@ table! 
{ last_used_address -> Inet, sled_policy -> crate::sled_policy::SledPolicyEnum, sled_state -> crate::SledStateEnum, + sled_agent_gen -> Int8, } } diff --git a/nexus/db-model/src/sled.rs b/nexus/db-model/src/sled.rs index 47912f89cc..c2cbb65bd6 100644 --- a/nexus/db-model/src/sled.rs +++ b/nexus/db-model/src/sled.rs @@ -6,10 +6,15 @@ use super::{ByteCount, Generation, SledState, SqlU16, SqlU32}; use crate::collection::DatastoreCollectionConfig; use crate::ipv6; use crate::schema::{physical_disk, service, sled, zpool}; +use crate::sled::shared::Baseboard; use crate::sled_policy::DbSledPolicy; use chrono::{DateTime, Utc}; use db_macros::Asset; -use nexus_types::{external_api::shared, external_api::views, identity::Asset}; +use nexus_types::{ + external_api::{shared, views}, + identity::Asset, + internal_api::params, +}; use std::net::Ipv6Addr; use std::net::SocketAddrV6; use uuid::Uuid; @@ -41,7 +46,7 @@ pub struct Sled { #[diesel(embed)] identity: SledIdentity, time_deleted: Option>, - rcgen: Generation, + pub rcgen: Generation, pub rack_id: Uuid, @@ -66,6 +71,12 @@ pub struct Sled { #[diesel(column_name = sled_state)] state: SledState, + + /// A generation number owned and incremented by sled-agent + /// + /// This is specifically distinct from `rcgen`, which is incremented by + /// child resources as part of `DatastoreCollectionConfig`. + pub sled_agent_gen: Generation, } impl Sled { @@ -119,6 +130,34 @@ impl From for views::Sled { } } +impl From for params::SledAgentInfo { + fn from(sled: Sled) -> Self { + let role = if sled.is_scrimlet { + params::SledRole::Scrimlet + } else { + params::SledRole::Gimlet + }; + let decommissioned = match sled.state { + SledState::Active => false, + SledState::Decommissioned => true, + }; + Self { + sa_address: sled.address(), + role, + baseboard: Baseboard { + serial: sled.serial_number.clone(), + part: sled.part_number.clone(), + revision: sled.revision, + }, + usable_hardware_threads: sled.usable_hardware_threads.into(), + usable_physical_ram: sled.usable_physical_ram.into(), + reservoir_size: sled.reservoir_size.into(), + generation: sled.sled_agent_gen.into(), + decommissioned, + } + } +} + impl DatastoreCollectionConfig for Sled { type CollectionId = Uuid; type GenerationNumberColumn = sled::dsl::rcgen; @@ -161,6 +200,9 @@ pub struct SledUpdate { // ServiceAddress (Sled Agent). pub ip: ipv6::Ipv6Addr, pub port: SqlU16, + + // Generation number - owned and incremented by sled-agent. 
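// (Illustrative aside from the editor, not part of this patch.) The intended
// protocol for `sled_agent_gen`: the sled-agent owns the counter and bumps it
// before each report about itself, and the datastore only applies an upsert
// whose generation is strictly newer than the stored one, so stale or
// reordered updates are rejected. A hedged sketch with simplified types;
// `make_sled_update` is hypothetical:
//
//     let mut gen = omicron_common::api::external::Generation::new();
//     loop {
//         gen = gen.next(); // bump before every report
//         let update = make_sled_update(gen);
//         // The datastore rejects this if `gen` is not strictly newer.
//         datastore.sled_upsert(update).await?;
//     }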
+ pub sled_agent_gen: Generation, } impl SledUpdate { @@ -170,6 +212,7 @@ impl SledUpdate { baseboard: SledBaseboard, hardware: SledSystemHardware, rack_id: Uuid, + sled_agent_gen: Generation, ) -> Self { Self { id, @@ -185,6 +228,7 @@ impl SledUpdate { reservoir_size: hardware.reservoir_size, ip: addr.ip().into(), port: addr.port().into(), + sled_agent_gen, } } @@ -220,6 +264,7 @@ impl SledUpdate { ip: self.ip, port: self.port, last_used_address, + sled_agent_gen: self.sled_agent_gen, } } diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index 3c1fd0afb1..792f8f81a4 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -186,6 +186,7 @@ impl DataStore { mod test { use super::*; use crate::db::datastore::test_utils::datastore_test; + use nexus_db_model::Generation; use nexus_db_model::SledBaseboard; use nexus_db_model::SledSystemHardware; use nexus_db_model::SledUpdate; @@ -222,12 +223,9 @@ mod test { reservoir_size: (16 << 30).try_into().unwrap(), }, Uuid::new_v4(), + Generation::new(), ); - datastore - .sled_upsert(sled) - .await - .expect("failed to upsert sled") - .unwrap(); + datastore.sled_upsert(sled).await.expect("failed to upsert sled"); // Create a fake zpool that backs our fake datasets. let zpool_id = Uuid::new_v4(); diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index dea6fcb997..b164186fdf 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -108,7 +108,6 @@ pub use instance::InstanceAndActiveVmm; pub use inventory::DataStoreInventoryTest; pub use rack::RackInit; pub use silo::Discoverability; -pub use sled::SledUpsertOutput; pub use switch_port::SwitchPortSettingsCombinedResult; pub use virtual_provisioning_collection::StorageType; pub use volume::read_only_resources_associated_with_volume; @@ -393,6 +392,7 @@ mod test { use futures::stream; use futures::StreamExt; use nexus_config::RegionAllocationStrategy; + use nexus_db_model::Generation; use nexus_db_model::IpAttachState; use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; @@ -619,8 +619,9 @@ mod test { sled_baseboard_for_test(), sled_system_hardware_for_test(), rack_id, + Generation::new(), ); - datastore.sled_upsert(sled_update).await.unwrap().unwrap(); + datastore.sled_upsert(sled_update).await.unwrap(); sled_id } @@ -1338,8 +1339,9 @@ mod test { sled_baseboard_for_test(), sled_system_hardware_for_test(), rack_id, + Generation::new(), ); - datastore.sled_upsert(sled1).await.unwrap().unwrap(); + datastore.sled_upsert(sled1).await.unwrap(); let addr2 = "[fd00:1df::1]:12345".parse().unwrap(); let sled2_id = "66285c18-0c79-43e0-e54f-95271f271314".parse().unwrap(); @@ -1349,8 +1351,9 @@ mod test { sled_baseboard_for_test(), sled_system_hardware_for_test(), rack_id, + Generation::new(), ); - datastore.sled_upsert(sled2).await.unwrap().unwrap(); + datastore.sled_upsert(sled2).await.unwrap(); let ip = datastore.next_ipv6_address(&opctx, sled1_id).await.unwrap(); let expected_ip = Ipv6Addr::new(0xfd00, 0x1de, 0, 0, 0, 0, 1, 0); diff --git a/nexus/db-queries/src/db/datastore/physical_disk.rs b/nexus/db-queries/src/db/datastore/physical_disk.rs index d4e94745aa..81fc14d1d7 100644 --- a/nexus/db-queries/src/db/datastore/physical_disk.rs +++ b/nexus/db-queries/src/db/datastore/physical_disk.rs @@ -143,6 +143,7 @@ mod test { use crate::db::datastore::test_utils::datastore_test; use 
crate::db::model::{PhysicalDiskKind, Sled, SledUpdate}; use dropshot::PaginationOrder; + use nexus_db_model::Generation; use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Asset; use omicron_test_utils::dev; @@ -159,11 +160,11 @@ mod test { sled_baseboard_for_test(), sled_system_hardware_for_test(), rack_id, + Generation::new(), ); db.sled_upsert(sled_update) .await .expect("Could not upsert sled during test prep") - .unwrap() } fn list_disk_params() -> DataPageParams<'static, Uuid> { diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 32e059bf81..3224f36b8d 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -868,7 +868,7 @@ mod test { use async_bb8_diesel::AsyncSimpleConnection; use internal_params::DnsRecord; use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; - use nexus_db_model::{DnsGroup, InitialDnsGroup, SledUpdate}; + use nexus_db_model::{DnsGroup, Generation, InitialDnsGroup, SledUpdate}; use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::shared::SiloIdentityMode; use nexus_types::identity::Asset; @@ -1065,11 +1065,11 @@ mod test { sled_baseboard_for_test(), sled_system_hardware_for_test(), rack_id(), + Generation::new(), ); db.sled_upsert(sled_update) .await .expect("Could not upsert sled during test prep") - .unwrap() } // Hacky macro helper to: diff --git a/nexus/db-queries/src/db/datastore/sled.rs b/nexus/db-queries/src/db/datastore/sled.rs index 8d812f69cc..93d3d0e6a2 100644 --- a/nexus/db-queries/src/db/datastore/sled.rs +++ b/nexus/db-queries/src/db/datastore/sled.rs @@ -41,21 +41,17 @@ use uuid::Uuid; impl DataStore { /// Stores a new sled in the database. /// - /// Produces `SledUpsertOutput::Decommissioned` if the sled is - /// decommissioned. This is not an error, because `sled_upsert`'s only - /// caller (sled-agent) is not expected to receive this error. + /// Returns an error if `sled_agent_gen` is stale, or the sled is + /// decommissioned. pub async fn sled_upsert( &self, sled_update: SledUpdate, - ) -> CreateResult { + ) -> CreateResult { use db::schema::sled::dsl; // required for conditional upsert use diesel::query_dsl::methods::FilterDsl; - // TODO: figure out what to do with time_deleted. We want to replace it - // with a time_decommissioned, most probably. - - let query = diesel::insert_into(dsl::sled) + diesel::insert_into(dsl::sled) .values(sled_update.clone().into_insertable()) .on_conflict(dsl::id) .do_update() @@ -69,14 +65,13 @@ impl DataStore { .eq(sled_update.usable_hardware_threads), dsl::usable_physical_ram.eq(sled_update.usable_physical_ram), dsl::reservoir_size.eq(sled_update.reservoir_size), + dsl::sled_agent_gen.eq(sled_update.sled_agent_gen), )) + .filter(dsl::sled_agent_gen.lt(sled_update.sled_agent_gen)) .filter(dsl::sled_state.ne(SledState::Decommissioned)) - .returning(Sled::as_returning()); - - let sled: Option = query + .returning(Sled::as_returning()) .get_result_async(&*self.pool_connection_unauthorized().await?) .await - .optional() .map_err(|e| { public_error_from_diesel( e, @@ -85,19 +80,7 @@ impl DataStore { &sled_update.id().to_string(), ), ) - })?; - - // The only situation in which a sled is not returned is if the - // `.filter(dsl::sled_state.ne(SledState::Decommissioned))` is not - // satisfied. - // - // If we want to return a sled even if it's decommissioned here, we may - // have to do something more complex. See - // https://stackoverflow.com/q/34708509. 
- match sled { - Some(sled) => Ok(SledUpsertOutput::Updated(sled)), - None => Ok(SledUpsertOutput::Decommissioned), - } + }) } pub async fn sled_list( @@ -532,28 +515,6 @@ impl DataStore { } } -/// The result of [`DataStore::sled_upsert`]. -#[derive(Clone, Debug)] -#[must_use] -pub enum SledUpsertOutput { - /// The sled was updated. - Updated(Sled), - /// The sled was not updated because it is decommissioned. - Decommissioned, -} - -impl SledUpsertOutput { - /// Returns the sled if it was updated, or panics if it was not. - pub fn unwrap(self) -> Sled { - match self { - SledUpsertOutput::Updated(sled) => sled, - SledUpsertOutput::Decommissioned => { - panic!("sled was decommissioned, not updated") - } - } - } -} - // --- // State transition validators // --- @@ -713,6 +674,7 @@ mod test { use crate::db::model::SqlU32; use anyhow::{Context, Result}; use itertools::Itertools; + use nexus_db_model::Generation; use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Asset; use omicron_common::api::external; @@ -732,7 +694,7 @@ mod test { let mut sled_update = test_new_sled_update(); let observed_sled = - datastore.sled_upsert(sled_update.clone()).await.unwrap().unwrap(); + datastore.sled_upsert(sled_update.clone()).await.unwrap(); assert_eq!( observed_sled.usable_hardware_threads, sled_update.usable_hardware_threads @@ -760,12 +722,14 @@ mod test { .unwrap(), ); + // Bump the generation number so the insert succeeds. + sled_update.sled_agent_gen.0 = sled_update.sled_agent_gen.0.next(); + // Test that upserting the sled propagates those changes to the DB. let observed_sled = datastore .sled_upsert(sled_update.clone()) .await - .expect("Could not upsert sled during test prep") - .unwrap(); + .expect("Could not upsert sled during test prep"); assert_eq!( observed_sled.usable_hardware_threads, sled_update.usable_hardware_threads @@ -780,6 +744,70 @@ mod test { logctx.cleanup_successful(); } + #[tokio::test] + async fn upsert_sled_updates_fails_with_stale_sled_agent_gen() { + let logctx = dev::test_setup_log( + "upsert_sled_updates_fails_with_stale_sled_agent_gen", + ); + let mut db = test_setup_database(&logctx.log).await; + let (_opctx, datastore) = datastore_test(&logctx, &db).await; + + let mut sled_update = test_new_sled_update(); + let observed_sled = + datastore.sled_upsert(sled_update.clone()).await.unwrap(); + + assert_eq!(observed_sled.reservoir_size, sled_update.reservoir_size); + + // Modify the reservoir size + const MIB: u64 = 1024 * 1024; + + sled_update.reservoir_size = ByteCount::from( + external::ByteCount::try_from( + sled_update.reservoir_size.0.to_bytes() + MIB, + ) + .unwrap(), + ); + + // Fail the update, since the generation number didn't change. + assert!(datastore.sled_upsert(sled_update.clone()).await.is_err()); + + // Bump the generation number so the next insert succeeds. + sled_update.sled_agent_gen.0 = sled_update.sled_agent_gen.0.next(); + + // Test that upserting the sled propagates those changes to the DB. + let observed_sled = datastore + .sled_upsert(sled_update.clone()) + .await + .expect("Could not upsert sled during test prep"); + assert_eq!(observed_sled.reservoir_size, sled_update.reservoir_size); + + // Now reset the generation to a lower value and try again. + // This should fail. 
+        let current_gen = sled_update.sled_agent_gen;
+        sled_update.sled_agent_gen = Generation::new();
+        assert!(datastore.sled_upsert(sled_update.clone()).await.is_err());
+
+        // Now bump the generation from the saved `current_gen` and change
+        // the reservoir value again. This should succeed.
+        sled_update.reservoir_size = ByteCount::from(
+            external::ByteCount::try_from(
+                sled_update.reservoir_size.0.to_bytes() + MIB,
+            )
+            .unwrap(),
+        );
+        sled_update.sled_agent_gen.0 = current_gen.0.next();
+        // Test that upserting the sled propagates those changes to the DB.
+        let observed_sled = datastore
+            .sled_upsert(sled_update.clone())
+            .await
+            .expect("Could not upsert sled during test prep");
+        assert_eq!(observed_sled.reservoir_size, sled_update.reservoir_size);
+        assert_eq!(observed_sled.sled_agent_gen, sled_update.sled_agent_gen);
+
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
+    }
+
     #[tokio::test]
     async fn upsert_sled_doesnt_update_decommissioned() {
         let logctx =
             dev::test_setup_log("upsert_sled_doesnt_update_decommissioned");
         let mut db = test_setup_database(&logctx.log).await;
         let (opctx, datastore) = datastore_test(&logctx, &db).await;

         let mut sled_update = test_new_sled_update();
         let observed_sled =
-            datastore.sled_upsert(sled_update.clone()).await.unwrap().unwrap();
+            datastore.sled_upsert(sled_update.clone()).await.unwrap();
         assert_eq!(
             observed_sled.usable_hardware_threads,
             sled_update.usable_hardware_threads
@@ -830,15 +858,8 @@ mod test {
             .unwrap(),
         );

-        // Upserting the sled should produce the `Decommisioned` variant.
-        let sled = datastore
-            .sled_upsert(sled_update.clone())
-            .await
-            .expect("updating a decommissioned sled should succeed");
-        assert!(
-            matches!(sled, SledUpsertOutput::Decommissioned),
-            "sled should be decommissioned"
-        );
+        // Upserting the sled should produce an error, because it is
+        // decommissioned.
+        assert!(datastore.sled_upsert(sled_update.clone()).await.is_err());

         // The sled should not have been updated.
         let (_, observed_sled_2) = LookupPath::new(&opctx, &datastore)
@@ -874,26 +895,14 @@ mod test {
         let (opctx, datastore) = datastore_test(&logctx, &db).await;

         // Define some sleds that resources cannot be provisioned on.
-        let non_provisionable_sled = datastore
-            .sled_upsert(test_new_sled_update())
-            .await
-            .unwrap()
-            .unwrap();
-        let expunged_sled = datastore
-            .sled_upsert(test_new_sled_update())
-            .await
-            .unwrap()
-            .unwrap();
-        let decommissioned_sled = datastore
-            .sled_upsert(test_new_sled_update())
-            .await
-            .unwrap()
-            .unwrap();
-        let illegal_decommissioned_sled = datastore
-            .sled_upsert(test_new_sled_update())
-            .await
-            .unwrap()
-            .unwrap();
+        let non_provisionable_sled =
+            datastore.sled_upsert(test_new_sled_update()).await.unwrap();
+        let expunged_sled =
+            datastore.sled_upsert(test_new_sled_update()).await.unwrap();
+        let decommissioned_sled =
+            datastore.sled_upsert(test_new_sled_update()).await.unwrap();
+        let illegal_decommissioned_sled =
+            datastore.sled_upsert(test_new_sled_update()).await.unwrap();

         let ineligible_sleds = IneligibleSleds {
             non_provisionable: non_provisionable_sled.id(),
@@ -926,7 +935,7 @@ mod test {
         // Now add a provisionable sled and try again.
         let sled_update = test_new_sled_update();
         let provisionable_sled =
-            datastore.sled_upsert(sled_update.clone()).await.unwrap().unwrap();
+            datastore.sled_upsert(sled_update.clone()).await.unwrap();

         // Try a few times to ensure that resources never get allocated to the
         // non-provisionable sled.
@@ -1048,11 +1057,7 @@ mod test {
             .enumerate();

         // Set up a sled to test against.
- let sled = datastore - .sled_upsert(test_new_sled_update()) - .await - .unwrap() - .unwrap(); + let sled = datastore.sled_upsert(test_new_sled_update()).await.unwrap(); let sled_id = sled.id(); for (i, ((policy, state), after)) in all_transitions { @@ -1173,6 +1178,7 @@ mod test { sled_baseboard_for_test(), sled_system_hardware_for_test(), rack_id(), + Generation::new(), ) } diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index 97d8324fdb..5e677b98be 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -138,6 +138,7 @@ pub(crate) async fn ensure_crucible_dataset_records_exist( #[cfg(test)] mod tests { use super::*; + use nexus_db_model::Generation; use nexus_db_model::SledBaseboard; use nexus_db_model::SledSystemHardware; use nexus_db_model::SledUpdate; @@ -184,12 +185,9 @@ mod tests { reservoir_size: (16 << 30).try_into().unwrap(), }, rack_id, + Generation::new(), ); - datastore - .sled_upsert(sled) - .await - .expect("failed to upsert sled") - .unwrap(); + datastore.sled_upsert(sled).await.expect("failed to upsert sled"); for zone in &config.zones.zones { let OmicronZoneType::Crucible { dataset, .. } = &zone.zone_type diff --git a/nexus/src/app/background/blueprint_execution.rs b/nexus/src/app/background/blueprint_execution.rs index 3c2530a3d3..ceb3cd7a05 100644 --- a/nexus/src/app/background/blueprint_execution.rs +++ b/nexus/src/app/background/blueprint_execution.rs @@ -191,12 +191,12 @@ mod test { reservoir_size: ByteCount(999.into()), }, rack_id, + nexus_db_model::Generation::new(), ); datastore .sled_upsert(update) .await - .expect("Failed to insert sled to db") - .unwrap(); + .expect("Failed to insert sled to db"); } let (blueprint_tx, blueprint_rx) = watch::channel(None); diff --git a/nexus/src/app/background/inventory_collection.rs b/nexus/src/app/background/inventory_collection.rs index c0d64d554a..27f08ec738 100644 --- a/nexus/src/app/background/inventory_collection.rs +++ b/nexus/src/app/background/inventory_collection.rs @@ -194,6 +194,7 @@ mod test { use crate::app::background::common::BackgroundTask; use crate::app::background::inventory_collection::DbSledAgentEnumerator; use crate::app::background::inventory_collection::InventoryCollector; + use nexus_db_model::Generation; use nexus_db_model::SledBaseboard; use nexus_db_model::SledSystemHardware; use nexus_db_model::SledUpdate; @@ -337,8 +338,9 @@ mod test { reservoir_size: ByteCount::from_gibibytes_u32(8).into(), }, rack_id, + Generation::new(), ); - sleds.push(datastore.sled_upsert(sled).await.unwrap().unwrap()); + sleds.push(datastore.sled_upsert(sled).await.unwrap()); } // The same enumerator should immediately find all the new sleds. diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs index a9341472e0..88f70c7d0d 100644 --- a/nexus/src/app/sled.rs +++ b/nexus/src/app/sled.rs @@ -5,13 +5,12 @@ //! Sleds, and the hardware and services within them. 
use crate::internal_api::params::{ - PhysicalDiskDeleteRequest, PhysicalDiskPutRequest, SledAgentStartupInfo, - SledRole, ZpoolPutRequest, + PhysicalDiskDeleteRequest, PhysicalDiskPutRequest, SledAgentInfo, SledRole, + ZpoolPutRequest, }; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; -use nexus_db_queries::db::datastore::SledUpsertOutput; use nexus_db_queries::db::lookup; use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::model::DatasetKind; @@ -45,7 +44,7 @@ impl super::Nexus { &self, _opctx: &OpContext, id: Uuid, - info: SledAgentStartupInfo, + info: SledAgentInfo, ) -> Result<(), Error> { info!(self.log, "registered sled agent"; "sled_uuid" => id.to_string()); @@ -69,21 +68,9 @@ impl super::Nexus { reservoir_size: info.reservoir_size.into(), }, self.rack_id, + info.generation.into(), ); - match self.db_datastore.sled_upsert(sled).await? { - SledUpsertOutput::Updated(_) => {} - SledUpsertOutput::Decommissioned => { - // We currently don't bubble up errors for decommissioned sleds - // -- if it ever happens, a decommissioned sled-agent doesn't - // know about that. - warn!( - self.log, - "decommissioned sled-agent reached out for upserts"; - "sled_uuid" => id.to_string() - ); - } - } - + self.db_datastore.sled_upsert(sled).await?; Ok(()) } diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs index 5702539a4b..e298935fee 100644 --- a/nexus/src/internal_api/http_entrypoints.rs +++ b/nexus/src/internal_api/http_entrypoints.rs @@ -8,7 +8,7 @@ use crate::ServerContext; use super::params::{ OximeterInfo, PhysicalDiskDeleteRequest, PhysicalDiskPutRequest, - PhysicalDiskPutResponse, RackInitializationRequest, SledAgentStartupInfo, + PhysicalDiskPutResponse, RackInitializationRequest, SledAgentInfo, ZpoolPutRequest, ZpoolPutResponse, }; use dropshot::endpoint; @@ -58,6 +58,7 @@ type NexusApiDescription = ApiDescription>; /// Returns a description of the internal nexus API pub(crate) fn internal_api() -> NexusApiDescription { fn register_endpoints(api: &mut NexusApiDescription) -> Result<(), String> { + api.register(sled_agent_get)?; api.register(sled_agent_put)?; api.register(sled_firewall_rules_request)?; api.register(switch_put)?; @@ -109,6 +110,27 @@ struct SledAgentPathParam { sled_id: Uuid, } +/// Return information about the given sled agent +#[endpoint { + method = GET, + path = "/sled-agents/{sled_id}", + }] +async fn sled_agent_get( + rqctx: RequestContext>, + path_params: Path, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let nexus = &apictx.nexus; + let opctx = crate::context::op_context_for_internal_api(&rqctx).await; + let path = path_params.into_inner(); + let sled_id = &path.sled_id; + let handler = async { + let (.., sled) = nexus.sled_lookup(&opctx, sled_id)?.fetch().await?; + Ok(HttpResponseOk(sled.into())) + }; + apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + /// Report that the sled agent for the specified sled has come online. 
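// (Illustrative aside from the editor, not part of this patch.) From the
// generated `nexus-client`, the pair of endpoints above is exercised as
// roughly:
//
//     let info = client.sled_agent_get(&sled_id).await?.into_inner();
//     client.sled_agent_put(&sled_id, &info).await?;
//
// which is the GET-then-PUT sequence the sled-agent notifier task uses later
// in this series.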
#[endpoint { method = POST, @@ -117,7 +139,7 @@ struct SledAgentPathParam { async fn sled_agent_put( rqctx: RequestContext>, path_params: Path, - sled_info: TypedBody, + sled_info: TypedBody, ) -> Result { let apictx = rqctx.context(); let nexus = &apictx.nexus; diff --git a/nexus/tests/integration_tests/rack.rs b/nexus/tests/integration_tests/rack.rs index 2098ff660d..1148655195 100644 --- a/nexus/tests/integration_tests/rack.rs +++ b/nexus/tests/integration_tests/rack.rs @@ -13,9 +13,10 @@ use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use nexus_types::external_api::shared::UninitializedSled; use nexus_types::external_api::views::Rack; -use nexus_types::internal_api::params::SledAgentStartupInfo; +use nexus_types::internal_api::params::SledAgentInfo; use nexus_types::internal_api::params::SledRole; use omicron_common::api::external::ByteCount; +use omicron_common::api::external::Generation; use omicron_nexus::TestInterfaces; use uuid::Uuid; @@ -108,13 +109,15 @@ async fn test_sled_list_uninitialized(cptestctx: &ControlPlaneTestContext) { // Just pick some random fields other than `baseboard` let baseboard = uninitialized_sleds.pop().unwrap().baseboard; let sled_uuid = Uuid::new_v4(); - let sa = SledAgentStartupInfo { + let sa = SledAgentInfo { sa_address: "[fd00:1122:3344:0100::1]:8080".parse().unwrap(), role: SledRole::Gimlet, baseboard, usable_hardware_threads: 32, usable_physical_ram: ByteCount::from_gibibytes_u32(100), reservoir_size: ByteCount::from_mebibytes_u32(100), + generation: Generation::new(), + decommissioned: false, }; internal_client .make_request( diff --git a/nexus/types/src/internal_api/params.rs b/nexus/types/src/internal_api/params.rs index 31ea86907a..e8c4703008 100644 --- a/nexus/types/src/internal_api/params.rs +++ b/nexus/types/src/internal_api/params.rs @@ -9,6 +9,7 @@ use crate::external_api::params::UserId; use crate::external_api::shared::Baseboard; use crate::external_api::shared::IpRange; use omicron_common::api::external::ByteCount; +use omicron_common::api::external::Generation; use omicron_common::api::external::MacAddr; use omicron_common::api::external::Name; use omicron_common::api::internal::shared::ExternalPortDiscovery; @@ -36,9 +37,9 @@ pub enum SledRole { Scrimlet, } -/// Sent by a sled agent on startup to Nexus to request further instruction +/// Sent by a sled agent to Nexus to inform about resources #[derive(Serialize, Deserialize, Debug, JsonSchema)] -pub struct SledAgentStartupInfo { +pub struct SledAgentInfo { /// The address of the sled agent's API endpoint pub sa_address: SocketAddrV6, @@ -58,6 +59,15 @@ pub struct SledAgentStartupInfo { /// /// Must be smaller than "usable_physical_ram" pub reservoir_size: ByteCount, + + /// The generation number of this request from sled-agent + pub generation: Generation, + + /// Whether the sled-agent has been decommissioned by nexus + /// + /// This flag is only set to true by nexus. Setting it on an upsert from + /// sled-agent has no effect. 
+ pub decommissioned: bool, } #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 65c167f177..fb3ff976ae 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -798,6 +798,39 @@ } }, "/sled-agents/{sled_id}": { + "get": { + "summary": "Return information about the given sled agent", + "operationId": "sled_agent_get", + "parameters": [ + { + "in": "path", + "name": "sled_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledAgentInfo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, "post": { "summary": "Report that the sled agent for the specified sled has come online.", "operationId": "sled_agent_put", @@ -816,7 +849,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SledAgentStartupInfo" + "$ref": "#/components/schemas/SledAgentInfo" } } }, @@ -6394,8 +6427,8 @@ "sled_id" ] }, - "SledAgentStartupInfo": { - "description": "Sent by a sled agent on startup to Nexus to request further instruction", + "SledAgentInfo": { + "description": "Sent by a sled agent to Nexus to inform about resources", "type": "object", "properties": { "baseboard": { @@ -6406,6 +6439,18 @@ } ] }, + "decommissioned": { + "description": "Whether the sled-agent has been decommissioned by nexus\n\nThis flag is only set to true by nexus. Setting it on an upsert from sled-agent has no effect.", + "type": "boolean" + }, + "generation": { + "description": "The generation number of this request from sled-agent", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, "reservoir_size": { "description": "Amount of RAM dedicated to the VMM reservoir\n\nMust be smaller than \"usable_physical_ram\"", "allOf": [ @@ -6443,6 +6488,8 @@ }, "required": [ "baseboard", + "decommissioned", + "generation", "reservoir_size", "role", "sa_address", diff --git a/schema/crdb/39.0.0/up.sql b/schema/crdb/39.0.0/up.sql new file mode 100644 index 0000000000..d11fabb297 --- /dev/null +++ b/schema/crdb/39.0.0/up.sql @@ -0,0 +1,4 @@ +-- Sled Agent upserts are now conditional on a generation number +ALTER TABLE omicron.public.sled +ADD COLUMN IF NOT EXISTS sled_agent_gen INT8 NOT NULL + DEFAULT 1; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 40a6fd463f..79b6131d85 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -147,6 +147,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.sled ( /* The actual state of the sled, updated exclusively by Nexus */ sled_state omicron.public.sled_state NOT NULL, + /* Generation number owned and incremented by the sled-agent */ + sled_agent_gen INT8 NOT NULL DEFAULT 1, + -- This constraint should be upheld, even for deleted disks -- in the fleet. 
CONSTRAINT serial_part_revision_unique UNIQUE ( @@ -3552,7 +3555,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '38.0.0', NULL) + ( TRUE, NOW(), NOW(), '39.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index d61f2d09e4..7f46a277ce 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -28,6 +28,7 @@ dpd-client.workspace = true display-error-chain.workspace = true dropshot.workspace = true flate2.workspace = true +flume.workspace = true futures.workspace = true glob.workspace = true hex.workspace = true diff --git a/sled-agent/src/fakes/nexus.rs b/sled-agent/src/fakes/nexus.rs index 5ba4b6c501..5920d40f6c 100644 --- a/sled-agent/src/fakes/nexus.rs +++ b/sled-agent/src/fakes/nexus.rs @@ -8,13 +8,17 @@ //! to operate correctly. use dropshot::{ - endpoint, ApiDescription, FreeformBody, HttpError, HttpResponseOk, Path, - RequestContext, + endpoint, ApiDescription, FreeformBody, HttpError, HttpResponseOk, + HttpResponseUpdatedNoContent, Path, RequestContext, TypedBody, }; use hyper::Body; use internal_dns::ServiceName; +use nexus_client::types::SledAgentInfo; use omicron_common::api::external::Error; use omicron_common::api::internal::nexus::UpdateArtifactId; +use schemars::JsonSchema; +use serde::Deserialize; +use uuid::Uuid; /// Implements a fake Nexus. /// @@ -28,6 +32,18 @@ pub trait FakeNexusServer: Send + Sync { ) -> Result, Error> { Err(Error::internal_error("Not implemented")) } + + fn sled_agent_get(&self, _sled_id: Uuid) -> Result { + Err(Error::internal_error("Not implemented")) + } + + fn sled_agent_put( + &self, + _sled_id: Uuid, + _info: SledAgentInfo, + ) -> Result<(), Error> { + Err(Error::internal_error("Not implemented")) + } } /// Describes the server context type. 
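// (Illustrative aside from the editor, not part of this patch.) A test can
// override only the trait methods it cares about and keep the default "not
// implemented" errors for the rest. A sketch against the trait above:

struct AcceptsUpserts;

impl FakeNexusServer for AcceptsUpserts {
    fn sled_agent_put(
        &self,
        _sled_id: Uuid,
        _info: SledAgentInfo,
    ) -> Result<(), Error> {
        // Pretend Nexus accepted the upsert.
        Ok(())
    }
}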
@@ -52,9 +68,50 @@ async fn cpapi_artifact_download( )) } +/// Path parameters for Sled Agent requests (internal API) +#[derive(Deserialize, JsonSchema)] +struct SledAgentPathParam { + sled_id: Uuid, +} + +/// Return information about the given sled agent +#[endpoint { + method = GET, + path = "/sled-agents/{sled_id}", + }] +async fn sled_agent_get( + request_context: RequestContext, + path_params: Path, +) -> Result, HttpError> { + let context = request_context.context(); + + Ok(HttpResponseOk( + context.sled_agent_get(path_params.into_inner().sled_id)?, + )) +} + +#[endpoint { + method = POST, + path = "/sled-agents/{sled_id}", + }] +async fn sled_agent_put( + request_context: RequestContext, + path_params: Path, + sled_info: TypedBody, +) -> Result { + let context = request_context.context(); + context.sled_agent_put( + path_params.into_inner().sled_id, + sled_info.into_inner(), + )?; + Ok(HttpResponseUpdatedNoContent()) +} + fn api() -> ApiDescription { let mut api = ApiDescription::new(); api.register(cpapi_artifact_download).unwrap(); + api.register(sled_agent_get).unwrap(); + api.register(sled_agent_put).unwrap(); api } diff --git a/sled-agent/src/hardware_monitor.rs b/sled-agent/src/hardware_monitor.rs index 698d2d4608..5dcd060f98 100644 --- a/sled-agent/src/hardware_monitor.rs +++ b/sled-agent/src/hardware_monitor.rs @@ -172,7 +172,7 @@ impl HardwareMonitor { } HardwareUpdate::TofinoDeviceChange => { if let Some(sled_agent) = &mut self.sled_agent { - sled_agent.notify_nexus_about_self(&self.log); + sled_agent.notify_nexus_about_self(&self.log).await; } } HardwareUpdate::DiskAdded(disk) => { @@ -234,10 +234,12 @@ impl HardwareMonitor { // We use this when we're monitoring hardware for the first // time, and if we miss notifications. async fn check_latest_hardware_snapshot(&mut self) { - let underlay_network = self.sled_agent.as_ref().map(|sled_agent| { - sled_agent.notify_nexus_about_self(&self.log); - sled_agent.switch_zone_underlay_info() - }); + let underlay_network = if let Some(sled_agent) = &self.sled_agent { + sled_agent.notify_nexus_about_self(&self.log).await; + Some(sled_agent.switch_zone_underlay_info()) + } else { + None + }; info!( self.log, "Checking current full hardware snapshot"; "underlay_network_info" => ?underlay_network, diff --git a/sled-agent/src/instance_manager.rs b/sled-agent/src/instance_manager.rs index 666d970538..badaf2d7ba 100644 --- a/sled-agent/src/instance_manager.rs +++ b/sled-agent/src/instance_manager.rs @@ -14,16 +14,16 @@ use crate::params::{ InstanceHardware, InstanceMigrationSourceParams, InstancePutStateResponse, InstanceStateRequested, InstanceUnregisterResponse, }; +use crate::vmm_reservoir::VmmReservoirManagerHandle; use crate::zone_bundle::BundleError; use crate::zone_bundle::ZoneBundler; +use omicron_common::api::external::ByteCount; use anyhow::anyhow; use illumos_utils::dladm::Etherstub; use illumos_utils::link::VnicAllocator; use illumos_utils::opte::PortManager; use illumos_utils::running_zone::ZoneBuilderFactory; -use illumos_utils::vmm_reservoir; -use omicron_common::api::external::ByteCount; use omicron_common::api::internal::nexus::InstanceRuntimeState; use omicron_common::api::internal::nexus::SledInstanceState; use omicron_common::api::internal::nexus::VmmRuntimeState; @@ -31,7 +31,7 @@ use sled_storage::manager::StorageHandle; use slog::Logger; use std::collections::BTreeMap; use std::net::SocketAddr; -use std::sync::{Arc, Mutex}; +use std::sync::Arc; use tokio::sync::{mpsc, oneshot}; use uuid::Uuid; @@ -49,12 +49,6 @@ pub 
enum Error { #[error("OPTE port management error: {0}")] Opte(#[from] illumos_utils::opte::Error), - #[error("Failed to create reservoir: {0}")] - Reservoir(#[from] vmm_reservoir::Error), - - #[error("Invalid reservoir configuration: {0}")] - ReservoirConfig(String), - #[error("Cannot find data link: {0}")] Underlay(#[from] sled_hardware::underlay::Error), @@ -73,12 +67,6 @@ pub enum Error { RequestDropped(#[from] oneshot::error::RecvError), } -pub enum ReservoirMode { - None, - Size(u32), - Percentage(u8), -} - pub(crate) struct InstanceManagerServices { pub nexus_client: NexusClientWithResolver, pub vnic_allocator: VnicAllocator, @@ -91,14 +79,8 @@ pub(crate) struct InstanceManagerServices { // Describes the internals of the "InstanceManager", though most of the // instance manager's state exists within the "InstanceManagerRunner" structure. struct InstanceManagerInternal { - log: Logger, tx: mpsc::Sender, - // NOTE: Arguably, this field could be "owned" by the InstanceManagerRunner. - // It was not moved there, and the reservoir functions were not converted to - // use the message-passing interface (see: "InstanceManagerRequest") because - // callers of "get/set reservoir size" are not async, and (in the case of - // getting the size) they also do not expect a "Result" type. - reservoir_size: Mutex, + vmm_reservoir_manager: VmmReservoirManagerHandle, #[allow(dead_code)] runner_handle: tokio::task::JoinHandle<()>, @@ -111,6 +93,7 @@ pub struct InstanceManager { impl InstanceManager { /// Initializes a new [`InstanceManager`] object. + #[allow(clippy::too_many_arguments)] pub fn new( log: Logger, nexus_client: NexusClientWithResolver, @@ -119,6 +102,7 @@ impl InstanceManager { storage: StorageHandle, zone_bundler: ZoneBundler, zone_builder_factory: ZoneBuilderFactory, + vmm_reservoir_manager: VmmReservoirManagerHandle, ) -> Result { let (tx, rx) = mpsc::channel(QUEUE_SIZE); let (terminate_tx, terminate_rx) = mpsc::unbounded_channel(); @@ -143,91 +127,13 @@ impl InstanceManager { Ok(Self { inner: Arc::new(InstanceManagerInternal { - log, tx, - // no reservoir size set on startup - reservoir_size: Mutex::new(ByteCount::from_kibibytes_u32(0)), + vmm_reservoir_manager, runner_handle, }), }) } - /// Sets the VMM reservoir to the requested percentage of usable physical - /// RAM or to a size in MiB. Either mode will round down to the nearest - /// aligned size required by the control plane. 
- pub fn set_reservoir_size( - &self, - hardware: &sled_hardware::HardwareManager, - mode: ReservoirMode, - ) -> Result<(), Error> { - let hardware_physical_ram_bytes = hardware.usable_physical_ram_bytes(); - let req_bytes = match mode { - ReservoirMode::None => return Ok(()), - ReservoirMode::Size(mb) => { - let bytes = ByteCount::from_mebibytes_u32(mb).to_bytes(); - if bytes > hardware_physical_ram_bytes { - return Err(Error::ReservoirConfig(format!( - "cannot specify a reservoir of {bytes} bytes when \ - physical memory is {hardware_physical_ram_bytes} bytes", - ))); - } - bytes - } - ReservoirMode::Percentage(percent) => { - if !matches!(percent, 1..=99) { - return Err(Error::ReservoirConfig(format!( - "VMM reservoir percentage of {} must be between 0 and \ - 100", - percent - ))); - }; - (hardware_physical_ram_bytes as f64 * (percent as f64 / 100.0)) - .floor() as u64 - } - }; - - let req_bytes_aligned = vmm_reservoir::align_reservoir_size(req_bytes); - - if req_bytes_aligned == 0 { - warn!( - self.inner.log, - "Requested reservoir size of {} bytes < minimum aligned size \ - of {} bytes", - req_bytes, - vmm_reservoir::RESERVOIR_SZ_ALIGN - ); - return Ok(()); - } - - // The max ByteCount value is i64::MAX, which is ~8 million TiB. - // As this value is either a percentage of DRAM or a size in MiB - // represented as a u32, constructing this should always work. - let reservoir_size = ByteCount::try_from(req_bytes_aligned).unwrap(); - if let ReservoirMode::Percentage(percent) = mode { - info!( - self.inner.log, - "{}% of {} physical ram = {} bytes)", - percent, - hardware_physical_ram_bytes, - req_bytes, - ); - } - info!( - self.inner.log, - "Setting reservoir size to {reservoir_size} bytes" - ); - vmm_reservoir::ReservoirControl::set(reservoir_size)?; - - *self.inner.reservoir_size.lock().unwrap() = reservoir_size; - - Ok(()) - } - - /// Returns the last-set size of the reservoir - pub fn reservoir_size(&self) -> ByteCount { - *self.inner.reservoir_size.lock().unwrap() - } - #[allow(clippy::too_many_arguments)] pub async fn ensure_registered( &self, @@ -383,6 +289,11 @@ impl InstanceManager { .map_err(|_| Error::FailedSendInstanceManagerClosed)?; rx.await? } + + /// Returns the last-set size of the reservoir + pub fn reservoir_size(&self) -> ByteCount { + self.inner.vmm_reservoir_manager.reservoir_size() + } } // Most requests that can be sent to the "InstanceManagerRunner" task. diff --git a/sled-agent/src/lib.rs b/sled-agent/src/lib.rs index 527b483ee8..bfc23b248d 100644 --- a/sled-agent/src/lib.rs +++ b/sled-agent/src/lib.rs @@ -40,6 +40,7 @@ mod smf_helper; mod storage_monitor; mod swap_device; mod updates; +mod vmm_reservoir; mod zone_bundle; #[cfg(test)] diff --git a/sled-agent/src/nexus.rs b/sled-agent/src/nexus.rs index 4cd97d9ba8..7e7d60f6a4 100644 --- a/sled-agent/src/nexus.rs +++ b/sled-agent/src/nexus.rs @@ -3,16 +3,20 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
pub use nexus_client::Client as NexusClient; +use omicron_common::api::external::Generation; +use crate::vmm_reservoir::VmmReservoirManagerHandle; use internal_dns::resolver::{ResolveError, Resolver}; use internal_dns::ServiceName; +use nexus_client::types::SledAgentInfo; use omicron_common::address::NEXUS_INTERNAL_PORT; +use sled_hardware::HardwareManager; use slog::Logger; -use std::future::Future; -use std::pin::Pin; +use std::net::SocketAddrV6; use std::sync::Arc; -use tokio::sync::mpsc; -use tokio::task::JoinHandle; +use tokio::sync::{broadcast, mpsc, oneshot, Notify}; +use tokio::time::{interval, Duration, MissedTickBehavior}; +use uuid::Uuid; /// A thin wrapper over a progenitor-generated NexusClient. /// @@ -72,45 +76,6 @@ impl NexusClientWithResolver { } } -type NexusRequestFut = dyn Future + Send; -type NexusRequest = Pin>; - -/// A queue of futures which represent requests to Nexus. -pub struct NexusRequestQueue { - tx: mpsc::UnboundedSender, - _worker: JoinHandle<()>, -} - -impl NexusRequestQueue { - /// Creates a new request queue, along with a worker which executes - /// any incoming tasks. - pub fn new() -> Self { - // TODO(https://github.com/oxidecomputer/omicron/issues/1917): - // In the future, this should basically just be a wrapper around a - // generation number, and we shouldn't be serializing requests to Nexus. - // - // In the meanwhile, we're using an unbounded_channel for simplicity, so - // that we don't need to cope with dropped notifications / - // retransmissions. - let (tx, mut rx) = mpsc::unbounded_channel(); - - let _worker = tokio::spawn(async move { - while let Some(fut) = rx.recv().await { - fut.await; - } - }); - - Self { tx, _worker } - } - - /// Gets access to the sending portion of the request queue. - /// - /// Callers can use this to add their own requests. - pub fn sender(&self) -> &mpsc::UnboundedSender { - &self.tx - } -} - pub fn d2n_params( params: &dns_service_client::types::DnsConfigParams, ) -> nexus_client::types::DnsConfigParams { @@ -202,3 +167,846 @@ impl ConvertInto } } } + +// Somewhat arbitrary bound size, large enough that we should never hit it. +const QUEUE_SIZE: usize = 256; + +enum NexusNotifierMsg { + // Inform nexus about a change to this sled-agent. This is just a + // notification to perform a send. The request is constructed inside + // `NexusNotifierTask`. 
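// (Illustrative aside from the editor, not part of this patch.) Callers never
// construct this message directly; they go through the `NexusNotifierHandle`
// defined below. A typical (assumed) call site looks like:
//
//     let (task, handle) = NexusNotifierTask::new(input, &log);
//     tokio::spawn(task.run());
//     // ...later, whenever local state changes:
//     handle.notify_nexus_about_self(&log).await;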
+    NotifyNexusAboutSelf,
+
+    // Return status of the `NexusNotifierTask`
+    Status(oneshot::Sender<NexusNotifierTaskStatus>),
+}
+
+#[derive(Debug)]
+pub struct NexusNotifierTaskStatus {
+    pub nexus_known_info: Option<NexusKnownInfo>,
+    pub has_pending_notification: bool,
+    pub has_outstanding_request: bool,
+    pub total_get_requests_started: u64,
+    pub total_get_requests_completed: u64,
+    pub total_put_requests_started: u64,
+    pub total_put_requests_completed: u64,
+    pub cancelled_pending_notifications: u64,
+}
+
+#[derive(Debug)]
+pub struct NexusNotifierHandle {
+    tx: mpsc::Sender<NexusNotifierMsg>,
+}
+
+#[derive(Debug)]
+pub struct SenderOrReceiverDropped {}
+
+impl NexusNotifierHandle {
+    pub async fn notify_nexus_about_self(&self, log: &Logger) {
+        if let Err(_) =
+            self.tx.send(NexusNotifierMsg::NotifyNexusAboutSelf).await
+        {
+            warn!(log, "Failed to send to NexusNotifierTask: did it exit?");
+        }
+    }
+
+    #[allow(unused)]
+    pub async fn get_status(
+        &self,
+    ) -> Result<NexusNotifierTaskStatus, SenderOrReceiverDropped> {
+        let (tx, rx) = oneshot::channel();
+        self.tx
+            .send(NexusNotifierMsg::Status(tx))
+            .await
+            .map_err(|_| SenderOrReceiverDropped {})?;
+        rx.await.map_err(|_| SenderOrReceiverDropped {})
+    }
+}
+
+// The type of operation issued to nexus
+enum NexusOp {
+    Get,
+    Put,
+}
+
+/// What sled-agent has confirmed that Nexus knows about this sled-agent
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum NexusKnownInfo {
+    // CRDB doesn't contain a record for this sled-agent
+    NotFound,
+    Found(SledAgentInfo),
+}
+
+impl NexusKnownInfo {
+    fn generation(&self) -> Generation {
+        match self {
+            NexusKnownInfo::NotFound => Generation::new(),
+            NexusKnownInfo::Found(known) => known.generation,
+        }
+    }
+}
+
+/// Return the latest sled-agent info. Boxed for use in testing.
+type GetSledAgentInfo = Box<dyn Fn(Generation) -> SledAgentInfo + Send>;
+
+// A mechanism owned by the `NexusNotifierTask` that allows it to access
+// enough information to send a `SledAgentInfo` to Nexus.
+pub struct NexusNotifierInput {
+    pub sled_id: Uuid,
+    pub sled_address: SocketAddrV6,
+    pub nexus_client: NexusClient,
+    pub hardware: HardwareManager,
+    pub vmm_reservoir_manager: VmmReservoirManagerHandle,
+}
+
+// Type returned from a join on the "outstanding request" task
+type NexusRsp = (
+    NexusOp,
+    Result<SledAgentInfo, nexus_client::Error<nexus_client::types::Error>>,
+);
+
+/// A mechanism for notifying nexus about this sled agent
+///
+/// The semantics are as follows:
+/// 1. At any time there is a single outstanding HTTP request to nexus
+/// 2. On startup, this task gets the latest sled-agent info, if any, from
+///    nexus, and saves it.
+/// 3. Whenever the state needs to be updated to a value different
+///    from what nexus has, the generation number is bumped
+///    and the new state transmitted.
+/// 4. If a caller requests an update to be made to nexus and it succeeds,
+///    the known value is set to what was updated.
+/// 5. If the request fails, we go ahead and set the known state to `None`.
+///    This will trigger a get request for the latest state. In the case of
+///    a timeout we are not sure if the last update succeeded and so need to
+///    learn that. In the case of an explicit rejection from nexus, we also
+///    want to learn the latest state, because an update can fail if the
+///    sled-agent's state is out of date with respect to what's in CRDB. The
+///    only way to fix that is to get the latest known state, and it's
+///    easiest to just do that unconditionally rather than trying to reason
+///    about exactly why Nexus rejected the request.
+/// 6.
An exception to step 5 is that if the latest state returned from Nexus +/// contains `decommissioned = true`, then we stop dead in our tracks and no +/// longer send requests to nexus. +pub struct NexusNotifierTask { + sled_id: Uuid, + nexus_client: NexusClient, + get_sled_agent_info: GetSledAgentInfo, + + // Notifies when the VMM reservoir size changes. + // + // This is an option, since we don't use a reservoir during testing. There + // really isn't a better place to put this in sled-agent, and this task is + // the only one that cares about updates. + vmm_reservoir_size_updated: Option>, + + log: Logger, + rx: mpsc::Receiver, + + // The last known value either put or gotten from nexus + // + // We only send `Get` requests if we haven't learned any info yet + nexus_known_info: Option, + + // Do we need to notify nexus about an update to our state? + pending_notification: bool, + + // We only have one outstanding nexus request at a time. + // + // We spawn a task to manage this request so we don't block our main + // notifier task. We wait for a notification on `outstanding_req_ready` + // and then get the result from the `JoinHandle`. + outstanding_request: Option>, + + // A notification sent from the outstanding task when it has completed. + outstanding_req_ready: Arc, + + // Some stats for testing/debugging + total_get_requests_started: u64, + total_put_requests_started: u64, + total_get_requests_completed: u64, + total_put_requests_completed: u64, + cancelled_pending_notifications: u64, +} + +impl NexusNotifierTask { + pub fn new( + input: NexusNotifierInput, + log: &Logger, + ) -> (NexusNotifierTask, NexusNotifierHandle) { + let NexusNotifierInput { + sled_id, + sled_address, + nexus_client, + hardware, + vmm_reservoir_manager, + } = input; + + let vmm_reservoir_size_updated = + Some(vmm_reservoir_manager.subscribe_for_size_updates()); + + // Box a function that can return the latest `SledAgentInfo` + let get_sled_agent_info = Box::new(move |generation| { + let role = if hardware.is_scrimlet() { + nexus_client::types::SledRole::Scrimlet + } else { + nexus_client::types::SledRole::Gimlet + }; + SledAgentInfo { + sa_address: sled_address.to_string(), + role, + baseboard: hardware.baseboard().convert(), + usable_hardware_threads: hardware.online_processor_count(), + usable_physical_ram: hardware + .usable_physical_ram_bytes() + .into(), + reservoir_size: vmm_reservoir_manager.reservoir_size().into(), + generation, + decommissioned: false, + } + }); + + let (tx, rx) = mpsc::channel(QUEUE_SIZE); + ( + NexusNotifierTask { + sled_id, + nexus_client, + get_sled_agent_info, + vmm_reservoir_size_updated, + log: log.new(o!("component" => "NexusNotifierTask")), + rx, + nexus_known_info: None, + // We start with pending true, because we always want to attempt + // to retrieve the current generation number before we upsert + // ourselves. + pending_notification: true, + outstanding_request: None, + outstanding_req_ready: Arc::new(Notify::new()), + total_get_requests_started: 0, + total_put_requests_started: 0, + total_get_requests_completed: 0, + total_put_requests_completed: 0, + cancelled_pending_notifications: 0, + }, + NexusNotifierHandle { tx }, + ) + } + + #[cfg(test)] + pub fn new_for_test( + sled_id: Uuid, + nexus_client: NexusClient, + get_sled_agent_info: GetSledAgentInfo, + log: &Logger, + ) -> (NexusNotifierTask, NexusNotifierHandle) { + let (tx, rx) = mpsc::channel(QUEUE_SIZE); + + // During testing we don't actually have a reservoir. Just dummy it out. 
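// (Illustrative aside from the editor, not part of this patch.) The
// single-outstanding-request machinery above (`outstanding_request` plus
// `outstanding_req_ready`) in miniature: the spawned task signals a `Notify`
// when it finishes, and the event loop then harvests the `JoinHandle`
// without ever blocking on an in-flight request. A self-contained sketch:

use std::sync::Arc;

use tokio::sync::Notify;

#[tokio::main]
async fn main() {
    let ready = Arc::new(Notify::new());
    let ready2 = ready.clone();
    let mut outstanding = Some(tokio::spawn(async move {
        let result = 42; // stand-in for an HTTP request to Nexus
        ready2.notify_one(); // wake the event loop
        result
    }));
    ready.notified().await;
    // Safe: the task signalled completion, so this await returns promptly.
    let result = outstanding.take().unwrap().await.unwrap();
    assert_eq!(result, 42);
}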
+        let vmm_reservoir_size_updated = None;
+
+        (
+            NexusNotifierTask {
+                sled_id,
+                nexus_client,
+                get_sled_agent_info,
+                vmm_reservoir_size_updated,
+                log: log.new(o!("component" => "NexusNotifierTask")),
+                rx,
+                nexus_known_info: None,
+                // We start with pending true, because we always want to attempt
+                // to retrieve the current generation number before we upsert
+                // ourselves.
+                pending_notification: true,
+                outstanding_request: None,
+                outstanding_req_ready: Arc::new(Notify::new()),
+                total_get_requests_started: 0,
+                total_put_requests_started: 0,
+                total_get_requests_completed: 0,
+                total_put_requests_completed: 0,
+                cancelled_pending_notifications: 0,
+            },
+            NexusNotifierHandle { tx },
+        )
+    }
+
+    /// Run the main receive loop of the `NexusNotifierTask`
+    ///
+    /// This should be spawned into a tokio task
+    pub async fn run(mut self) {
+        let (_tx, mut vmm_size_updated) = if let Some(vmm_size_updated) =
+            self.vmm_reservoir_size_updated.take()
+        {
+            (None, vmm_size_updated)
+        } else {
+            // Dummy channel for testing
+            let (tx, rx) = broadcast::channel(1);
+            (Some(tx), rx)
+        };
+
+        loop {
+            const RETRY_TIMEOUT: Duration = Duration::from_secs(2);
+            let mut interval = interval(RETRY_TIMEOUT);
+            interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
+            tokio::select! {
+                req = self.rx.recv() => {
+                    let Some(req) = req else {
+                        warn!(self.log, "All senders dropped. Exiting.");
+                        break;
+                    };
+                    match req {
+                        NexusNotifierMsg::NotifyNexusAboutSelf => {
+                            // We'll contact nexus on the next timeout
+                            self.pending_notification = true;
+                        }
+                        NexusNotifierMsg::Status(reply_tx) => {
+                            let _ = reply_tx.send(NexusNotifierTaskStatus {
+                                nexus_known_info: self.nexus_known_info.clone(),
+                                has_outstanding_request: self.outstanding_request.is_some(),
+                                has_pending_notification: self.pending_notification,
+                                total_get_requests_started: self.total_get_requests_started,
+                                total_put_requests_started: self.total_put_requests_started,
+                                total_get_requests_completed: self.total_get_requests_completed,
+                                total_put_requests_completed: self.total_put_requests_completed,
+                                cancelled_pending_notifications: self.cancelled_pending_notifications
+                            });
+                        }
+
+                    }
+                }
+                _ = vmm_size_updated.recv() => {
+                    self.pending_notification = true;
+                }
+                _ = self.outstanding_req_ready.notified() => {
+                    // Our request task has completed. Let's check the result.
+                    self.handle_nexus_reply().await;
+
+                }
+                _ = interval.tick(), if self.pending_notification => {
+                    self.contact_nexus().await;
+                }
+            }
+        }
+    }
+
+    /// If we haven't yet learned the latest info that nexus has about us
+    /// then go ahead and send a get request. Otherwise, if necessary, send
+    /// a put request to nexus with the latest `SledAgentInfo`.
+    ///
+    /// Only one outstanding request is allowed at a time, so if there is
+    /// already one outstanding then we return and will try again later.
+    async fn contact_nexus(&mut self) {
+        // Is there already an outstanding request to nexus?
+        if self.outstanding_request.is_some() {
+            return;
+        }
+
+        let client = self.nexus_client.clone();
+        let sled_id = self.sled_id;
+
+        // Have we learned about any generations stored in CRDB yet?
+        if let Some(known_info) = &self.nexus_known_info {
+            let mut info = (self.get_sled_agent_info)(known_info.generation());
+
+            // Does CRDB actually contain an existing record for this sled?
+            match known_info {
+                NexusKnownInfo::NotFound => {
+                    // Nothing to do. We must send the request as is.
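+                    // (With no CRDB record, `generation()` returned
+                    // `Generation::new()` above, so the PUT below registers
+                    // this sled at the starting generation.)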
+                }
+                NexusKnownInfo::Found(known) => {
+                    // Don't send any more requests if this sled-agent has been decommissioned.
+                    if known.decommissioned {
+                        return;
+                    }
+
+                    // We don't need to send a request if the info is identical to what
+                    // nexus knows
+                    if info == *known {
+                        self.pending_notification = false;
+                        self.cancelled_pending_notifications += 1;
+                        return;
+                    }
+
+                    // Bump the generation of the value that nexus knows, so
+                    // that the update takes precedence.
+                    info.generation = known.generation.next();
+                }
+            }
+
+            let outstanding_req_ready = self.outstanding_req_ready.clone();
+            self.total_put_requests_started += 1;
+            self.outstanding_request = Some(tokio::spawn(async move {
+                let res =
+                    client.sled_agent_put(&sled_id, &info).await.map(|_| info);
+                outstanding_req_ready.notify_one();
+                (NexusOp::Put, res)
+            }));
+        } else {
+            let outstanding_req_ready = self.outstanding_req_ready.clone();
+            self.total_get_requests_started += 1;
+            self.outstanding_request = Some(tokio::spawn(async move {
+                let res = client
+                    .sled_agent_get(&sled_id)
+                    .await
+                    .map(|info| info.into_inner());
+                outstanding_req_ready.notify_one();
+                (NexusOp::Get, res)
+            }));
+        }
+    }
+
+    /// Handle a reply from nexus by extracting the value from the `JoinHandle` of
+    /// the last outstanding request.
+    async fn handle_nexus_reply(&mut self) {
+        let (op, res) = match self
+            .outstanding_request
+            .take()
+            .expect("missing JoinHandle")
+            .await
+        {
+            Ok(res) => res,
+            Err(e) => {
+                error!(self.log, "Nexus request task exited prematurely: {e}");
+                return;
+            }
+        };
+        match (op, res) {
+            (NexusOp::Get, Ok(info)) => {
+                info!(
+                    self.log,
+                    "Retrieved SledAgentInfo from Nexus: {:?}", info
+                );
+                assert!(self.nexus_known_info.is_none());
+                if info.decommissioned {
+                    info!(self.log, "Sled Agent Decommissioned.");
+                }
+                self.nexus_known_info = Some(NexusKnownInfo::Found(info));
+                self.total_get_requests_completed += 1;
+            }
+            (NexusOp::Put, Ok(info)) => {
+                // Unwrap Safety: we must have a known value in order to have
+                // submitted a PUT request in the first place.
+                info!(
+                    self.log,
+                    "Successfully put SledAgentInfo to nexus";
+                    "old_generation" =>
+                        %self.nexus_known_info.as_ref().unwrap().generation(),
+                    "new_generation" => %info.generation,
+                );
+                self.nexus_known_info = Some(NexusKnownInfo::Found(info));
+                self.total_put_requests_completed += 1;
+            }
+            (NexusOp::Get, Err(e)) => {
+                self.total_get_requests_completed += 1;
+                if e.status() == Some(http::StatusCode::NOT_FOUND) {
+                    if self.nexus_known_info.is_none() {
+                        self.nexus_known_info = Some(NexusKnownInfo::NotFound);
+                        return;
+                    }
+                    warn!(
+                        self.log,
+                        "Nexus doesn't have the latest state of this \
+                         sled-agent, but sled-agent thinks it should. Setting \
+                         known state to `None` and trying again.",
+                    );
+                    self.nexus_known_info = None;
+                    return;
+                }
+                self.nexus_known_info = None;
+                warn!(
+                    self.log,
+                    "Received Error from Nexus for Get request: {:?}", e
+                );
+            }
+            (NexusOp::Put, Err(e)) => {
+                self.total_put_requests_completed += 1;
+                self.nexus_known_info = None;
+                warn!(
+                    self.log,
+                    "Received Error from Nexus for Put request: {:?}", e
+                );
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::sync::atomic::{AtomicBool, Ordering};
+
+    use crate::fakes::nexus::FakeNexusServer;
+    use omicron_test_utils::dev::poll::{
+        wait_for_condition as wait_for, CondCheckError,
+    };
+
+    use super::*;
+    use omicron_common::api::external::{
+        ByteCount, Error, Generation, LookupType, MessagePair, ResourceType,
+    };
+    use omicron_test_utils::dev::test_setup_log;
+    use sled_hardware::Baseboard;
+
+    /// Pretend to be CRDB storing info about a sled-agent
+    #[derive(Default, Clone)]
+    struct FakeCrdb {
+        info: Arc<std::sync::Mutex<Option<SledAgentInfo>>>,
+    }
+
+    // Puts and Gets with our magical fake nexus
+    struct NexusServer {
+        fake_crdb: FakeCrdb,
+        // Injectable errors
+        get_error: Arc<AtomicBool>,
+        put_error: Arc<AtomicBool>,
+    }
+    impl FakeNexusServer for NexusServer {
+        fn sled_agent_get(
+            &self,
+            sled_id: Uuid,
+        ) -> Result<SledAgentInfo, Error> {
+            // Always disable any errors after the first time. This simplifies
+            // testing due to lack of races.
+            let injected_err = self.get_error.swap(false, Ordering::SeqCst);
+            if injected_err {
+                return Err(Error::ServiceUnavailable {
+                    internal_message: "go away".into(),
+                });
+            }
+
+            self.fake_crdb.info.lock().unwrap().clone().ok_or(
+                Error::ObjectNotFound {
+                    type_name: ResourceType::Sled,
+                    lookup_type: LookupType::ById(sled_id),
+                },
+            )
+        }
+
+        fn sled_agent_put(
+            &self,
+            _sled_id: Uuid,
+            info: SledAgentInfo,
+        ) -> Result<(), Error> {
+            // Always disable any errors after the first time. This simplifies
+            // testing due to lack of races.
+            let injected_err = self.put_error.swap(false, Ordering::SeqCst);
+            if injected_err {
+                return Err(Error::Conflict {
+                    message: MessagePair::new(
+                        "I don't like the cut of your jib".into(),
+                    ),
+                });
+            }
+
+            let mut crdb_info = self.fake_crdb.info.lock().unwrap();
+            *crdb_info = Some(info);
+            Ok(())
+        }
+    }
+
+    #[tokio::test]
+    async fn nexus_self_notification_test() {
+        let logctx = test_setup_log("nexus_notification_test");
+        let log = &logctx.log;
+        let sa_address = "::1".to_string();
+        let fake_crdb = FakeCrdb::default();
+        let sled_id = Uuid::new_v4();
+        let get_error = Arc::new(AtomicBool::new(false));
+        let put_error = Arc::new(AtomicBool::new(false));
+
+        let nexus_server = crate::fakes::nexus::start_test_server(
+            log.clone(),
+            Box::new(NexusServer {
+                fake_crdb: fake_crdb.clone(),
+                get_error: get_error.clone(),
+                put_error: put_error.clone(),
+            }),
+        );
+        let nexus_client = NexusClient::new(
+            &format!("http://{}", nexus_server.local_addr()),
+            log.clone(),
+        );
+
+        // Pretend we are retrieving the latest `SledAgentInfo` from hardware and
+        // the VMM reservoir.
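+        // (The mutex-wrapped value plays the role that `HardwareManager` and
+        // the VMM reservoir handle play in production; the closure below reads
+        // it on every call.)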
+        let latest_sled_agent_info =
+            Arc::new(std::sync::Mutex::new(SledAgentInfo {
+                sa_address: sa_address.clone(),
+                role: nexus_client::types::SledRole::Gimlet,
+                baseboard: Baseboard::new_pc("test".into(), "test".into())
+                    .convert(),
+                usable_hardware_threads: 16,
+                usable_physical_ram: ByteCount::from(1024 * 1024 * 1024u32)
+                    .into(),
+                reservoir_size: ByteCount::from(0u32).into(),
+                generation: Generation::new(),
+                decommissioned: false,
+            }));
+        let latest_sled_agent_info2 = latest_sled_agent_info.clone();
+
+        // Return `SledAgentInfo` from our test object
+        let get_sled_agent_info: GetSledAgentInfo =
+            Box::new(move |generation| {
+                let mut info = latest_sled_agent_info2.lock().unwrap().clone();
+                info.generation = generation;
+                info
+            });
+
+        let (nexus_notifier_task, handle) = NexusNotifierTask::new_for_test(
+            sled_id,
+            nexus_client,
+            get_sled_agent_info,
+            log,
+        );
+
+        tokio::spawn(async move {
+            nexus_notifier_task.run().await;
+        });
+
+        // Ensure that the task will try to initially talk to Nexus and get back
+        // some `SledAgentInfo` or a `NotFound`, depending upon timing.
+        let status = wait_for::<_, (), _, _>(
+            || async {
+                let status = handle.get_status().await.unwrap();
+                if status.nexus_known_info.is_some() {
+                    Ok(status)
+                } else {
+                    Err(CondCheckError::NotYet)
+                }
+            },
+            &Duration::from_millis(2),
+            &Duration::from_secs(15),
+        )
+        .await
+        .expect("Failed to get status from Nexus");
+
+        if status.total_put_requests_completed == 0 {
+            assert_eq!(status.nexus_known_info, Some(NexusKnownInfo::NotFound));
+        }
+
+        // Wait for a steady state, when the latest info has been put to nexus
+        let status = wait_for::<_, (), _, _>(
+            || async {
+                let status = handle.get_status().await.unwrap();
+                if !status.has_pending_notification {
+                    Ok(status)
+                } else {
+                    Err(CondCheckError::NotYet)
+                }
+            },
+            &Duration::from_millis(2),
+            &Duration::from_secs(15),
+        )
+        .await
+        .expect("Failed to get status from Nexus");
+
+        assert_eq!(status.total_get_requests_started, 1u64);
+        assert_eq!(status.total_put_requests_started, 1u64);
+        assert_eq!(status.total_get_requests_completed, 1u64);
+        assert_eq!(status.total_put_requests_completed, 1u64);
+        assert_eq!(status.has_outstanding_request, false);
+        assert_eq!(status.cancelled_pending_notifications, 1);
+        let expected = latest_sled_agent_info.lock().unwrap().clone();
+        assert_eq!(
+            status.nexus_known_info,
+            Some(NexusKnownInfo::Found(expected)),
+        );
+
+        // Trigger another update notification
+        //
+        // We haven't changed the underlying `SledAgentInfo` so this request
+        // should be cancelled without a request going to nexus.
+        handle.notify_nexus_about_self(log).await;
+        let status = wait_for::<_, (), _, _>(
+            || async {
+                let status = handle.get_status().await.unwrap();
+                if status.cancelled_pending_notifications == 2 {
+                    Ok(status)
+                } else {
+                    Err(CondCheckError::NotYet)
+                }
+            },
+            &Duration::from_millis(2),
+            &Duration::from_secs(15),
+        )
+        .await
+        .expect("Failed to get status from Nexus");
+        assert_eq!(status.total_get_requests_started, 1u64);
+        assert_eq!(status.total_put_requests_started, 1u64);
+        assert_eq!(status.total_get_requests_completed, 1u64);
+        assert_eq!(status.total_put_requests_completed, 1u64);
+        assert_eq!(status.has_outstanding_request, false);
+        let expected = latest_sled_agent_info.lock().unwrap().clone();
+        assert_eq!(
+            status.nexus_known_info,
+            Some(NexusKnownInfo::Found(expected)),
+        );
+
+        // Update the VMM reservoir size and trigger a successful put to Nexus.
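+        // (Note the notifier's closure overwrites `generation` with the value
+        // it tracks, so only the reservoir-size change matters here.)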
+        {
+            let mut info = latest_sled_agent_info.lock().unwrap();
+            info.reservoir_size = (1024 * 1024u64).into();
+            info.generation = info.generation.next();
+        }
+
+        // Wait for a steady state, when the latest info has been put to nexus
+        handle.notify_nexus_about_self(log).await;
+        let status = wait_for::<_, (), _, _>(
+            || async {
+                let status = handle.get_status().await.unwrap();
+                if !status.has_pending_notification {
+                    Ok(status)
+                } else {
+                    Err(CondCheckError::NotYet)
+                }
+            },
+            &Duration::from_millis(2),
+            &Duration::from_secs(15),
+        )
+        .await
+        .expect("Failed to get status from Nexus");
+
+        assert_eq!(status.total_get_requests_started, 1u64);
+        assert_eq!(status.total_get_requests_completed, 1u64);
+        assert_eq!(status.total_put_requests_started, 2u64);
+        assert_eq!(status.total_put_requests_completed, 2u64);
+        assert_eq!(status.has_outstanding_request, false);
+        assert_eq!(status.cancelled_pending_notifications, 3);
+        let expected = latest_sled_agent_info.lock().unwrap().clone();
+        assert_eq!(
+            status.nexus_known_info,
+            Some(NexusKnownInfo::Found(expected)),
+        );
+
+        // Inject a put error and trigger a put to nexus after updating the VMM
+        // reservoir size. It should eventually succeed.
+        put_error.store(true, Ordering::SeqCst);
+        {
+            let mut info = latest_sled_agent_info.lock().unwrap();
+            info.reservoir_size = (2 * 1024 * 1024u64).into();
+            info.generation = info.generation.next();
+        }
+        handle.notify_nexus_about_self(log).await;
+        // Wait for a steady state, when the latest info has been put to nexus.
+        // Ensure the second get request has been sent.
+        let status = wait_for::<_, (), _, _>(
+            || async {
+                let status = handle.get_status().await.unwrap();
+                if !status.has_pending_notification
+                    && status.total_get_requests_started == 2
+                {
+                    Ok(status)
+                } else {
+                    Err(CondCheckError::NotYet)
+                }
+            },
+            &Duration::from_millis(2),
+            &Duration::from_secs(15),
+        )
+        .await
+        .expect("Failed to get status from Nexus");
+
+        // One extra get and put request because of the put error
+        assert_eq!(status.total_get_requests_completed, 2u64);
+
+        assert_eq!(status.total_put_requests_started, 4u64);
+        assert_eq!(status.total_put_requests_completed, 4u64);
+        assert_eq!(status.has_outstanding_request, false);
+        assert_eq!(status.cancelled_pending_notifications, 4);
+        let expected = latest_sled_agent_info.lock().unwrap().clone();
+        assert_eq!(
+            status.nexus_known_info,
+            Some(NexusKnownInfo::Found(expected)),
+        );
+
+        // Inject a get error and trigger a put to nexus after updating the VMM
+        // reservoir size. We shouldn't end up calling Get at all since we know
+        // our state, and so we will not trigger the error here. However, later
+        // on when we trigger a put error, it will perform a get and that will
+        // trigger this injected get error.
+        get_error.store(true, Ordering::SeqCst);
+        {
+            let mut info = latest_sled_agent_info.lock().unwrap();
+            info.reservoir_size = (3 * 1024 * 1024u64).into();
+            info.generation = info.generation.next();
+        }
+        handle.notify_nexus_about_self(log).await;
+        // Wait for a steady state, when the latest info has been put to nexus.
+        // Ensure the fifth put request has been started.
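+        // (No GET happens in this step: the PUT succeeds on the first try, so
+        // the injected get error stays armed for the next scenario below.)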
+        let status = wait_for::<_, (), _, _>(
+            || async {
+                let status = handle.get_status().await.unwrap();
+                if !status.has_pending_notification
+                    && status.total_put_requests_started == 5
+                {
+                    Ok(status)
+                } else {
+                    Err(CondCheckError::NotYet)
+                }
+            },
+            &Duration::from_millis(2),
+            &Duration::from_secs(15),
+        )
+        .await
+        .expect("Failed to get status from Nexus");
+
+        // Get wasn't called
+        assert_eq!(status.total_get_requests_started, 2u64);
+        assert_eq!(status.total_get_requests_completed, 2u64);
+
+        assert_eq!(status.total_put_requests_started, 5u64);
+        assert_eq!(status.total_put_requests_completed, 5u64);
+        assert_eq!(status.has_outstanding_request, false);
+        assert_eq!(status.cancelled_pending_notifications, 5);
+        let expected = latest_sled_agent_info.lock().unwrap().clone();
+        assert_eq!(
+            status.nexus_known_info,
+            Some(NexusKnownInfo::Found(expected)),
+        );
+
+        // Now inject a put error. This will trigger the put error and
+        // previously injected get error.
+        put_error.store(true, Ordering::SeqCst);
+        {
+            let mut info = latest_sled_agent_info.lock().unwrap();
+            info.reservoir_size = (4 * 1024 * 1024u64).into();
+            info.generation = info.generation.next();
+        }
+        handle.notify_nexus_about_self(log).await;
+        // Wait for a steady state, when the latest info has been put to nexus.
+        // Ensure the retried put request has started.
+        let status = wait_for::<_, (), _, _>(
+            || async {
+                let status = handle.get_status().await.unwrap();
+                if !status.has_pending_notification
+                    && status.total_put_requests_started > 5
+                {
+                    Ok(status)
+                } else {
+                    Err(CondCheckError::NotYet)
+                }
+            },
+            &Duration::from_millis(2),
+            &Duration::from_secs(15),
+        )
+        .await
+        .expect("Failed to get status from Nexus");
+
+        // Get was called twice, once for error, once for success
+        assert_eq!(status.total_get_requests_started, 4u64);
+        assert_eq!(status.total_get_requests_completed, 4u64);
+
+        // Put was called twice, once for error, once for success
+        assert_eq!(status.total_put_requests_started, 7u64);
+        assert_eq!(status.total_put_requests_completed, 7u64);
+
+        assert_eq!(status.has_outstanding_request, false);
+        assert_eq!(status.cancelled_pending_notifications, 6);
+        let expected = latest_sled_agent_info.lock().unwrap().clone();
+        assert_eq!(
+            status.nexus_known_info,
+            Some(NexusKnownInfo::Found(expected)),
+        );
+
+        logctx.cleanup_successful();
+    }
+}
diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs
index 784e3f4938..dd815775ff 100644
--- a/sled-agent/src/sim/server.rs
+++ b/sled-agent/src/sim/server.rs
@@ -18,6 +18,7 @@ use nexus_client::types::{IpRange, Ipv4Range, Ipv6Range};
 use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES;
 use omicron_common::address::DNS_OPTE_IPV4_SUBNET;
 use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET;
+use omicron_common::api::external::Generation;
 use omicron_common::api::external::MacAddr;
 use omicron_common::backoff::{
     retry_notify, retry_policy_internal_service_aggressive, BackoffError,
@@ -101,7 +102,7 @@ impl Server {
         nexus_client
             .sled_agent_put(
                 &config.id,
-                &NexusTypes::SledAgentStartupInfo {
+                &NexusTypes::SledAgentInfo {
                     sa_address: sa_address.to_string(),
                     role: NexusTypes::SledRole::Scrimlet,
                     baseboard: NexusTypes::Baseboard {
@@ -124,6 +125,8 @@ impl Server {
                         config.hardware.reservoir_ram,
                     )
                     .unwrap(),
+                    generation: Generation::new(),
+                    decommissioned: false,
                 },
             )
             .await
diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs
index 5381337b8c..8f737f879b 100644
--- a/sled-agent/src/sled_agent.rs
+++ b/sled-agent/src/sled_agent.rs
@@ -11,10 +11,13 @@ use crate::bootstrap::early_networking::{
 };
 use crate::bootstrap::params::{BaseboardId, StartSledAgentRequest};
 use crate::config::Config;
-use crate::instance_manager::{InstanceManager, ReservoirMode};
+use crate::instance_manager::InstanceManager;
 use crate::long_running_tasks::LongRunningTaskHandles;
 use crate::metrics::MetricsManager;
-use crate::nexus::{ConvertInto, NexusClientWithResolver, NexusRequestQueue};
+use crate::nexus::{
+    NexusClientWithResolver, NexusNotifierHandle, NexusNotifierInput,
+    NexusNotifierTask,
+};
 use crate::params::{
     DiskStateRequested, InstanceExternalIpBody, InstanceHardware,
     InstanceMetadata, InstanceMigrationSourceParams, InstancePutStateResponse,
@@ -25,6 +28,7 @@ use crate::params::{
 use crate::services::{self, ServiceManager};
 use crate::storage_monitor::UnderlayAccess;
 use crate::updates::{ConfigUpdates, UpdateManager};
+use crate::vmm_reservoir::{ReservoirMode, VmmReservoirManager};
 use crate::zone_bundle;
 use crate::zone_bundle::BundleError;
 use bootstore::schemes::v0 as bootstore;
@@ -57,7 +61,7 @@ use omicron_common::api::{
     internal::nexus::UpdateArtifactId,
 };
 use omicron_common::backoff::{
-    retry_notify, retry_notify_ext, retry_policy_internal_service,
+    retry_notify, retry_policy_internal_service,
     retry_policy_internal_service_aggressive, BackoffError,
 };
 use oximeter::types::ProducerRegistry;
@@ -288,8 +292,8 @@ struct SledAgentInner {
     // Connection to Nexus.
     nexus_client: NexusClientWithResolver,
 
-    // A serialized request queue for operations interacting with Nexus.
-    nexus_request_queue: NexusRequestQueue,
+    // A mechanism for notifying nexus about sled-agent updates
+    nexus_notifier: NexusNotifierHandle,
 
     // The rack network config provided at RSS time.
     rack_network_config: Option<RackNetworkConfig>,
@@ -466,6 +470,19 @@ impl SledAgent {
             .map_err(|_| ())
             .expect("Failed to send to StorageMonitor");
 
+        // Configure the VMM reservoir as either a percentage of DRAM or as an
+        // exact size in MiB.
+        let reservoir_mode = ReservoirMode::from_config(
+            config.vmm_reservoir_percentage,
+            config.vmm_reservoir_size_mb,
+        );
+
+        let vmm_reservoir_manager = VmmReservoirManager::spawn(
+            &log,
+            long_running_task_handles.hardware_manager.clone(),
+            reservoir_mode,
+        );
+
         let instances = InstanceManager::new(
             parent_log.clone(),
             nexus_client.clone(),
@@ -474,41 +491,9 @@ impl SledAgent {
             storage_manager.clone(),
             long_running_task_handles.zone_bundler.clone(),
             ZoneBuilderFactory::default(),
+            vmm_reservoir_manager.clone(),
         )?;
 
-        // Configure the VMM reservoir as either a percentage of DRAM or as an
-        // exact size in MiB.
- let reservoir_mode = match ( - config.vmm_reservoir_percentage, - config.vmm_reservoir_size_mb, - ) { - (None, None) => ReservoirMode::None, - (Some(p), None) => ReservoirMode::Percentage(p), - (None, Some(mb)) => ReservoirMode::Size(mb), - (Some(_), Some(_)) => panic!( - "only one of vmm_reservoir_percentage and \ - vmm_reservoir_size_mb is allowed" - ), - }; - - match reservoir_mode { - ReservoirMode::None => warn!(log, "Not using VMM reservoir"), - ReservoirMode::Size(0) | ReservoirMode::Percentage(0) => { - warn!(log, "Not using VMM reservoir (size 0 bytes requested)") - } - _ => { - instances - .set_reservoir_size( - &long_running_task_handles.hardware_manager, - reservoir_mode, - ) - .map_err(|e| { - error!(log, "Failed to setup VMM reservoir: {e}"); - e - })?; - } - } - let update_config = ConfigUpdates { zone_artifact_path: Utf8PathBuf::from("/opt/oxide"), }; @@ -570,6 +555,22 @@ impl SledAgent { rack_network_config.clone(), )?; + // Spawn a background task for managing notifications to nexus + // about this sled-agent. + let nexus_notifier_input = NexusNotifierInput { + sled_id: request.body.id, + sled_address: get_sled_address(request.body.subnet), + nexus_client: nexus_client.client().clone(), + hardware: long_running_task_handles.hardware_manager.clone(), + vmm_reservoir_manager: vmm_reservoir_manager.clone(), + }; + let (nexus_notifier_task, nexus_notifier_handle) = + NexusNotifierTask::new(nexus_notifier_input, &log); + + tokio::spawn(async move { + nexus_notifier_task.run().await; + }); + let sled_agent = SledAgent { inner: Arc::new(SledAgentInner { id: request.body.id, @@ -582,14 +583,7 @@ impl SledAgent { port_manager, services, nexus_client, - - // TODO(https://github.com/oxidecomputer/omicron/issues/1917): - // Propagate usage of this request queue throughout the Sled - // Agent. - // - // Also, we could maybe de-dup some of the backoff code in the - // request queue? - nexus_request_queue: NexusRequestQueue::new(), + nexus_notifier: nexus_notifier_handle, rack_network_config, zone_bundler: long_running_task_handles.zone_bundler.clone(), bootstore: long_running_task_handles.bootstore.clone(), @@ -603,7 +597,7 @@ impl SledAgent { // existence. If inspection of the hardware later informs us that we're // actually running on a scrimlet, that's fine, the updated value will // be received by Nexus eventually. - sled_agent.notify_nexus_about_self(&log); + sled_agent.notify_nexus_about_self(&log).await; Ok(sled_agent) } @@ -681,98 +675,10 @@ impl SledAgent { Ok(()) } - /// Sends a request to Nexus informing it that the current sled exists, - /// with information abou the existing set of hardware. - /// - /// Does not block until Nexus is available -- the future created by this - /// function is retried in a queue that is polled in the background. - pub(crate) fn notify_nexus_about_self(&self, log: &Logger) { - let sled_id = self.inner.id; - let nexus_client = self.inner.nexus_client.clone(); - let sled_address = self.inner.sled_address(); - let is_scrimlet = self.inner.hardware.is_scrimlet(); - let baseboard = self.inner.hardware.baseboard().convert(); - let usable_hardware_threads = - self.inner.hardware.online_processor_count(); - let usable_physical_ram = - self.inner.hardware.usable_physical_ram_bytes(); - let reservoir_size = self.inner.instances.reservoir_size(); - - let log = log.clone(); - let fut = async move { - // Notify the control plane that we're up, and continue trying this - // until it succeeds. We retry with a randomized, capped - // exponential backoff. 
- // - // TODO-robustness if this returns a 400 error, we probably want to - // return a permanent error from the `notify_nexus` closure. - let notify_nexus = || async { - info!( - log, - "contacting server nexus, registering sled"; - "id" => ?sled_id, - "baseboard" => ?baseboard, - ); - let role = if is_scrimlet { - nexus_client::types::SledRole::Scrimlet - } else { - nexus_client::types::SledRole::Gimlet - }; - - nexus_client - .client() - .sled_agent_put( - &sled_id, - &nexus_client::types::SledAgentStartupInfo { - sa_address: sled_address.to_string(), - role, - baseboard: baseboard.clone(), - usable_hardware_threads, - usable_physical_ram: nexus_client::types::ByteCount( - usable_physical_ram, - ), - reservoir_size: nexus_client::types::ByteCount( - reservoir_size.to_bytes(), - ), - }, - ) - .await - .map_err(|err| BackoffError::transient(err.to_string())) - }; - // This notification is often invoked before Nexus has started - // running, so avoid flagging any errors as concerning until some - // time has passed. - let log_notification_failure = |err, call_count, total_duration| { - if call_count == 0 { - info!( - log, - "failed to notify nexus about sled agent"; - "error" => %err, - ); - } else if total_duration > std::time::Duration::from_secs(30) { - warn!( - log, - "failed to notify nexus about sled agent"; - "error" => %err, - "total duration" => ?total_duration, - ); - } - }; - retry_notify_ext( - retry_policy_internal_service_aggressive(), - notify_nexus, - log_notification_failure, - ) - .await - .expect("Expected an infinite retry loop contacting Nexus"); - }; - self.inner - .nexus_request_queue - .sender() - .send(Box::pin(fut)) - .unwrap_or_else(|err| { - panic!("Failed to send future to request queue: {err}"); - }); + /// Trigger a request to Nexus informing it that the current sled exists, + /// with information about the existing set of hardware. + pub(crate) async fn notify_nexus_about_self(&self, log: &Logger) { + self.inner.nexus_notifier.notify_nexus_about_self(log).await; } /// List all zone bundles on the system, for any zones live or dead. diff --git a/sled-agent/src/vmm_reservoir.rs b/sled-agent/src/vmm_reservoir.rs new file mode 100644 index 0000000000..d7b6b64ecf --- /dev/null +++ b/sled-agent/src/vmm_reservoir.rs @@ -0,0 +1,272 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! A background thread for managing the VMM reservoir + +use illumos_utils::vmm_reservoir; +use omicron_common::api::external::ByteCount; +use slog::Logger; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; +use std::thread; +use tokio::sync::{broadcast, oneshot}; + +use sled_hardware::HardwareManager; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("Failed to create reservoir: {0}")] + Reservoir(#[from] vmm_reservoir::Error), + + #[error("Invalid reservoir configuration: {0}")] + ReservoirConfig(String), + + #[error("VmmReservoirManager is currently busy")] + Busy, + + #[error("VmmReservoirManager has shutdown")] + Shutdown, + + #[error( + "Communication error with VmmReservoirManager: ReplySenderDropped" + )] + ReplySenderDropped, +} + +#[derive(Debug, Clone, Copy)] +pub enum ReservoirMode { + Size(u32), + Percentage(u8), +} + +impl ReservoirMode { + /// Return a configuration of the VMM reservoir as either a percentage of + /// DRAM or as an exact size in MiB. 
+    ///
+    /// Panic upon invalid configuration
+    pub fn from_config(
+        percentage: Option<u8>,
+        size_mb: Option<u32>,
+    ) -> Option<Self> {
+        match (percentage, size_mb) {
+            (None, None) => None,
+            (Some(p), None) => Some(ReservoirMode::Percentage(p)),
+            (None, Some(mb)) => Some(ReservoirMode::Size(mb)),
+            (Some(_), Some(_)) => panic!(
+                "only one of vmm_reservoir_percentage and \
+                vmm_reservoir_size_mb is allowed"
+            ),
+        }
+    }
+}
+
+/// A message sent from [`VmmReservoirManagerHandle`] to [`VmmReservoirManager`]
+enum ReservoirManagerMsg {
+    SetReservoirSize {
+        mode: ReservoirMode,
+        reply_tx: oneshot::Sender<Result<(), Error>>,
+    },
+}
+
+#[derive(Clone)]
+/// A mechanism to interact with the [`VmmReservoirManager`]
+pub struct VmmReservoirManagerHandle {
+    reservoir_size: Arc<AtomicU64>,
+    tx: flume::Sender<ReservoirManagerMsg>,
+    // A notification channel indicating that the size of the VMM reservoir has
+    // changed. We use a broadcast channel instead of a `Notify` to prevent lost
+    // updates with multiple receivers. Importantly, a `RecvError::Lagged` is
+    // just as valuable as an `Ok(())`, and so this acts as a pure notification
+    // channel.
+    size_updated_tx: broadcast::Sender<()>,
+    _manager_handle: Arc<thread::JoinHandle<()>>,
+}
+
+impl VmmReservoirManagerHandle {
+    /// Returns the last-set size of the reservoir
+    pub fn reservoir_size(&self) -> ByteCount {
+        self.reservoir_size.load(Ordering::SeqCst).try_into().unwrap()
+    }
+
+    // Return the receiver that notifies us about a size update. The value
+    // itself is held in an atomic. While we could have replaced both of these
+    // with a `watch`, that is not the semantics all callers want. Sometimes
+    // they just want to "read" the latest size of the reservoir.
+    pub fn subscribe_for_size_updates(&self) -> broadcast::Receiver<()> {
+        self.size_updated_tx.subscribe()
+    }
+
+    /// Tell the [`VmmReservoirManager`] to set the reservoir size and wait for
+    /// a response.
+    ///
+    /// Returns an error if the allocation fails or if the manager is currently
+    /// busy handling another request.
+    //
+    // It's anticipated this will be used to change the reservation
+    #[allow(unused)]
+    pub async fn set_reservoir_size(
+        &self,
+        mode: ReservoirMode,
+    ) -> Result<(), Error> {
+        let (tx, rx) = oneshot::channel();
+        let msg = ReservoirManagerMsg::SetReservoirSize { mode, reply_tx: tx };
+        if let Err(e) = self.tx.try_send(msg) {
+            return Err(match e {
+                flume::TrySendError::Full(_) => Error::Busy,
+                flume::TrySendError::Disconnected(_) => Error::Shutdown,
+            });
+        }
+        rx.await.map_err(|_| Error::ReplySenderDropped)?
+    }
+}
+
+/// Manage the VMM reservoir in a background thread
+pub struct VmmReservoirManager {
+    reservoir_size: Arc<AtomicU64>,
+    rx: flume::Receiver<ReservoirManagerMsg>,
+    size_updated_tx: broadcast::Sender<()>,
+    // We maintain a copy of the receiver so sends never fail.
+    _size_updated_rx: broadcast::Receiver<()>,
+    log: Logger,
+}
+
+impl VmmReservoirManager {
+    pub fn spawn(
+        log: &Logger,
+        hardware_manager: HardwareManager,
+        reservoir_mode: Option<ReservoirMode>,
+    ) -> VmmReservoirManagerHandle {
+        let log = log.new(o!("component" => "VmmReservoirManager"));
+        let (size_updated_tx, _size_updated_rx) = broadcast::channel(1);
+        // We use a rendezvous channel to only allow one request at a time.
+        // Resizing a reservoir may block the thread for up to two minutes, so
+        // we want to ensure it is complete before allowing another call.
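+        // (With a bound of zero, `try_send` in the handle succeeds only while
+        // the manager thread is parked in `recv()`; otherwise the caller gets
+        // `Error::Busy` rather than queueing behind a long resize.)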
+        let (tx, rx) = flume::bounded(0);
+        let reservoir_size = Arc::new(AtomicU64::new(0));
+        let manager = VmmReservoirManager {
+            reservoir_size: reservoir_size.clone(),
+            size_updated_tx: size_updated_tx.clone(),
+            _size_updated_rx,
+            rx,
+            log,
+        };
+        let _manager_handle = Arc::new(thread::spawn(move || {
+            manager.run(hardware_manager, reservoir_mode)
+        }));
+        VmmReservoirManagerHandle {
+            reservoir_size,
+            tx,
+            size_updated_tx,
+            _manager_handle,
+        }
+    }
+
+    fn run(
+        self,
+        hardware_manager: HardwareManager,
+        reservoir_mode: Option<ReservoirMode>,
+    ) {
+        match reservoir_mode {
+            None => warn!(self.log, "Not using VMM reservoir"),
+            Some(ReservoirMode::Size(0))
+            | Some(ReservoirMode::Percentage(0)) => {
+                warn!(
+                    self.log,
+                    "Not using VMM reservoir (size 0 bytes requested)"
+                )
+            }
+            Some(mode) => {
+                if let Err(e) = self.set_reservoir_size(&hardware_manager, mode)
+                {
+                    error!(self.log, "Failed to setup VMM reservoir: {e}");
+                }
+            }
+        }
+
+        while let Ok(msg) = self.rx.recv() {
+            let ReservoirManagerMsg::SetReservoirSize { mode, reply_tx } = msg;
+            match self.set_reservoir_size(&hardware_manager, mode) {
+                Ok(()) => {
+                    let _ = reply_tx.send(Ok(()));
+                }
+                Err(e) => {
+                    error!(self.log, "Failed to setup VMM reservoir: {e}");
+                    let _ = reply_tx.send(Err(e));
+                }
+            }
+        }
+    }
+
+    /// Sets the VMM reservoir to the requested percentage of usable physical
+    /// RAM or to a size in MiB. Either mode will round down to the nearest
+    /// aligned size required by the control plane.
+    fn set_reservoir_size(
+        &self,
+        hardware: &sled_hardware::HardwareManager,
+        mode: ReservoirMode,
+    ) -> Result<(), Error> {
+        let hardware_physical_ram_bytes = hardware.usable_physical_ram_bytes();
+        let req_bytes = match mode {
+            ReservoirMode::Size(mb) => {
+                let bytes = ByteCount::from_mebibytes_u32(mb).to_bytes();
+                if bytes > hardware_physical_ram_bytes {
+                    return Err(Error::ReservoirConfig(format!(
+                        "cannot specify a reservoir of {bytes} bytes when \
+                        physical memory is {hardware_physical_ram_bytes} bytes",
+                    )));
+                }
+                bytes
+            }
+            ReservoirMode::Percentage(percent) => {
+                if !matches!(percent, 1..=99) {
+                    return Err(Error::ReservoirConfig(format!(
+                        "VMM reservoir percentage of {} must be between 0 and \
+                        100",
+                        percent
+                    )));
+                };
+                (hardware_physical_ram_bytes as f64 * (percent as f64 / 100.0))
+                    .floor() as u64
+            }
+        };
+
+        let req_bytes_aligned = vmm_reservoir::align_reservoir_size(req_bytes);
+
+        if req_bytes_aligned == 0 {
+            warn!(
+                self.log,
+                "Requested reservoir size of {} bytes < minimum aligned size \
+                of {} bytes",
+                req_bytes,
+                vmm_reservoir::RESERVOIR_SZ_ALIGN
+            );
+            return Ok(());
+        }
+
+        // The max ByteCount value is i64::MAX, which is ~8 million TiB.
+        // As this value is either a percentage of DRAM or a size in MiB
+        // represented as a u32, constructing this should always work.
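+        // (Worked check, assuming the largest input: u32::MAX MiB is about
+        // 2^52 bytes, roughly 4 PiB, comfortably below i64::MAX of ~8 EiB.)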
+        let reservoir_size = ByteCount::try_from(req_bytes_aligned).unwrap();
+        if let ReservoirMode::Percentage(percent) = mode {
+            info!(
+                self.log,
+                "{}% of {} physical ram = {} bytes",
+                percent,
+                hardware_physical_ram_bytes,
+                req_bytes,
+            );
+        }
+        info!(self.log, "Setting reservoir size to {reservoir_size} bytes");
+        vmm_reservoir::ReservoirControl::set(reservoir_size)?;
+
+        self.reservoir_size.store(reservoir_size.to_bytes(), Ordering::SeqCst);
+        info!(
+            self.log,
+            "Finished setting reservoir size to {reservoir_size} bytes"
+        );
+        self.size_updated_tx.send(()).unwrap();
+
+        Ok(())
+    }
+}

From 8697f39aac3323ffda3f33f4b5aa44fd7ad857e3 Mon Sep 17 00:00:00 2001
From: John Gallagher
Date: Fri, 8 Mar 2024 14:38:35 -0500
Subject: [PATCH 093/157] Teach `omdb db network list-eips` and
 `DataStore::vpc_resolve_to_sleds` about blueprints (#5202)

While working on #4886, I ran into a couple places that expect all
services to have rows in the `service` table, but Reconfigurator does
not add new rows to `service` when it spins up new services (see #4947
for rationale). This PR teaches these two users of `service` to _also_
check the current target blueprint when checking for services.

I also removed `LookupPath::service_id` and `lookup_resource! { ...
Service ... }`; the only user of these was `omdb`, and I think their
existence is misleading in the post-`service`-table world we're moving
toward.
---
 dev-tools/omdb/src/bin/omdb/db.rs             |  99 +++-
 nexus/db-model/src/inventory.rs               |  21 +-
 nexus/db-model/src/schema.rs                  |   2 +
 .../db-queries/src/db/datastore/deployment.rs |   6 +-
 nexus/db-queries/src/db/datastore/vpc.rs      | 450 +++++++++++++++++-
 nexus/db-queries/src/db/lookup.rs             |  14 -
 6 files changed, 550 insertions(+), 42 deletions(-)

diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs
index 02b68144df..339c257d8e 100644
--- a/dev-tools/omdb/src/bin/omdb/db.rs
+++ b/dev-tools/omdb/src/bin/omdb/db.rs
@@ -80,6 +80,7 @@ use nexus_db_queries::db::DataStore;
 use nexus_reconfigurator_preparation::policy_from_db;
 use nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL;
 use nexus_types::deployment::Blueprint;
+use nexus_types::deployment::OmicronZoneType;
 use nexus_types::deployment::UnstableReconfiguratorState;
 use nexus_types::identity::Resource;
 use nexus_types::internal_api::params::DnsRecord;
@@ -628,7 +629,7 @@ fn first_page<'a, T>(limit: NonZeroU32) -> DataPageParams<'a, T> {
     }
 }
 
-/// Helper function to looks up an instance with the given ID.
+/// Helper function to look up an instance with the given ID.
 async fn lookup_instance(
     datastore: &DataStore,
     instance_id: Uuid,
@@ -646,7 +647,71 @@ async fn lookup_instance(
         .with_context(|| format!("loading instance {instance_id}"))
 }
 
-/// Helper function to looks up a project with the given ID.
+/// Helper function to look up the kind of the service with the given ID.
+///
+/// Requires the caller to first have fetched the current target blueprint, so
+/// we can find services that have been added by Reconfigurator.
+async fn lookup_service_kind(
+    datastore: &DataStore,
+    service_id: Uuid,
+    current_target_blueprint: Option<&Blueprint>,
+) -> anyhow::Result<Option<ServiceKind>> {
+    let conn = datastore.pool_connection_for_tests().await?;
+
+    // We need to check the `service` table (populated during rack setup)...
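+    // (This two-phase lookup is transitional: once the `service` table is
+    // retired per #4947, the blueprint check below becomes the only source.)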
+ { + use db::schema::service::dsl; + if let Some(kind) = dsl::service + .filter(dsl::id.eq(service_id)) + .limit(1) + .select(dsl::kind) + .get_result_async(&*conn) + .await + .optional() + .with_context(|| format!("loading service {service_id}"))? + { + return Ok(Some(kind)); + } + } + + // ...and if we don't find the service, check the latest blueprint, because + // the service might have been added by Reconfigurator after RSS ran. + let Some(blueprint) = current_target_blueprint else { + return Ok(None); + }; + + let Some(zone_config) = + blueprint.all_omicron_zones().find_map(|(_sled_id, zone_config)| { + if zone_config.id == service_id { + Some(zone_config) + } else { + None + } + }) + else { + return Ok(None); + }; + + let service_kind = match &zone_config.zone_type { + OmicronZoneType::BoundaryNtp { .. } + | OmicronZoneType::InternalNtp { .. } => ServiceKind::Ntp, + OmicronZoneType::Clickhouse { .. } => ServiceKind::Clickhouse, + OmicronZoneType::ClickhouseKeeper { .. } => { + ServiceKind::ClickhouseKeeper + } + OmicronZoneType::CockroachDb { .. } => ServiceKind::Cockroach, + OmicronZoneType::Crucible { .. } => ServiceKind::Crucible, + OmicronZoneType::CruciblePantry { .. } => ServiceKind::CruciblePantry, + OmicronZoneType::ExternalDns { .. } => ServiceKind::ExternalDns, + OmicronZoneType::InternalDns { .. } => ServiceKind::InternalDns, + OmicronZoneType::Nexus { .. } => ServiceKind::Nexus, + OmicronZoneType::Oximeter { .. } => ServiceKind::Oximeter, + }; + + Ok(Some(service_kind)) +} + +/// Helper function to look up a project with the given ID. async fn lookup_project( datastore: &DataStore, project_id: Uuid, @@ -1886,26 +1951,26 @@ async fn cmd_db_eips( let mut rows = Vec::new(); + let current_target_blueprint = datastore + .blueprint_target_get_current_full(opctx) + .await + .context("loading current target blueprint")? + .map(|(_, blueprint)| blueprint); + for ip in &ips { let owner = if let Some(owner_id) = ip.parent_id { if ip.is_service { - let service = match LookupPath::new(opctx, datastore) - .service_id(owner_id) - .fetch() - .await + let kind = match lookup_service_kind( + datastore, + owner_id, + current_target_blueprint.as_ref(), + ) + .await? { - Ok(instance) => instance, - Err(e) => { - eprintln!( - "error looking up service with id {owner_id}: {e}" - ); - continue; - } + Some(kind) => format!("{kind:?}"), + None => "UNKNOWN (service ID not found)".to_string(), }; - Owner::Service { - id: owner_id, - kind: format!("{:?}", service.1.kind), - } + Owner::Service { id: owner_id, kind } } else { let instance = match lookup_instance(datastore, owner_id).await? 
            {
diff --git a/nexus/db-model/src/inventory.rs b/nexus/db-model/src/inventory.rs
index d8314f97b8..0992eb60b5 100644
--- a/nexus/db-model/src/inventory.rs
+++ b/nexus/db-model/src/inventory.rs
@@ -12,8 +12,8 @@ use crate::schema::{
     inv_sled_omicron_zones, sw_caboose, sw_root_of_trust_page,
 };
 use crate::{
-    impl_enum_type, ipv6, ByteCount, Generation, MacAddr, Name, SqlU16, SqlU32,
-    SqlU8,
+    impl_enum_type, ipv6, ByteCount, Generation, MacAddr, Name, ServiceKind,
+    SqlU16, SqlU32, SqlU8,
 };
 use anyhow::anyhow;
 use chrono::DateTime;
@@ -715,6 +715,23 @@ impl_enum_type!(
     Oximeter => b"oximeter"
 );
 
+impl From<ZoneType> for ServiceKind {
+    fn from(zone_type: ZoneType) -> Self {
+        match zone_type {
+            ZoneType::BoundaryNtp | ZoneType::InternalNtp => Self::Ntp,
+            ZoneType::Clickhouse => Self::Clickhouse,
+            ZoneType::ClickhouseKeeper => Self::ClickhouseKeeper,
+            ZoneType::CockroachDb => Self::Cockroach,
+            ZoneType::Crucible => Self::Crucible,
+            ZoneType::CruciblePantry => Self::CruciblePantry,
+            ZoneType::ExternalDns => Self::ExternalDns,
+            ZoneType::InternalDns => Self::InternalDns,
+            ZoneType::Nexus => Self::Nexus,
+            ZoneType::Oximeter => Self::Oximeter,
+        }
+    }
+}
+
 /// See [`nexus_types::inventory::OmicronZoneConfig`].
 #[derive(Queryable, Clone, Debug, Selectable, Insertable)]
 #[diesel(table_name = inv_omicron_zone)]
diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs
index d70b0a267e..09bc963936 100644
--- a/nexus/db-model/src/schema.rs
+++ b/nexus/db-model/src/schema.rs
@@ -1550,6 +1550,8 @@ allow_tables_to_appear_in_same_query!(
 allow_tables_to_appear_in_same_query!(hw_baseboard_id, inv_sled_agent,);
 
 allow_tables_to_appear_in_same_query!(
+    bp_omicron_zone,
+    bp_target,
     dataset,
     disk,
     image,
diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs
index 1dae1030f0..5fc17c88d0 100644
--- a/nexus/db-queries/src/db/datastore/deployment.rs
+++ b/nexus/db-queries/src/db/datastore/deployment.rs
@@ -1201,7 +1201,7 @@ mod tests {
     #[tokio::test]
     async fn test_empty_blueprint() {
         // Setup
-        let logctx = dev::test_setup_log("inventory_insert");
+        let logctx = dev::test_setup_log("test_empty_blueprint");
         let mut db = test_setup_database(&logctx.log).await;
         let (opctx, datastore) = datastore_test(&logctx, &db).await;
 
@@ -1264,7 +1264,7 @@ mod tests {
     #[tokio::test]
     async fn test_representative_blueprint() {
         // Setup
-        let logctx = dev::test_setup_log("inventory_insert");
+        let logctx = dev::test_setup_log("test_representative_blueprint");
         let mut db = test_setup_database(&logctx.log).await;
         let (opctx, datastore) = datastore_test(&logctx, &db).await;
 
@@ -1448,7 +1448,7 @@ mod tests {
     #[tokio::test]
     async fn test_set_target() {
         // Setup
-        let logctx = dev::test_setup_log("inventory_insert");
+        let logctx = dev::test_setup_log("test_set_target");
         let mut db = test_setup_database(&logctx.log).await;
         let (opctx, datastore) = datastore_test(&logctx, &db).await;
 
diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs
index 4626301f76..cd972c0941 100644
--- a/nexus/db-queries/src/db/datastore/vpc.rs
+++ b/nexus/db-queries/src/db/datastore/vpc.rs
@@ -640,8 +640,8 @@ impl DataStore {
         })
     }
 
-    /// Return the list of `Sled`s hosting instances with network interfaces
-    /// on the provided VPC.
+    /// Return the list of `Sled`s hosting instances or control plane services
+    /// with network interfaces on the provided VPC.
pub async fn vpc_resolve_to_sleds( &self, vpc_id: Uuid, @@ -650,9 +650,15 @@ impl DataStore { // Resolve each VNIC in the VPC to the Sled it's on, so we know which // Sleds to notify when firewall rules change. use db::schema::{ - instance, instance_network_interface, service, - service_network_interface, sled, vmm, + bp_omicron_zone, bp_target, instance, instance_network_interface, + service, service_network_interface, sled, vmm, }; + // Diesel requires us to use aliases in order to refer to the + // `bp_target` table twice in the same query. + let (bp_target1, bp_target2) = diesel::alias!( + db::schema::bp_target as bp_target1, + db::schema::bp_target as bp_target2 + ); let instance_query = instance_network_interface::table .inner_join( @@ -671,7 +677,12 @@ impl DataStore { .filter(vmm::time_deleted.is_null()) .select(Sled::as_select()); - let service_query = service_network_interface::table + // When Nexus accepts the rack initialization handoff from RSS, it + // populates the `service` table. We eventually want to retire it + // (https://github.com/oxidecomputer/omicron/issues/4947), and the + // Reconfigurator does not add new entries to it. We still need to query + // it for systems that are not yet under Reconfigurator control... + let rss_service_query = service_network_interface::table .inner_join( service::table .on(service::id.eq(service_network_interface::service_id)), @@ -681,6 +692,37 @@ impl DataStore { .filter(service_network_interface::time_deleted.is_null()) .select(Sled::as_select()); + // ... and we also need to query for the current target blueprint to + // support systems that _are_ under Reconfigurator control. + let reconfig_service_query = service_network_interface::table + .inner_join(bp_omicron_zone::table.on( + bp_omicron_zone::id.eq(service_network_interface::service_id), + )) + .inner_join( + bp_target1.on(bp_omicron_zone::blueprint_id + .eq(bp_target1.field(bp_target::blueprint_id))), + ) + .inner_join(sled::table.on(sled::id.eq(bp_omicron_zone::sled_id))) + .filter( + // This filters us down to the one current target blueprint (if + // it exists); i.e., the target with the maximal version. We + // could also check that the current target is `enabled`, but + // that could very easily be incorrect: if the current target + // or any of its blueprint ancestors were _ever_ enabled, it's + // possible the current target blueprint describes running + // services that were added after RSS and therefore wouldn't be + // seen in `rss_service_query`. 
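+                // (Rough SQL sketch of this filter, with bp_target aliased
+                // twice, hypothetically as t1 and t2:
+                //   WHERE t1.version =
+                //     (SELECT t2.version FROM bp_target t2
+                //      ORDER BY t2.version DESC LIMIT 1)
+                // i.e., join only against the maximal-version target row.)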
+ bp_target1.field(bp_target::version).eq_any( + bp_target2 + .select(bp_target2.field(bp_target::version)) + .order_by(bp_target2.field(bp_target::version).desc()) + .limit(1), + ), + ) + .filter(service_network_interface::vpc_id.eq(vpc_id)) + .filter(service_network_interface::time_deleted.is_null()) + .select(Sled::as_select()); + let mut sleds = sled::table .select(Sled::as_select()) .filter(sled::time_deleted.is_null()) @@ -691,7 +733,11 @@ impl DataStore { let conn = self.pool_connection_unauthorized().await?; sleds - .intersect(instance_query.union(service_query)) + .intersect( + instance_query + .union(rss_service_query) + .union(reconfig_service_query), + ) .get_results_async(&*conn) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) @@ -1171,14 +1217,35 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; + use crate::db::datastore::test::sled_baseboard_for_test; + use crate::db::datastore::test::sled_system_hardware_for_test; use crate::db::datastore::test_utils::datastore_test; + use crate::db::fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; use crate::db::model::Project; use crate::db::queries::vpc::MAX_VNI_SEARCH_RANGE_SIZE; + use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; + use nexus_db_model::SledUpdate; use nexus_test_utils::db::test_setup_database; + use nexus_types::deployment::Blueprint; + use nexus_types::deployment::BlueprintTarget; + use nexus_types::deployment::NetworkInterface; + use nexus_types::deployment::NetworkInterfaceKind; + use nexus_types::deployment::OmicronZoneConfig; + use nexus_types::deployment::OmicronZoneType; + use nexus_types::deployment::OmicronZonesConfig; use nexus_types::external_api::params; + use nexus_types::identity::Asset; + use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; use omicron_common::api::external; + use omicron_common::api::external::Generation; + use omicron_common::api::external::IpNet; + use omicron_common::api::external::MacAddr; + use omicron_common::api::external::Vni; use omicron_test_utils::dev; use slog::info; + use std::collections::BTreeMap; + use std::collections::BTreeSet; + use std::net::IpAddr; // Test that we detect the right error condition and return None when we // fail to insert a VPC due to VNI exhaustion. 
@@ -1397,4 +1464,375 @@ mod tests {
         db.cleanup().await.unwrap();
         logctx.cleanup_successful();
     }
+
+    #[derive(Debug)]
+    struct Harness {
+        rack_id: Uuid,
+        sled_ids: Vec<Uuid>,
+        nexuses: Vec<HarnessNexus>,
+    }
+
+    #[derive(Debug)]
+    struct HarnessNexus {
+        id: Uuid,
+        ip: IpAddr,
+        mac: MacAddr,
+        nic_id: Uuid,
+    }
+
+    impl Harness {
+        fn new(num_sleds: usize) -> Self {
+            let mut sled_ids =
+                (0..num_sleds).map(|_| Uuid::new_v4()).collect::<Vec<_>>();
+            sled_ids.sort();
+
+            let mut nexus_ips = NEXUS_OPTE_IPV4_SUBNET
+                .iter()
+                .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES)
+                .map(IpAddr::from);
+            let mut nexus_macs = MacAddr::iter_system();
+            let nexuses = (0..num_sleds)
+                .map(|_| HarnessNexus {
+                    id: Uuid::new_v4(),
+                    ip: nexus_ips.next().unwrap(),
+                    mac: nexus_macs.next().unwrap(),
+                    nic_id: Uuid::new_v4(),
+                })
+                .collect::<Vec<_>>();
+            Self { rack_id: Uuid::new_v4(), sled_ids, nexuses }
+        }
+
+        fn db_sleds(&self) -> impl Iterator<Item = SledUpdate> + '_ {
+            self.sled_ids.iter().copied().map(|sled_id| {
+                SledUpdate::new(
+                    sled_id,
+                    "[::1]:0".parse().unwrap(),
+                    sled_baseboard_for_test(),
+                    sled_system_hardware_for_test(),
+                    self.rack_id,
+                    Generation::new().into(),
+                )
+            })
+        }
+
+        fn db_services(
+            &self,
+        ) -> impl Iterator<
+            Item = (db::model::Service, db::model::IncompleteNetworkInterface),
+        > + '_ {
+            self.sled_ids.iter().zip(&self.nexuses).map(|(sled_id, nexus)| {
+                let service = db::model::Service::new(
+                    nexus.id,
+                    *sled_id,
+                    Some(nexus.id),
+                    "[::1]:0".parse().unwrap(),
+                    db::model::ServiceKind::Nexus,
+                );
+                let name = format!("test-nexus-{}", nexus.id);
+                let nic = db::model::IncompleteNetworkInterface::new_service(
+                    nexus.nic_id,
+                    nexus.id,
+                    NEXUS_VPC_SUBNET.clone(),
+                    IdentityMetadataCreateParams {
+                        name: name.parse().unwrap(),
+                        description: name,
+                    },
+                    nexus.ip,
+                    nexus.mac,
+                    0,
+                )
+                .expect("failed to create incomplete Nexus NIC");
+                (service, nic)
+            })
+        }
+
+        fn omicron_zone_configs(
+            &self,
+        ) -> impl Iterator<Item = (Uuid, OmicronZoneConfig)> + '_ {
+            self.db_services().map(|(service, nic)| {
+                let zone_config = OmicronZoneConfig {
+                    id: service.id(),
+                    underlay_address: "::1".parse().unwrap(),
+                    zone_type: OmicronZoneType::Nexus {
+                        internal_address: "[::1]:0".to_string(),
+                        external_ip: "::1".parse().unwrap(),
+                        nic: NetworkInterface {
+                            id: nic.identity.id,
+                            kind: NetworkInterfaceKind::Service(service.id()),
+                            name: format!("test-nic-{}", nic.identity.id)
+                                .parse()
+                                .unwrap(),
+                            ip: nic.ip.unwrap(),
+                            mac: nic.mac.unwrap(),
+                            subnet: IpNet::from(*NEXUS_OPTE_IPV4_SUBNET).into(),
+                            vni: Vni::SERVICES_VNI,
+                            primary: true,
+                            slot: nic.slot.unwrap(),
+                        },
+                        external_tls: false,
+                        external_dns_servers: Vec::new(),
+                    },
+                };
+                (service.sled_id, zone_config)
+            })
+        }
+    }
+
+    #[tokio::test]
+    async fn test_vpc_resolve_to_sleds_uses_current_target_blueprint() {
+        // Test setup.
+        usdt::register_probes().unwrap();
+        let logctx = dev::test_setup_log(
+            "test_vpc_resolve_to_sleds_uses_current_target_blueprint",
+        );
+        let mut db = test_setup_database(&logctx.log).await;
+        let (opctx, datastore) = datastore_test(&logctx, &db).await;
+
+        // Helper function to fetch and sort the IDs of sleds we've resolved the
+        // SERVICES_VPC_ID to.
+        let fetch_service_sled_ids = || async {
+            let mut service_sled_ids = datastore
+                .vpc_resolve_to_sleds(*SERVICES_VPC_ID, &[])
+                .await
+                .expect("failed to resolve to sleds")
+                .into_iter()
+                .map(|sled| sled.id())
+                .collect::<Vec<_>>();
+            service_sled_ids.sort();
+            service_sled_ids
+        };
+
+        // Create four sleds.
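+        // (Only the first two sleds get RSS-style `service` rows below; the
+        // other two exist so later blueprints can place Nexus zones on them.)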
+ let harness = Harness::new(4); + for sled in harness.db_sleds() { + datastore.sled_upsert(sled).await.expect("failed to upsert sled"); + } + + // Insert two Nexus records into `service`, emulating RSS. + for (service, nic) in harness.db_services().take(2) { + datastore + .service_upsert(&opctx, service) + .await + .expect("failed to insert RSS-like service"); + datastore + .service_create_network_interface_raw(&opctx, nic) + .await + .expect("failed to insert Nexus NIC"); + } + + // Ensure we find the two sleds we expect after adding Nexus records. + assert_eq!(&harness.sled_ids[..2], fetch_service_sled_ids().await); + + // Create a blueprint that has a Nexus on our third sled. (This + // blueprint is completely invalid in many ways, but all we care about + // here is inserting relevant records in `bp_omicron_zone`.) + let bp1_omicron_zones = { + let (sled_id, zone_config) = harness + .omicron_zone_configs() + .nth(2) + .expect("fewer than 3 services in test harness"); + let mut zones = BTreeMap::new(); + zones.insert( + sled_id, + OmicronZonesConfig { + generation: Generation::new(), + zones: vec![zone_config], + }, + ); + zones + }; + let bp1_id = Uuid::new_v4(); + let bp1 = Blueprint { + id: bp1_id, + omicron_zones: bp1_omicron_zones, + zones_in_service: BTreeSet::new(), + parent_blueprint_id: None, + internal_dns_version: Generation::new(), + time_created: Utc::now(), + creator: "test".to_string(), + comment: "test".to_string(), + }; + datastore + .blueprint_insert(&opctx, &bp1) + .await + .expect("failed to insert blueprint"); + + // We haven't set a blueprint target yet, so we should still only see + // the two RSS-inserted service-running sleds. + assert_eq!(&harness.sled_ids[..2], fetch_service_sled_ids().await); + + // Make bp1 the current target. + datastore + .blueprint_target_set_current( + &opctx, + BlueprintTarget { + target_id: bp1_id, + enabled: true, + time_made_target: Utc::now(), + }, + ) + .await + .expect("failed to set blueprint target"); + + // bp1 is the target, but we haven't yet inserted a vNIC record, so + // we'll still only see the original 2 sleds. + assert_eq!(&harness.sled_ids[..2], fetch_service_sled_ids().await); + + // Insert the relevant service NIC record (normally performed by the + // reconfigurator's executor). + datastore + .service_create_network_interface_raw( + &opctx, + harness.db_services().nth(2).unwrap().1, + ) + .await + .expect("failed to insert service VNIC"); + + // We should now see _three_ sleds running services. + assert_eq!(&harness.sled_ids[..3], fetch_service_sled_ids().await); + + // Create another blueprint with no services and make it the target. + let bp2_id = Uuid::new_v4(); + let bp2 = Blueprint { + id: bp2_id, + omicron_zones: BTreeMap::new(), + zones_in_service: BTreeSet::new(), + parent_blueprint_id: Some(bp1_id), + internal_dns_version: Generation::new(), + time_created: Utc::now(), + creator: "test".to_string(), + comment: "test".to_string(), + }; + datastore + .blueprint_insert(&opctx, &bp2) + .await + .expect("failed to insert blueprint"); + datastore + .blueprint_target_set_current( + &opctx, + BlueprintTarget { + target_id: bp2_id, + enabled: true, + time_made_target: Utc::now(), + }, + ) + .await + .expect("failed to set blueprint target"); + + // We haven't removed the service NIC record, but we should no longer + // see the third sled here, because we should be back to just the + // original two services in the `service` table. 
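+        // (The blueprint-based query joins NICs to zones in the *current*
+        // target only, so a NIC whose zone is absent from bp2 contributes
+        // nothing.)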
+        assert_eq!(&harness.sled_ids[..2], fetch_service_sled_ids().await);
+
+        // Insert a service NIC record for our fourth sled's Nexus. This
+        // shouldn't change our VPC resolution.
+        datastore
+            .service_create_network_interface_raw(
+                &opctx,
+                harness.db_services().nth(3).unwrap().1,
+            )
+            .await
+            .expect("failed to insert service VNIC");
+        assert_eq!(&harness.sled_ids[..2], fetch_service_sled_ids().await);
+
+        // Create a blueprint that has a Nexus on our fourth sled. This
+        // shouldn't change our VPC resolution.
+        let bp3_omicron_zones = {
+            let (sled_id, zone_config) = harness
+                .omicron_zone_configs()
+                .nth(3)
+                .expect("fewer than 4 services in test harness");
+            let mut zones = BTreeMap::new();
+            zones.insert(
+                sled_id,
+                OmicronZonesConfig {
+                    generation: Generation::new(),
+                    zones: vec![zone_config],
+                },
+            );
+            zones
+        };
+        let bp3_id = Uuid::new_v4();
+        let bp3 = Blueprint {
+            id: bp3_id,
+            omicron_zones: bp3_omicron_zones,
+            zones_in_service: BTreeSet::new(),
+            parent_blueprint_id: Some(bp2_id),
+            internal_dns_version: Generation::new(),
+            time_created: Utc::now(),
+            creator: "test".to_string(),
+            comment: "test".to_string(),
+        };
+        datastore
+            .blueprint_insert(&opctx, &bp3)
+            .await
+            .expect("failed to insert blueprint");
+        assert_eq!(&harness.sled_ids[..2], fetch_service_sled_ids().await);
+
+        // Make this blueprint the target. We've already created the service
+        // VNIC, so we should immediately see our fourth sled in VPC resolution.
+        datastore
+            .blueprint_target_set_current(
+                &opctx,
+                BlueprintTarget {
+                    target_id: bp3_id,
+                    enabled: true,
+                    time_made_target: Utc::now(),
+                },
+            )
+            .await
+            .expect("failed to set blueprint target");
+        assert_eq!(
+            &[harness.sled_ids[0], harness.sled_ids[1], harness.sled_ids[3]]
+                as &[Uuid],
+            fetch_service_sled_ids().await
+        );
+
+        // Finally, create a blueprint that includes our third and fourth sleds,
+        // make it the target, and ensure we resolve to all four sleds.
+ let bp4_omicron_zones = { + let mut zones = BTreeMap::new(); + for (sled_id, zone_config) in harness.omicron_zone_configs().skip(2) + { + zones.insert( + sled_id, + OmicronZonesConfig { + generation: Generation::new(), + zones: vec![zone_config], + }, + ); + } + zones + }; + let bp4_id = Uuid::new_v4(); + let bp4 = Blueprint { + id: bp4_id, + omicron_zones: bp4_omicron_zones, + zones_in_service: BTreeSet::new(), + parent_blueprint_id: Some(bp3_id), + internal_dns_version: Generation::new(), + time_created: Utc::now(), + creator: "test".to_string(), + comment: "test".to_string(), + }; + datastore + .blueprint_insert(&opctx, &bp4) + .await + .expect("failed to insert blueprint"); + datastore + .blueprint_target_set_current( + &opctx, + BlueprintTarget { + target_id: bp4_id, + enabled: true, + time_made_target: Utc::now(), + }, + ) + .await + .expect("failed to set blueprint target"); + assert_eq!(harness.sled_ids, fetch_service_sled_ids().await); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } } diff --git a/nexus/db-queries/src/db/lookup.rs b/nexus/db-queries/src/db/lookup.rs index 050c1dcfe9..380c9db140 100644 --- a/nexus/db-queries/src/db/lookup.rs +++ b/nexus/db-queries/src/db/lookup.rs @@ -358,11 +358,6 @@ impl<'a> LookupPath<'a> { Zpool::PrimaryKey(Root { lookup_root: self }, id) } - /// Select a resource of type Service, identified by its id - pub fn service_id(self, id: Uuid) -> Service<'a> { - Service::PrimaryKey(Root { lookup_root: self }, id) - } - /// Select a resource of type Switch, identified by its id pub fn switch_id(self, id: Uuid) -> Switch<'a> { Switch::PrimaryKey(Root { lookup_root: self }, id) @@ -826,15 +821,6 @@ lookup_resource! { primary_key_columns = [ { column_name = "id", rust_type = Uuid } ], } -lookup_resource! { - name = "Service", - ancestors = [], - children = [], - lookup_by_name = false, - soft_deletes = false, - primary_key_columns = [ { column_name = "id", rust_type = Uuid } ] -} - lookup_resource! { name = "Switch", ancestors = [], From 65cbb829d60fbb63ada38cc54d6518cbe7e57eca Mon Sep 17 00:00:00 2001 From: Ryan Goodfellow Date: Sat, 9 Mar 2024 11:00:24 -0800 Subject: [PATCH 094/157] Add on-demand communication probes. 
(#4585) --- .github/buildomat/jobs/a4x2-deploy.sh | 205 ++++++++ .github/buildomat/jobs/a4x2-prepare.sh | 94 ++++ .github/buildomat/jobs/ci-tools.sh | 5 + .github/buildomat/jobs/package.sh | 1 + .github/buildomat/jobs/tuf-repo.sh | 0 .gitignore | 3 +- Cargo.lock | 414 +++++++++------ Cargo.toml | 13 +- clients/nexus-client/src/lib.rs | 2 + clients/sled-agent-client/src/lib.rs | 25 +- common/src/api/external/mod.rs | 11 + common/src/api/internal/shared.rs | 2 + dev-tools/omdb/src/bin/omdb/db.rs | 43 +- end-to-end-tests/Cargo.toml | 11 +- end-to-end-tests/src/bin/commtest.rs | 443 ++++++++++++++++ end-to-end-tests/src/bin/dhcp-server.rs | 125 +++++ end-to-end-tests/src/helpers/cli.rs | 22 + end-to-end-tests/src/helpers/icmp.rs | 306 +++++++++++ end-to-end-tests/src/helpers/mod.rs | 2 + end-to-end-tests/src/instance_launch.rs | 1 + illumos-utils/src/running_zone.rs | 6 + installinator/src/dispatch.rs | 4 +- nexus/Cargo.toml | 2 +- nexus/db-model/src/deployment.rs | 3 +- nexus/db-model/src/external_ip.rs | 47 +- nexus/db-model/src/inventory.rs | 3 +- nexus/db-model/src/lib.rs | 2 + nexus/db-model/src/network_interface.rs | 65 ++- nexus/db-model/src/omicron_zone_config.rs | 17 +- nexus/db-model/src/probe.rs | 50 ++ nexus/db-model/src/schema.rs | 16 +- nexus/db-model/src/unsigned.rs | 2 + nexus/db-queries/Cargo.toml | 1 + .../src/db/datastore/external_ip.rs | 82 ++- nexus/db-queries/src/db/datastore/mod.rs | 5 + .../src/db/datastore/network_interface.rs | 104 +++- nexus/db-queries/src/db/datastore/probe.rs | 390 ++++++++++++++ nexus/db-queries/src/db/datastore/vpc.rs | 34 +- .../db-queries/src/db/queries/external_ip.rs | 7 + .../src/db/queries/network_interface.rs | 9 +- .../execution/src/resource_allocation.rs | 26 +- .../planning/src/blueprint_builder.rs | 10 +- nexus/src/app/instance_network.rs | 179 ++++++- nexus/src/app/mod.rs | 1 + nexus/src/app/probe.rs | 109 ++++ nexus/src/app/rack.rs | 5 +- nexus/src/app/sagas/project_create.rs | 1 - nexus/src/app/switch_port.rs | 6 +- nexus/src/app/vpc.rs | 2 +- nexus/src/external_api/http_entrypoints.rs | 153 +++++- nexus/src/external_api/tag-config.json | 6 + nexus/src/internal_api/http_entrypoints.rs | 35 +- nexus/tests/integration_tests/mod.rs | 1 + nexus/tests/integration_tests/oximeter.rs | 2 +- nexus/tests/integration_tests/probe.rs | 127 +++++ nexus/tests/output/nexus_tags.txt | 4 + .../output/uncovered-authz-endpoints.txt | 4 + nexus/types/src/deployment.rs | 2 - nexus/types/src/external_api/params.rs | 19 + nexus/types/src/inventory.rs | 4 +- openapi/nexus-internal.json | 192 ++++++- openapi/nexus.json | 489 ++++++++++++++++++ openapi/sled-agent.json | 20 + package-manifest.toml | 17 + schema/all-zone-requests.json | 20 + schema/all-zones-requests.json | 20 + schema/crdb/40.0.0/up1.sql | 10 + schema/crdb/40.0.0/up2.sql | 4 + schema/crdb/40.0.0/up3.sql | 1 + schema/crdb/40.0.0/up4.sql | 1 + schema/crdb/dbinit.sql | 22 +- schema/rss-service-plan-v2.json | 20 + sled-agent/src/lib.rs | 1 + sled-agent/src/params.rs | 6 +- sled-agent/src/probe_manager.rs | 383 ++++++++++++++ sled-agent/src/sled_agent.rs | 16 + tools/ci_download_maghemite_mgd | 2 + tools/ci_download_maghemite_openapi | 4 +- tools/ci_download_thundermuffin | 153 ++++++ tools/install_builder_prerequisites.sh | 3 + tools/thundermuffin_checksums | 1 + tools/thundermuffin_version | 1 + workspace-hack/Cargo.toml | 108 ++-- 83 files changed, 4440 insertions(+), 327 deletions(-) create mode 100755 .github/buildomat/jobs/a4x2-deploy.sh create mode 100755 
.github/buildomat/jobs/a4x2-prepare.sh mode change 100644 => 100755 .github/buildomat/jobs/tuf-repo.sh create mode 100644 end-to-end-tests/src/bin/commtest.rs create mode 100644 end-to-end-tests/src/bin/dhcp-server.rs create mode 100644 end-to-end-tests/src/helpers/cli.rs create mode 100644 end-to-end-tests/src/helpers/icmp.rs create mode 100644 nexus/db-model/src/probe.rs create mode 100644 nexus/db-queries/src/db/datastore/probe.rs create mode 100644 nexus/src/app/probe.rs create mode 100644 nexus/tests/integration_tests/probe.rs create mode 100644 schema/crdb/40.0.0/up1.sql create mode 100644 schema/crdb/40.0.0/up2.sql create mode 100644 schema/crdb/40.0.0/up3.sql create mode 100644 schema/crdb/40.0.0/up4.sql create mode 100644 sled-agent/src/probe_manager.rs create mode 100755 tools/ci_download_thundermuffin create mode 100644 tools/thundermuffin_checksums create mode 100644 tools/thundermuffin_version diff --git a/.github/buildomat/jobs/a4x2-deploy.sh b/.github/buildomat/jobs/a4x2-deploy.sh new file mode 100755 index 0000000000..a07f953281 --- /dev/null +++ b/.github/buildomat/jobs/a4x2-deploy.sh @@ -0,0 +1,205 @@ +#!/bin/bash +#: +#: name = "a4x2-deploy" +#: variety = "basic" +#: target = "lab-2.0-opte-0.27" +#: rust_toolchain = "stable" +#: output_rules = [ +#: "/out/falcon/*.log", +#: "/out/falcon/*.err", +#: "/out/connectivity-report.json", +#: "/ci/out/*-sled-agent.log", +#: "/ci/out/*cockroach*.log", +#: "%/out/dhcp-server.log", +#: ] +#: skip_clone = true +#: enable = false +#: +#: [dependencies.a4x2] +#: job = "a4x2-prepare" + +set -o errexit +set -o pipefail +set -o xtrace + +pfexec mkdir -p /out +pfexec chown "$UID" /out + +# +# If we fail, try to collect some debugging information +# +_exit_trap() { + local status=$? + [[ $status -eq 0 ]] && exit 0 + + set +o errexit + + df -h + + # show what services have issues + for gimlet in g0 g1 g2 g3; do + ./a4x2 exec $gimlet "svcs -xvZ" + done + + mkdir -p /out/falcon + cp .falcon/* /out/falcon/ + for x in ce cr1 cr2 g0 g1 g2 g3; do + mv /out/falcon/$x.out /out/falcon/$x.log + done + cp connectivity-report.json /out/ + + mkdir -p /ci/out + + for gimlet in g0 g1 g2 g3; do + ./a4x2 exec \ + $gimlet \ + "cat /var/svc/log/oxide-sled-agent:default.log" > \ + /ci/out/$gimlet-sled-agent.log + done + + # collect cockroachdb logs + mkdir -p /ci/log + for gimlet in g0 g1 g2 g3; do + ./a4x2 exec $gimlet 'cat /pool/ext/*/crypt/zone/oxz_cockroachdb*/root/data/logs/cockroach.log' > \ + /ci/out/$gimlet-cockroach.log + + ./a4x2 exec $gimlet 'cat /pool/ext/*/crypt/zone/oxz_cockroachdb*/root/data/logs/cockroach-stderr.log' > \ + /ci/out/$gimlet-cockroach-stderr.log + + ./a4x2 exec $gimlet 'cat /pool/ext/*/crypt/zone/oxz_cockroachdb*/root/data/logs/cockroach-health.log' > \ + /ci/out/$gimlet-cockroach-health.log + + ./a4x2 exec $gimlet 'cat /pool/ext/*/crypt/zone/oxz_cockroachdb*/root/var/svc/log/oxide-cockroachdb:default.log*' > \ + /ci/out/$gimlet-oxide-cockroachdb.log + done +} +trap _exit_trap EXIT + +# +# Install propolis +# +curl -fOL https://buildomat.eng.oxide.computer/wg/0/artefact/01HJ4BJJY2Q9EKXHYV6HQZ8XPN/qQS2fnkS9LebcL4cDLeHRWdleSiXaGKEXGLDucRoab8pwBSi/01HJ4BJY5F995ET252YSD4NJWV/01HJ4CGFH946THBF0ZRH6SRM8X/propolis-server +chmod +x propolis-server +pfexec mv propolis-server /usr/bin/ + +# +# Make space for CI work +# +export DISK=${DISK:-c1t1d0} +pfexec diskinfo +pfexec zpool create -f cpool $DISK +pfexec zfs create -o mountpoint=/ci cpool/ci + +if [[ $(curl -s http://catacomb.eng.oxide.computer:12346/trim-me) =~ "true" ]]; then + pfexec 
zpool trim cpool
+    while [[ ! $(zpool status -t cpool) =~ "100%" ]]; do sleep 10; done
+fi
+
+pfexec chown "$UID" /ci
+cd /ci
+
+#
+# Fetch and decompress the cargo bay from the a4x2-prepare job
+#
+for x in ce cr1 cr2 omicron-common g0 g1 g2 g3 tools; do
+    tar -xvzf /input/a4x2/out/cargo-bay-$x.tgz
+done
+
+for sled in g0 g1 g2 g3; do
+    cp -r cargo-bay/omicron-common/omicron/out/* cargo-bay/$sled/omicron/out/
+done
+ls -R
+
+#
+# Fetch the a4x2 topology manager program
+#
+buildomat_url=https://buildomat.eng.oxide.computer
+testbed_artifact_path=public/file/oxidecomputer/testbed/topo/
+testbed_rev=677559e30b4dfc65c374b24336ac23d40102de81
+curl -fOL $buildomat_url/$testbed_artifact_path/$testbed_rev/a4x2
+chmod +x a4x2
+
+#
+# Create a zpool for falcon images and disks
+#
+
+#
+# Install falcon base images
+#
+export FALCON_DATASET=cpool/falcon
+images="debian-11.0_0 helios-2.0_0"
+for img in $images; do
+    file=$img.raw.xz
+    curl -OL http://catacomb.eng.oxide.computer:12346/falcon/$file
+    unxz --keep -T 0 $file
+
+    file=$img.raw
+    name=${img%_*}
+    fsize=`ls -l $img.raw | awk '{print $5}'`
+    # Round the image size up to a whole number of 4 KiB blocks to match the
+    # volblocksize of the zvol created below.
+    vsize=$(( fsize + 4096 - fsize % 4096 ))
+
+    pfexec zfs create -p -V $vsize -o volblocksize=4k "$FALCON_DATASET/img/$name"
+    pfexec dd if=$img.raw of="/dev/zvol/rdsk/$FALCON_DATASET/img/$name" bs=1024k status=progress
+    pfexec zfs snapshot "$FALCON_DATASET/img/$name@base"
+done
+
+#
+# Install OVMF
+#
+curl -fOL http://catacomb.eng.oxide.computer:12346/falcon/OVMF_CODE.fd
+pfexec mkdir -p /var/ovmf
+pfexec cp OVMF_CODE.fd /var/ovmf/OVMF_CODE.fd
+
+#
+# Fetch the arista image
+#
+curl -fOL http://catacomb.eng.oxide.computer:12346/falcon/arista.gz.xz
+unxz arista.gz.xz
+pfexec zfs receive cpool/falcon/img/arista@base < arista.gz
+
+#
+# Run the VM dhcp server
+#
+export EXT_INTERFACE=${EXT_INTERFACE:-igb0}
+
+cp /input/a4x2/out/dhcp-server .
+chmod +x dhcp-server
+first=`bmat address ls -f extra -Ho first`
+last=`bmat address ls -f extra -Ho last`
+gw=`bmat address ls -f extra -Ho gateway`
+server=`ipadm show-addr $EXT_INTERFACE/dhcp -po ADDR | sed 's#/.*##g'`
+pfexec ./dhcp-server $first $last $gw $server &> /out/dhcp-server.log &
+
+#
+# Run the topology
+#
+pfexec ./a4x2 launch
+
+#
+# Add a route to the rack ip pool
+#
+
+# Get the DHCP address for the external interface of the customer edge VM. This
+# VM interface is attached to the host machine's external interface via viona.
+customer_edge_addr=$(./a4x2 exec ce \
+    "ip -4 -j addr show enp0s10 | jq -r '.[0].addr_info[] | select(.dynamic == true) | .local'")
+
+# Add the route to the rack via the customer edge VM
+pfexec dladm
+pfexec ipadm
+pfexec netstat -nr
+pfexec route add 198.51.100.0/24 $customer_edge_addr
+
+#
+# Run the communications test program
+#
+cp /input/a4x2/out/commtest .
+chmod +x commtest +pfexec ./commtest http://198.51.100.23 run \ + --ip-pool-begin 198.51.100.40 \ + --ip-pool-end 198.51.100.70 \ + --icmp-loss-tolerance 10 \ + --test-duration 300s \ + --packet-rate 30 + +cp connectivity-report.json /out/ diff --git a/.github/buildomat/jobs/a4x2-prepare.sh b/.github/buildomat/jobs/a4x2-prepare.sh new file mode 100755 index 0000000000..bc88ddd4c0 --- /dev/null +++ b/.github/buildomat/jobs/a4x2-prepare.sh @@ -0,0 +1,94 @@ +#!/bin/bash +#: +#: name = "a4x2-prepare" +#: variety = "basic" +#: target = "helios-2.0" +#: rust_toolchain = "stable" +#: output_rules = [ +#: "=/out/cargo-bay-ce.tgz", +#: "=/out/cargo-bay-cr1.tgz", +#: "=/out/cargo-bay-cr2.tgz", +#: "=/out/cargo-bay-g0.tgz", +#: "=/out/cargo-bay-g1.tgz", +#: "=/out/cargo-bay-g2.tgz", +#: "=/out/cargo-bay-g3.tgz", +#: "=/out/cargo-bay-tools.tgz", +#: "=/out/cargo-bay-omicron-common.tgz", +#: "=/out/commtest", +#: "=/out/dhcp-server", +#: ] +#: access_repos = [ +#: "oxidecomputer/testbed", +#: ] +#: enable = false + +source ./env.sh + +set -o errexit +set -o pipefail +set -o xtrace + +pfexec mkdir -p /out +pfexec chown "$UID" /out + +# +# Prep to build omicron +# +banner "prerequisites" +set -o xtrace +./tools/install_builder_prerequisites.sh -y + +# +# Build the commtest program and place in the output +# +banner "commtest" +cargo build -p end-to-end-tests --bin commtest --bin dhcp-server --release +cp target/release/commtest /out/ +cp target/release/dhcp-server /out/ + +# +# Clone the testbed repo +# +banner "testbed" +cd /work/oxidecomputer +rm -rf testbed +git clone https://github.com/oxidecomputer/testbed +cd testbed/a4x2 + +# +# Build the a4x2 cargo bay using the omicron sources in this branch, fetch the +# softnpu artifacts into the cargo bay, zip up the cargo bay and place it in the +# output. +# +OMICRON=/work/oxidecomputer/omicron ./config/build-packages.sh + +# Create an omicron archive that captures common assets + +pushd cargo-bay +mkdir -p omicron-common/omicron/ +cp -r g0/omicron/out omicron-common/omicron/ +# sled agent, gateway and switch archives are sled-specific +rm omicron-common/omicron/out/omicron-sled-agent.tar +rm omicron-common/omicron/out/omicron-gateway* +rm omicron-common/omicron/out/switch-softnpu.tar.gz +popd + +# Remove everything in $sled/omicron/out except sled-agent, mgs (gateway), and +# switch tar archives, these common elements are in the omicron-common archive +for sled in g0 g1 g2 g3; do + find cargo-bay/$sled/omicron/out/ -maxdepth 1 -mindepth 1 \ + | grep -v sled-agent \ + | grep -v omicron-gateway \ + | grep -v switch-softnpu \ + | xargs -l rm -rf +done + +# Put the softnpu artifacts in place. 
+./config/fetch-softnpu-artifacts.sh + +# Archive everything up and place it in the output +for x in ce cr1 cr2 g0 g1 g2 g3 tools omicron-common; do + tar -czf cargo-bay-$x.tgz cargo-bay/$x + mv cargo-bay-$x.tgz /out/ +done + diff --git a/.github/buildomat/jobs/ci-tools.sh b/.github/buildomat/jobs/ci-tools.sh index 07a63af30c..ce17d4fb30 100755 --- a/.github/buildomat/jobs/ci-tools.sh +++ b/.github/buildomat/jobs/ci-tools.sh @@ -8,6 +8,7 @@ #: "=/work/end-to-end-tests/*.gz", #: "=/work/caboose-util.gz", #: "=/work/tufaceous.gz", +#: "=/work/commtest", #: ] set -o errexit @@ -33,6 +34,10 @@ export CARGO_INCREMENTAL=0 ptime -m cargo build --locked -p end-to-end-tests --tests --bin bootstrap \ --message-format json-render-diagnostics >/tmp/output.end-to-end.json +mkdir -p /work +ptime -m cargo build --locked -p end-to-end-tests --tests --bin commtest +cp target/debug/commtest /work/commtest + mkdir -p /work/end-to-end-tests for p in target/debug/bootstrap $(/opt/ooce/bin/jq -r 'select(.profile.test) | .executable' /tmp/output.end-to-end.json); do # shellcheck disable=SC2094 diff --git a/.github/buildomat/jobs/package.sh b/.github/buildomat/jobs/package.sh index dc89bc787b..d290976d9f 100755 --- a/.github/buildomat/jobs/package.sh +++ b/.github/buildomat/jobs/package.sh @@ -124,6 +124,7 @@ zones=( out/omicron-gateway-softnpu.tar.gz out/omicron-gateway-asic.tar.gz out/overlay.tar.gz + out/probe.tar.gz ) cp "${zones[@]}" /work/zones/ diff --git a/.github/buildomat/jobs/tuf-repo.sh b/.github/buildomat/jobs/tuf-repo.sh old mode 100644 new mode 100755 diff --git a/.gitignore b/.gitignore index fc5fd5f297..a13536aab7 100644 --- a/.gitignore +++ b/.gitignore @@ -13,4 +13,5 @@ debug.out rusty-tags.vi *.sw* tags -.direnv \ No newline at end of file +.direnv +connectivity-report.json diff --git a/Cargo.lock b/Cargo.lock index 416766b9cb..0cfc7b4500 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -54,12 +54,12 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" dependencies = [ "cfg-if", - "getrandom 0.2.10", + "getrandom 0.2.12", "once_cell", "version_check", "zerocopy 0.7.32", @@ -67,9 +67,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.4" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] @@ -160,9 +160,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" dependencies = [ "backtrace", ] @@ -241,7 +241,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed72493ac66d5804837f480ab3766c72bdfab91a65e565fc54fa9e42db0073a8" dependencies = [ "anstyle", - "bstr 1.6.0", + "bstr 1.9.0", "doc-comment", "predicates", "predicates-core", @@ -377,7 +377,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" dependencies = [ "futures-core", - "getrandom 
0.2.10", + "getrandom 0.2.12", "instant", "pin-project-lite", "rand 0.8.5", @@ -485,7 +485,7 @@ version = "0.69.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4c69fae65a523209d34240b60abe0c42d33d1045d445c0839d8a4894a736e2d" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.2", "cexpr", "clang-sys", "lazy_static", @@ -531,9 +531,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.0" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" dependencies = [ "serde", ] @@ -678,12 +678,12 @@ dependencies = [ [[package]] name = "bstr" -version = "1.6.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" +checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc" dependencies = [ "memchr", - "regex-automata 0.3.8", + "regex-automata 0.4.5", "serde", ] @@ -909,9 +909,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" dependencies = [ "android-tzdata", "iana-time-zone", @@ -919,7 +919,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.48.5", + "windows-targets 0.52.0", ] [[package]] @@ -988,9 +988,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.0" +version = "4.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80c21025abd42669a92efc996ef13cfb2c5c627858421ea58d5c3b331a6c134f" +checksum = "c918d541ef2913577a0f9566e9ce27cb35b6df072075769e0b26cb5a554520da" dependencies = [ "clap_builder", "clap_derive", @@ -998,9 +998,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.0" +version = "4.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458bf1f341769dfcf849846f65dffdf9146daa56bcd2a47cb4e1de9915567c99" +checksum = "9f3e7391dad68afb0c2ede1bf619f579a3dc9c2ec67f089baa397123a2f3d1eb" dependencies = [ "anstream", "anstyle", @@ -1050,11 +1050,10 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "colored" -version = "2.0.4" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6" +checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" dependencies = [ - "is-terminal", "lazy_static", "windows-sys 0.48.0", ] @@ -1087,9 +1086,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "constant_time_eq" @@ -1109,6 +1108,17 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +[[package]] +name = "cookie" +version = "0.17.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7efb37c3e1ccb1ff97164ad95ac1606e8ccd35b3fa0a7d99a304c7f4a428cc24" +dependencies = [ + "percent-encoding", + "time", + "version_check", +] + [[package]] name = "cookie" version = "0.18.0" @@ -1119,6 +1129,23 @@ dependencies = [ "version_check", ] +[[package]] +name = "cookie_store" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "387461abbc748185c3a6e1673d826918b450b87ff22639429c694619a83b6cf6" +dependencies = [ + "cookie 0.17.0", + "idna 0.3.0", + "log", + "publicsuffix", + "serde", + "serde_derive", + "serde_json", + "time", + "url", +] + [[package]] name = "core-foundation" version = "0.9.3" @@ -1201,7 +1228,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.0", + "clap 4.5.1", "criterion-plot", "futures", "is-terminal", @@ -1259,25 +1286,18 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if", "crossbeam-utils", - "memoffset 0.9.0", - "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crossterm" @@ -1285,7 +1305,7 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.2", "crossterm_winapi", "futures-core", "libc", @@ -1646,10 +1666,11 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.8" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ + "powerfmt", "serde", ] @@ -1719,13 +1740,34 @@ dependencies = [ "syn 2.0.51", ] +[[package]] +name = "dhcproto" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "000717b4f6913807b6195419e0bacb008d449ba6023ca26abf349c4ff2f1866b" +dependencies = [ + "dhcproto-macros", + "hex", + "ipnet", + "rand 0.8.5", + "thiserror", + "trust-dns-proto", + "url", +] + +[[package]] +name = "dhcproto-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7993efb860416547839c115490d4951c6d0f8ec04a3594d9dd99d50ed7ec170" + [[package]] name = "diesel" version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62c6fcf842f17f8c78ecf7c81d75c5ce84436b41ee07e03f490fbb5f5a8731d8" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.2", "byteorder", "chrono", "diesel_derives", @@ -1863,7 +1905,7 @@ dependencies = [ "anyhow", "camino", "chrono", - "clap 4.5.0", + "clap 4.5.1", "dns-service-client", "dropshot", "expectorate", @@ -1988,7 +2030,7 @@ dependencies = [ "futures", "hostname", "http 0.2.12", - "hyper 0.14.27", + "hyper 0.14.28", "indexmap 2.2.5", "multer", "openapiv3", @@ -2165,12 +2207,20 @@ dependencies = [ name = 
"end-to-end-tests" version = "0.1.0" dependencies = [ + "anstyle", "anyhow", "async-trait", "base64", "chrono", + "clap 4.5.1", + "colored", + "dhcproto", "http 0.2.12", - "hyper 0.14.27", + "humantime", + "hyper 0.14.28", + "internet-checksum", + "ispf", + "macaddr", "omicron-sled-agent", "omicron-test-utils", "omicron-workspace-hack", @@ -2181,6 +2231,7 @@ dependencies = [ "russh-keys", "serde", "serde_json", + "socket2 0.5.5", "tokio", "toml 0.8.10", "trust-dns-resolver", @@ -2569,7 +2620,7 @@ name = "gateway-cli" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.0", + "clap 4.5.1", "futures", "gateway-client", "gateway-messages", @@ -2711,9 +2762,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if", "js-sys", @@ -2751,7 +2802,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" dependencies = [ "aho-corasick", - "bstr 1.6.0", + "bstr 1.9.0", "fnv", "log", "regex", @@ -3093,7 +3144,7 @@ dependencies = [ "form_urlencoded", "futures", "http 0.2.12", - "hyper 0.14.27", + "hyper 0.14.28", "log", "once_cell", "regex", @@ -3170,9 +3221,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", @@ -3185,7 +3236,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -3218,7 +3269,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.27", + "hyper 0.14.28", "rustls 0.21.9", "tokio", "tokio-rustls 0.24.1", @@ -3253,7 +3304,7 @@ dependencies = [ "http 0.2.12", "http-range", "httpdate", - "hyper 0.14.27", + "hyper 0.14.28", "mime_guess", "percent-encoding", "rand 0.8.5", @@ -3269,7 +3320,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.27", + "hyper 0.14.28", "native-tls", "tokio", "tokio-native-tls", @@ -3335,6 +3386,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "idna" version = "0.5.0" @@ -3474,7 +3535,7 @@ dependencies = [ "bytes", "camino", "cancel-safe-futures", - "clap 4.5.0", + "clap 4.5.1", "ddm-admin-client", "display-error-chain", "futures", @@ -3534,10 +3595,10 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "clap 4.5.0", + "clap 4.5.1", "dropshot", "expectorate", - "hyper 0.14.27", + "hyper 0.14.28", "installinator-common", "omicron-common", "omicron-test-utils", @@ -3593,7 +3654,7 @@ dependencies = [ "dropshot", "expectorate", "futures", - 
"hyper 0.14.27", + "hyper 0.14.28", "omicron-common", "omicron-test-utils", "omicron-workspace-hack", @@ -3615,7 +3676,7 @@ name = "internal-dns-cli" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.0", + "clap 4.5.1", "dropshot", "internal-dns", "omicron-common", @@ -3625,6 +3686,12 @@ dependencies = [ "trust-dns-resolver", ] +[[package]] +name = "internet-checksum" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc6d6206008e25125b1f97fbe5d309eb7b85141cf9199d52dbd3729a1584dd16" + [[package]] name = "ipcc" version = "0.1.0" @@ -3686,6 +3753,14 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7655c9839580ee829dfacba1d1278c2b7883e50a277ff7541299489d6bdfdc45" +[[package]] +name = "ispf" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/ispf#f78443a98397f7818b1e7a487dbb7d5cad625496" +dependencies = [ + "serde", +] + [[package]] name = "itertools" version = "0.10.5" @@ -3884,7 +3959,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d8de370f98a6cb8a4606618e53e802f93b094ddec0f96988eaec2c27e6e9ce7" dependencies = [ - "clap 4.5.0", + "clap 4.5.1", "termcolor", "threadpool", ] @@ -3940,7 +4015,7 @@ version = "0.2.4" source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" dependencies = [ "bitfield", - "clap 4.5.0", + "clap 4.5.1", "packed_struct", "serde", ] @@ -4044,9 +4119,9 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "memchr" -version = "2.6.3" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "memmap" @@ -4076,15 +4151,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] - [[package]] name = "mg-admin-client" version = "0.1.0" @@ -4211,7 +4277,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.12", ] [[package]] @@ -4366,7 +4432,7 @@ dependencies = [ "camino", "camino-tempfile", "chrono", - "cookie", + "cookie 0.18.0", "db-macros", "diesel", "diesel-dtrace", @@ -4376,7 +4442,7 @@ dependencies = [ "gateway-client", "headers", "http 0.2.12", - "hyper 0.14.27", + "hyper 0.14.28", "hyper-rustls 0.26.0", "illumos-utils", "internal-dns", @@ -4414,6 +4480,7 @@ dependencies = [ "regex", "rustls 0.22.2", "samael", + "schemars", "serde", "serde_json", "serde_urlencoded", @@ -4580,7 +4647,7 @@ dependencies = [ "gateway-test-utils", "headers", "http 0.2.12", - "hyper 0.14.27", + "hyper 0.14.28", "internal-dns", "nexus-config", "nexus-db-queries", @@ -4659,7 +4726,7 @@ dependencies = [ "bitflags 1.3.2", "cfg-if", "libc", - "memoffset 0.7.1", + "memoffset", "pin-utils", "static_assertions", ] @@ -4670,7 +4737,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.2", "cfg-if", "libc", ] @@ 
-4758,6 +4825,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-derive" version = "0.4.0" @@ -4780,9 +4853,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" dependencies = [ "autocfg", "num-integer", @@ -4802,9 +4875,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", "libm", @@ -4968,7 +5041,7 @@ dependencies = [ "anyhow", "camino", "camino-tempfile", - "clap 4.5.0", + "clap 4.5.1", "dropshot", "expectorate", "futures", @@ -5002,7 +5075,7 @@ dependencies = [ "anyhow", "base64", "camino", - "clap 4.5.0", + "clap 4.5.1", "dropshot", "expectorate", "futures", @@ -5011,7 +5084,7 @@ dependencies = [ "gateway-test-utils", "hex", "http 0.2.12", - "hyper 0.14.27", + "hyper 0.14.28", "illumos-utils", "ipcc", "omicron-common", @@ -5053,7 +5126,7 @@ dependencies = [ "camino-tempfile", "cancel-safe-futures", "chrono", - "clap 4.5.0", + "clap 4.5.1", "criterion", "crucible-agent-client", "crucible-pantry-client", @@ -5073,7 +5146,7 @@ dependencies = [ "http 0.2.12", "httptest", "hubtools", - "hyper 0.14.27", + "hyper 0.14.28", "hyper-rustls 0.26.0", "illumos-utils", "internal-dns", @@ -5167,7 +5240,7 @@ dependencies = [ "camino", "camino-tempfile", "chrono", - "clap 4.5.0", + "clap 4.5.1", "crossterm", "crucible-agent-client", "csv", @@ -5219,7 +5292,7 @@ version = "0.1.0" dependencies = [ "anyhow", "camino", - "clap 4.5.0", + "clap 4.5.1", "expectorate", "futures", "hex", @@ -5286,7 +5359,7 @@ dependencies = [ "cancel-safe-futures", "cfg-if", "chrono", - "clap 4.5.0", + "clap 4.5.1", "crucible-agent-client", "ddm-admin-client", "derive_more", @@ -5304,7 +5377,7 @@ dependencies = [ "guppy", "hex", "http 0.2.12", - "hyper 0.14.27", + "hyper 0.14.28", "hyper-staticfile", "illumos-utils", "installinator-common", @@ -5418,14 +5491,14 @@ dependencies = [ "bit-set", "bit-vec", "bitflags 1.3.2", - "bitflags 2.4.0", + "bitflags 2.4.2", "bstr 0.2.17", - "bstr 1.6.0", + "bstr 1.9.0", "byteorder", "bytes", "chrono", "cipher", - "clap 4.5.0", + "clap 4.5.1", "clap_builder", "console", "const-oid", @@ -5450,12 +5523,12 @@ dependencies = [ "futures-util", "gateway-messages", "generic-array", - "getrandom 0.2.10", + "getrandom 0.2.12", "group", "hashbrown 0.14.3", "hex", "hmac", - "hyper 0.14.27", + "hyper 0.14.28", "indexmap 2.2.5", "inout", "ipnetwork", @@ -5483,7 +5556,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "regex", - "regex-automata 0.4.4", + "regex-automata 0.4.5", "regex-syntax 0.8.2", "reqwest", "ring 0.17.8", @@ -5495,7 +5568,6 @@ dependencies = [ "sha2", "similar", "slog", - "socket2 0.5.5", "spin 0.9.8", "string_cache", "subtle", @@ -5603,7 +5675,7 @@ version = "0.10.60" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" dependencies = [ - 
"bitflags 2.4.0", + "bitflags 2.4.2", "cfg-if", "foreign-types 0.3.2", "libc", @@ -5731,7 +5803,7 @@ dependencies = [ "chrono", "futures", "http 0.2.12", - "hyper 0.14.27", + "hyper 0.14.28", "omicron-workspace-hack", "progenitor", "rand 0.8.5", @@ -5804,11 +5876,11 @@ dependencies = [ "anyhow", "camino", "chrono", - "clap 4.5.0", + "clap 4.5.1", "dropshot", "expectorate", "futures", - "hyper 0.14.27", + "hyper 0.14.28", "internal-dns", "nexus-client", "nexus-types", @@ -5847,7 +5919,7 @@ dependencies = [ "bytes", "camino", "chrono", - "clap 4.5.0", + "clap 4.5.1", "dropshot", "expectorate", "futures", @@ -5917,7 +5989,7 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", - "clap 4.5.0", + "clap 4.5.1", "dropshot", "nexus-client", "omicron-common", @@ -5939,7 +6011,7 @@ dependencies = [ "anyhow", "camino", "chrono", - "clap 4.5.0", + "clap 4.5.1", "omicron-workspace-hack", "uuid", ] @@ -6466,6 +6538,12 @@ dependencies = [ "uuid", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -6693,10 +6771,10 @@ dependencies = [ "anyhow", "atty", "base64", - "clap 4.5.0", + "clap 4.5.1", "dropshot", "futures", - "hyper 0.14.27", + "hyper 0.14.28", "progenitor", "propolis_types", "rand 0.8.5", @@ -6732,7 +6810,7 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.0", + "bitflags 2.4.2", "lazy_static", "num-traits", "rand 0.8.5", @@ -6744,6 +6822,22 @@ dependencies = [ "unarray", ] +[[package]] +name = "psl-types" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33cb294fe86a74cbcf50d4445b37da762029549ebeea341421c7c70370f86cac" + +[[package]] +name = "publicsuffix" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96a8c1bda5ae1af7f99a2962e49df150414a43d62404644d98dd5c3a93d07457" +dependencies = [ + "idna 0.3.0", + "psl-types", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -6883,7 +6977,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.12", ] [[package]] @@ -6910,7 +7004,7 @@ version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcb12f8fbf6c62614b0d56eb352af54f6a22410c3b079eb53ee93c7b97dd31d8" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.2", "cassowary", "compact_str", "crossterm", @@ -6972,7 +7066,7 @@ dependencies = [ "anyhow", "camino", "camino-tempfile", - "clap 4.5.0", + "clap 4.5.1", "dropshot", "expectorate", "humantime", @@ -7027,7 +7121,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.12", "redox_syscall 0.2.16", "thiserror", ] @@ -7080,7 +7174,7 @@ checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.4", + "regex-automata 0.4.5", "regex-syntax 0.8.2", ] @@ -7092,15 +7186,9 @@ checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-automata" -version = "0.3.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" - -[[package]] -name = "regex-automata" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick", "memchr", @@ -7152,13 +7240,15 @@ checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" dependencies = [ "base64", "bytes", + "cookie 0.17.0", + "cookie_store", "encoding_rs", "futures-core", "futures-util", "h2", "http 0.2.12", "http-body 0.4.5", - "hyper 0.14.27", + "hyper 0.14.28", "hyper-rustls 0.24.2", "hyper-tls", "ipnet", @@ -7233,7 +7323,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.10", + "getrandom 0.2.12", "libc", "spin 0.9.8", "untrusted 0.9.0", @@ -7247,7 +7337,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ "base64", - "bitflags 2.4.0", + "bitflags 2.4.2", "serde", "serde_derive", ] @@ -7335,7 +7425,7 @@ dependencies = [ "aes", "aes-gcm", "async-trait", - "bitflags 2.4.0", + "bitflags 2.4.2", "byteorder", "chacha20", "ctr", @@ -7471,7 +7561,7 @@ version = "0.38.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.2", "errno", "libc", "linux-raw-sys", @@ -7605,7 +7695,7 @@ version = "13.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02a2d683a4ac90aeef5b1013933f6d977bd37d51ff3f4dad829d4931a7e6be86" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.2", "cfg-if", "clipboard-win", "fd-lock 4.0.2", @@ -8134,9 +8224,9 @@ dependencies = [ [[package]] name = "similar" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597" +checksum = "32fea41aca09ee824cc9724996433064c89f7777e60762749a4170a14abbfa21" dependencies = [ "bstr 0.2.17", "unicode-segmentation", @@ -8488,7 +8578,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "clap 4.5.0", + "clap 4.5.1", "dropshot", "futures", "gateway-messages", @@ -9145,14 +9235,16 @@ dependencies = [ [[package]] name = "time" -version = "0.3.27" +version = "0.3.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb39ee79a6d8de55f48f2293a830e040392f1c5f16e336bdd1788cd0aadce07" +checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" dependencies = [ "deranged", "itoa", "libc", + "num-conv", "num_threads", + "powerfmt", "serde", "time-core", "time-macros", @@ -9160,16 +9252,17 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.13" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733d258752e9303d392b94b75230d07b0b9c489350c69b851fc6c065fde3e8f9" 
+checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" dependencies = [ + "num-conv", "time-core", ] @@ -9525,11 +9618,10 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if", "log", "pin-project-lite", "tracing-attributes", @@ -9538,9 +9630,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", @@ -9549,9 +9641,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", ] @@ -9672,7 +9764,7 @@ dependencies = [ "assert_cmd", "camino", "chrono", - "clap 4.5.0", + "clap 4.5.1", "console", "datatest-stable", "fs-err", @@ -9832,9 +9924,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" @@ -9918,7 +10010,7 @@ dependencies = [ "camino", "camino-tempfile", "chrono", - "clap 4.5.0", + "clap 4.5.1", "debug-ignore", "display-error-chain", "dropshot", @@ -9949,7 +10041,7 @@ dependencies = [ "camino", "camino-tempfile", "cancel-safe-futures", - "clap 4.5.0", + "clap 4.5.1", "debug-ignore", "derive-where", "either", @@ -10129,7 +10221,7 @@ version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.12", "serde", ] @@ -10370,7 +10462,7 @@ dependencies = [ "buf-list", "camino", "ciborium", - "clap 4.5.0", + "clap 4.5.1", "crossterm", "futures", "humantime", @@ -10431,7 +10523,7 @@ dependencies = [ "bytes", "camino", "ciborium", - "clap 4.5.0", + "clap 4.5.1", "crossterm", "omicron-workspace-hack", "reedline", @@ -10456,7 +10548,7 @@ dependencies = [ "bytes", "camino", "camino-tempfile", - "clap 4.5.0", + "clap 4.5.1", "ddm-admin-client", "debug-ignore", "display-error-chain", @@ -10474,7 +10566,7 @@ dependencies = [ "hex", "http 0.2.12", "hubtools", - "hyper 0.14.27", + "hyper 0.14.28", "illumos-utils", "installinator", "installinator-artifact-client", @@ -10782,7 +10874,7 @@ dependencies = [ "camino", "cargo_metadata", "cargo_toml", - "clap 4.5.0", + "clap 4.5.1", ] [[package]] @@ -10901,8 +10993,7 @@ dependencies = [ [[package]] name = "zone" version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62a428a79ea2224ce8ab05d6d8a21bdd7b4b68a8dbc1230511677a56e72ef22" +source = 
"git+https://github.com/oxidecomputer/zone?branch=state-derive-eq-hash#f1920d5636c69ea8179f8ec659702dcdef43268c" dependencies = [ "itertools 0.10.5", "thiserror", @@ -10915,7 +11006,7 @@ name = "zone-network-setup" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.0", + "clap 4.5.1", "dropshot", "illumos-utils", "omicron-common", @@ -10928,8 +11019,7 @@ dependencies = [ [[package]] name = "zone_cfg_derive" version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5c4f01d3785e222d5aca11c9813e9c46b69abfe258756c99c9b628683626cc8" +source = "git+https://github.com/oxidecomputer/zone?branch=state-derive-eq-hash#f1920d5636c69ea8179f8ec659702dcdef43268c" dependencies = [ "heck 0.4.1", "proc-macro-error", diff --git a/Cargo.toml b/Cargo.toml index 299d715d67..96f47be86f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -158,6 +158,7 @@ resolver = "2" [workspace.dependencies] anyhow = "1.0" +anstyle = "1.0" api_identity = { path = "api_identity" } approx = "0.5.1" assert_matches = "1.5.0" @@ -184,6 +185,7 @@ ciborium = "0.2.2" cfg-if = "1.0" chrono = { version = "0.4", features = [ "serde" ] } clap = { version = "4.5", features = ["cargo", "derive", "env", "wrap_help"] } +colored = "2.1" cookie = "0.18" criterion = { version = "0.5.1", features = [ "async_tokio" ] } crossbeam = "0.8" @@ -246,8 +248,11 @@ installinator-common = { path = "installinator-common" } internal-dns = { path = "internal-dns" } ipcc = { path = "ipcc" } ipnet = "2.9" -ipnetwork = { version = "0.20", features = ["schemars"] } itertools = "0.12.1" +internet-checksum = "0.2" +ipcc-key-value = { path = "ipcc-key-value" } +ipnetwork = { version = "0.20", features = ["schemars"] } +ispf = { git = "https://github.com/oxidecomputer/ispf" } key-manager = { path = "key-manager" } kstat-rs = "0.2.3" libc = "0.2.153" @@ -373,6 +378,8 @@ slog-envlogger = "2.2" slog-error-chain = { git = "https://github.com/oxidecomputer/slog-error-chain", branch = "main", features = ["derive"] } slog-term = "2.9" smf = "0.2" +snafu = "0.7" +socket2 = { version = "0.5", features = ["all"] } sp-sim = { path = "sp-sim" } sprockets-common = { git = "http://github.com/oxidecomputer/sprockets", rev = "77df31efa5619d0767ffc837ef7468101608aee9" } sprockets-host = { git = "http://github.com/oxidecomputer/sprockets", rev = "77df31efa5619d0767ffc837ef7468101608aee9" } @@ -654,3 +661,7 @@ branch = "oxide/omicron" # See also: uuid-kinds/README.adoc. 
[patch."https://github.com/oxidecomputer/omicron"] omicron-uuid-kinds = { path = "uuid-kinds" } + +[patch.crates-io.zone] +git = 'https://github.com/oxidecomputer/zone' +branch = 'state-derive-eq-hash' diff --git a/clients/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs index 55bdf3d0aa..85c67ddbfd 100644 --- a/clients/nexus-client/src/lib.rs +++ b/clients/nexus-client/src/lib.rs @@ -33,6 +33,8 @@ progenitor::generate_api!( MacAddr = omicron_common::api::external::MacAddr, Name = omicron_common::api::external::Name, NewPasswordHash = omicron_passwords::NewPasswordHash, + NetworkInterface = omicron_common::api::internal::shared::NetworkInterface, + NetworkInterfaceKind = omicron_common::api::internal::shared::NetworkInterfaceKind, }, patch = { SledAgentInfo = { derives = [PartialEq, Eq] }, diff --git a/clients/sled-agent-client/src/lib.rs b/clients/sled-agent-client/src/lib.rs index eb1e57b11f..b2bc232ef5 100644 --- a/clients/sled-agent-client/src/lib.rs +++ b/clients/sled-agent-client/src/lib.rs @@ -6,6 +6,7 @@ use anyhow::Context; use async_trait::async_trait; +use omicron_common::api::internal::shared::NetworkInterface; use std::convert::TryFrom; use std::net::IpAddr; use std::net::SocketAddr; @@ -39,6 +40,7 @@ progenitor::generate_api!( PortSpeed = omicron_common::api::internal::shared::PortSpeed, SourceNatConfig = omicron_common::api::internal::shared::SourceNatConfig, Vni = omicron_common::api::external::Vni, + NetworkInterface = omicron_common::api::internal::shared::NetworkInterface, } ); @@ -141,7 +143,7 @@ impl types::OmicronZoneType { } /// The service vNIC providing external connectivity to this zone - pub fn service_vnic(&self) -> Option<&types::NetworkInterface> { + pub fn service_vnic(&self) -> Option<&NetworkInterface> { match self { types::OmicronZoneType::Nexus { nic, .. } | types::OmicronZoneType::ExternalDns { nic, .. 
}
@@ -556,26 +558,7 @@ impl From<omicron_common::api::internal::shared::NetworkInterfaceKind>
         match s {
             Instance { id } => Self::Instance(id),
             Service { id } => Self::Service(id),
-        }
-    }
-}
-
-impl From<omicron_common::api::internal::shared::NetworkInterface>
-    for types::NetworkInterface
-{
-    fn from(
-        s: omicron_common::api::internal::shared::NetworkInterface,
-    ) -> Self {
-        Self {
-            id: s.id,
-            kind: s.kind.into(),
-            name: s.name,
-            ip: s.ip,
-            mac: s.mac,
-            subnet: s.subnet.into(),
-            vni: s.vni,
-            primary: s.primary,
-            slot: s.slot,
+            Probe { id } => Self::Probe(id),
         }
     }
 }
diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs
index 17b4826f8c..58bc82c825 100644
--- a/common/src/api/external/mod.rs
+++ b/common/src/api/external/mod.rs
@@ -879,6 +879,8 @@ pub enum ResourceType {
     Vmm,
     Ipv4NatEntry,
     FloatingIp,
+    Probe,
+    ProbeNetworkInterface,
 }
 
 // IDENTITY METADATA
@@ -2827,6 +2829,15 @@ pub struct TufRepoGetResponse {
     pub description: TufRepoDescription,
 }
 
+#[derive(
+    Clone, Debug, Deserialize, JsonSchema, Serialize, PartialEq, ObjectIdentity,
+)]
+pub struct Probe {
+    #[serde(flatten)]
+    pub identity: IdentityMetadata,
+    pub sled: Uuid,
+}
+
 #[cfg(test)]
 mod test {
     use serde::Deserialize;
diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs
index c8d8b1c786..bf825fd2e7 100644
--- a/common/src/api/internal/shared.rs
+++ b/common/src/api/internal/shared.rs
@@ -36,6 +36,8 @@ pub enum NetworkInterfaceKind {
     Instance { id: Uuid },
     /// A vNIC associated with an internal service
     Service { id: Uuid },
+    /// A vNIC associated with a probe
+    Probe { id: Uuid },
 }
 
 /// Information required to construct a virtual network interface
diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs
index 339c257d8e..98e7748054 100644
--- a/dev-tools/omdb/src/bin/omdb/db.rs
+++ b/dev-tools/omdb/src/bin/omdb/db.rs
@@ -54,6 +54,7 @@ use nexus_db_model::IpAttachState;
 use nexus_db_model::IpKind;
 use nexus_db_model::NetworkInterface;
 use nexus_db_model::NetworkInterfaceKind;
+use nexus_db_model::Probe;
 use nexus_db_model::Project;
 use nexus_db_model::Region;
 use nexus_db_model::RegionSnapshot;
@@ -711,7 +712,25 @@ async fn lookup_service_kind(
     Ok(Some(service_kind))
 }
 
-/// Helper function to look up a project with the given ID.
+/// Helper function to look up a probe with the given ID.
+async fn lookup_probe(
+    datastore: &DataStore,
+    probe_id: Uuid,
+) -> anyhow::Result<Option<Probe>> {
+    use db::schema::probe::dsl;
+
+    let conn = datastore.pool_connection_for_tests().await?;
+    dsl::probe
+        .filter(dsl::id.eq(probe_id))
+        .limit(1)
+        .select(Probe::as_select())
+        .get_result_async(&*conn)
+        .await
+        .optional()
+        .with_context(|| format!("loading probe {probe_id}"))
+}
+
+/// Helper function to look up a project with the given ID.
 async fn lookup_project(
     datastore: &DataStore,
     project_id: Uuid,
@@ -2122,6 +2141,28 @@ async fn cmd_db_network_list_vnics(
                 }
             }
         }
+        NetworkInterfaceKind::Probe => {
+            match lookup_probe(datastore, nic.parent_id).await? {
+                Some(probe) => {
+                    match lookup_project(datastore, probe.project_id)
+                        .await?
+                    {
+                        Some(project) => (
+                            "probe",
+                            format!("{}/{}", project.name(), probe.name()),
+                        ),
+                        None => {
+                            eprintln!(
+                                "project with id {} not found",
+                                probe.project_id
+                            );
+                            continue;
+                        }
+                    }
+                }
+                None => ("probe?", "parent probe not found".to_string()),
+            }
+        }
         NetworkInterfaceKind::Service => {
             // We create service NICs named after the service, so we can use
             // the nic name instead of looking up the service.
diff --git a/end-to-end-tests/Cargo.toml b/end-to-end-tests/Cargo.toml
index fee32a344e..0fb9efd5cc 100644
--- a/end-to-end-tests/Cargo.toml
+++ b/end-to-end-tests/Cargo.toml
@@ -15,7 +15,7 @@ omicron-sled-agent.workspace = true
 omicron-test-utils.workspace = true
 oxide-client.workspace = true
 rand.workspace = true
-reqwest.workspace = true
+reqwest = { workspace = true, features = ["cookies"] }
 russh = "0.42.0"
 russh-keys = "0.42.0"
 serde.workspace = true
@@ -25,3 +25,12 @@ toml.workspace = true
 trust-dns-resolver.workspace = true
 uuid.workspace = true
 omicron-workspace-hack.workspace = true
+ispf.workspace = true
+internet-checksum.workspace = true
+humantime.workspace = true
+socket2.workspace = true
+colored.workspace = true
+anstyle.workspace = true
+clap.workspace = true
+dhcproto = "0.11"
+macaddr.workspace = true
diff --git a/end-to-end-tests/src/bin/commtest.rs b/end-to-end-tests/src/bin/commtest.rs
new file mode 100644
index 0000000000..27ca4633ce
--- /dev/null
+++ b/end-to-end-tests/src/bin/commtest.rs
@@ -0,0 +1,443 @@
+use anyhow::{anyhow, Result};
+use clap::{Parser, Subcommand};
+use end_to_end_tests::helpers::cli::oxide_cli_style;
+use end_to_end_tests::helpers::icmp::ping4_test_run;
+use oxide_client::{
+    types::{
+        IpPoolCreate, IpPoolLinkSilo, IpRange, Ipv4Range, Name, NameOrId,
+        PingStatus, ProbeCreate, ProbeInfo, ProjectCreate,
+        UsernamePasswordCredentials,
+    },
+    ClientHiddenExt, ClientLoginExt, ClientProjectsExt,
+    ClientSystemHardwareExt, ClientSystemNetworkingExt, ClientSystemStatusExt,
+    ClientVpcsExt,
+};
+use std::{
+    net::{IpAddr, Ipv4Addr},
+    time::{Duration, Instant},
+};
+use tokio::time::sleep;
+use uuid::Uuid;
+
+#[derive(Parser, Debug)]
+#[clap(version, about, long_about = None, styles = oxide_cli_style())]
+struct Cli {
+    /// Oxide API address, e.g., http://198.51.100.20
+    oxide_api: String,
+
+    /// How long to wait for the API to become available
+    #[arg(long, default_value = "60m")]
+    api_timeout: humantime::Duration,
+
+    #[clap(subcommand)]
+    command: Commands,
+}
+
+#[derive(Debug, Subcommand)]
+enum Commands {
+    Run(RunArgs),
+    Cleanup,
+}
+
+#[derive(Parser, Debug)]
+struct RunArgs {
+    /// Test duration
+    #[arg(long, default_value = "100s")]
+    test_duration: humantime::Duration,
+
+    /// Test packet rate in packets per second
+    #[arg(long, default_value_t = 10)]
+    packet_rate: usize,
+
+    /// How many lost ICMP packets may be tolerated
+    #[arg(long, default_value_t = 0)]
+    icmp_loss_tolerance: usize,
+
+    /// First address in the IP pool to use for testing
+    #[arg(long)]
+    ip_pool_begin: Ipv4Addr,
+
+    /// Last address in the IP pool to use for testing
+    #[arg(long)]
+    ip_pool_end: Ipv4Addr,
+}
+
+const API_RETRY_ATTEMPTS: usize = 15;
+
+#[tokio::main]
+pub async fn main() -> Result<()> {
+    let cli = Cli::parse();
+    match cli.command {
+        Commands::Run(ref args) => run(&cli, args).await,
+        Commands::Cleanup => cleanup(&cli).await,
+    }
+}
+
+async fn run(cli: &Cli, args: &RunArgs) -> Result<()> {
+    wait_until_oxide_api_is_available(cli).await?;
+    let (sleds, oxide) = rack_prepare(cli, args).await?;
+    let addrs = launch_probes(sleds, &oxide).await?;
+    test_connectivity(args, addrs)?;
+    Ok(())
+}
+
+async fn cleanup(cli: &Cli) -> Result<()> {
+    wait_until_oxide_api_is_available(cli).await?;
+    let oxide = cleanup_probes(cli).await?;
+    rack_cleanup(&oxide).await?;
+    Ok(())
+}
+
+async fn wait_until_oxide_api_is_available(cli: &Cli) -> Result<()> {
+    let oxide = oxide_client::Client::new(&cli.oxide_api);
+    let start = Instant::now();
+    loop {
+        if let 
Ok(result) = oxide.ping().send().await.map(|x| x.into_inner()) { + if result.status == PingStatus::Ok { + println!("the api is up"); + break; + } + } + if Instant::now().duration_since(start) + > Into::<Duration>::into(cli.api_timeout) + { + return Err(anyhow!( + "deadline ({}) for system startup exceeded", + cli.api_timeout + )); + } + println!("no api response yet, waiting 3s ..."); + sleep(Duration::from_secs(3)).await; + } + Ok(()) +} + +macro_rules! api_retry { + ($call:expr) => {{ + let mut limit = API_RETRY_ATTEMPTS; + loop { + match $call { + res @ Ok(_) => break res, + Err(e) => { + limit -= 1; + if limit == 0 { + break Err(e); + } + println!("API call error: {e}, retrying in 3s"); + sleep(Duration::from_secs(3)).await; + } + } + } + }}; +} + +async fn cleanup_probes(cli: &Cli) -> Result<oxide_client::Client> { + let rqb = reqwest::ClientBuilder::new() + .cookie_store(true) + .timeout(Duration::from_secs(15)) + .connect_timeout(Duration::from_secs(15)) + .build() + .unwrap(); + let oxide = oxide_client::Client::new_with_client(&cli.oxide_api, rqb); + + print!("logging in ... "); + api_retry!( + oxide + .login_local() + .silo_name(Name::try_from("recovery").unwrap()) + .body(UsernamePasswordCredentials { + password: "oxide".parse().unwrap(), + username: "recovery".parse().unwrap(), + }) + .send() + .await + )?; + println!("done"); + + let probes: Vec<ProbeInfo> = api_retry!( + oxide + .probe_list() + .project(Name::try_from("classone").unwrap()) + .limit(u32::MAX) + .send() + .await + )? + .into_inner() + .items; + + for probe in &probes { + print!("deleting probe {} ... ", *probe.name); + api_retry!( + oxide + .probe_delete() + .project(Name::try_from("classone").unwrap()) + .probe(probe.id) + .send() + .await + )?; + println!("done"); + } + + Ok(oxide) +} + +async fn rack_cleanup(oxide: &oxide_client::Client) -> Result<()> { + if let Err(e) = oxide + .project_view() + .project(Name::try_from("classone").unwrap()) + .send() + .await + { + if let Some(reqwest::StatusCode::NOT_FOUND) = e.status() { + println!("project does not exist"); + } else { + Err(e)?; + } + } else { + print!("deleting classone subnet ... "); + api_retry!( + oxide + .vpc_subnet_delete() + .project(Name::try_from("classone").unwrap()) + .vpc(Name::try_from("default").unwrap()) + .subnet(Name::try_from("default").unwrap()) + .send() + .await + )?; + println!("done"); + + print!("deleting classone vpc ... "); + api_retry!( + oxide + .vpc_delete() + .project(Name::try_from("classone").unwrap()) + .vpc(Name::try_from("default").unwrap()) + .send() + .await + )?; + println!("done"); + + print!("deleting classone project ... "); + api_retry!( + oxide + .project_delete() + .project(Name::try_from("classone").unwrap()) + .send() + .await + )?; + println!("done"); + } + Ok(()) +} + +async fn rack_prepare( + cli: &Cli, + args: &RunArgs, +) -> Result<(Vec<Uuid>, oxide_client::Client)> { + let rqb = reqwest::ClientBuilder::new().cookie_store(true).build().unwrap(); + + let oxide = oxide_client::Client::new_with_client(&cli.oxide_api, rqb); + + print!("logging in ... "); + api_retry!( + oxide + .login_local() + .silo_name(Name::try_from("recovery").unwrap()) + .body(UsernamePasswordCredentials { + password: "oxide".parse().unwrap(), + username: "recovery".parse().unwrap(), + }) + .send() + .await + )?; + println!("done"); + + api_retry!(if let Err(e) = oxide + .project_view() + .project(Name::try_from("classone").unwrap()) + .send() + .await + { + if let Some(reqwest::StatusCode::NOT_FOUND) = e.status() { + print!("project does not exist, creating ... 
"); + oxide + .project_create() + .body(ProjectCreate { + description: "A project for probes".into(), + name: "classone".parse().unwrap(), + }) + .send() + .await?; + println!("done"); + Ok(()) + } else { + Err(e) + } + } else { + println!("classone project already exists"); + Ok(()) + })?; + + let pool_name = "default"; + api_retry!( + if let Err(e) = oxide.ip_pool_view().pool("default").send().await { + if let Some(reqwest::StatusCode::NOT_FOUND) = e.status() { + print!("default ip pool does not exist, creating ..."); + oxide + .ip_pool_create() + .body(IpPoolCreate { + name: pool_name.parse().unwrap(), + description: "Default IP pool".to_string(), + }) + .send() + .await?; + oxide + .ip_pool_silo_link() + .pool(pool_name) + .body(IpPoolLinkSilo { + silo: NameOrId::Name("recovery".parse().unwrap()), + is_default: true, + }) + .send() + .await?; + println!("done"); + Ok(()) + } else { + Err(e) + } + } else { + println!("default ip pool already exists"); + Ok(()) + } + )?; + + let pool = api_retry!( + oxide + .ip_pool_range_list() + .limit(u32::MAX) + .pool(Name::try_from("default").unwrap()) + .send() + .await + )? + .into_inner() + .items; + + let range = Ipv4Range { first: args.ip_pool_begin, last: args.ip_pool_end }; + + let range_exists = pool + .iter() + .filter_map(|x| match &x.range { + IpRange::V4(r) => Some(r), + IpRange::V6(_) => None, + }) + .any(|x| x.first == range.first && x.last == range.last); + + if !range_exists { + print!("ip range does not exist, creating ... "); + api_retry!( + oxide + .ip_pool_range_add() + .pool(Name::try_from("default").unwrap()) + .body(IpRange::V4(range.clone())) + .send() + .await + )?; + println!("done"); + } else { + println!("ip range already exists"); + } + + print!("getting sled ids ... "); + let sleds = api_retry!(oxide.sled_list().limit(u32::MAX).send().await)? + .into_inner() + .items + .iter() + .map(|x| x.id) + .collect(); + println!("done"); + + Ok((sleds, oxide)) +} + +async fn launch_probes( + sleds: Vec, + oxide: &oxide_client::Client, +) -> Result> { + for (i, sled) in sleds.into_iter().enumerate() { + println!("checking if probe{i} exists"); + api_retry!(if let Err(e) = oxide + .probe_view() + .project(Name::try_from("classone").unwrap()) + .probe(Name::try_from(format!("probe{i}")).unwrap()) + .send() + .await + { + if let Some(reqwest::StatusCode::NOT_FOUND) = e.status() { + print!("probe{i} does not exist, creating ... "); + oxide + .probe_create() + .project(Name::try_from("classone").unwrap()) + .body(ProbeCreate { + description: format!("probe {i}"), + ip_pool: Some("default".parse().unwrap()), + name: format!("probe{i}").parse().unwrap(), + sled, + }) + .send() + .await?; + println!("done"); + Ok(()) + } else { + Err(e) + } + } else { + println!("probe{i} already exists"); + Ok(()) + })?; + } + + Ok(api_retry!( + oxide + .probe_list() + .project(Name::try_from("classone").unwrap()) + .limit(u32::MAX) + .send() + .await + )? 
+ .into_inner() + .items + .iter() + .map(|x| x.external_ips.first().unwrap().ip) + .filter_map(|x| match x { + IpAddr::V4(ip) => Some(ip), + IpAddr::V6(_) => None, + }) + .collect()) +} + +fn test_connectivity(args: &RunArgs, addrs: Vec<Ipv4Addr>) -> Result<()> { + let ttl = 255; + println!("testing connectivity to probes"); + let report = ping4_test_run( + &addrs, + ttl, + args.packet_rate, + args.test_duration.into(), + ); + + let out = serde_json::to_string_pretty(&report).unwrap(); + std::fs::write("connectivity-report.json", out.as_str()).unwrap(); + + for state in report.v4.iter() { + if state.lost > args.icmp_loss_tolerance { + panic!( + "{} has loss = {} packets which is greater than tolerance {}", + state.dest, state.lost, args.icmp_loss_tolerance, + ); + } + if state.rx_count == 0 { + panic!("received no responses from {}", state.dest); + } + } + println!("all connectivity tests within loss tolerance"); + Ok(()) +} diff --git a/end-to-end-tests/src/bin/dhcp-server.rs b/end-to-end-tests/src/bin/dhcp-server.rs new file mode 100644 index 0000000000..69681fa413 --- /dev/null +++ b/end-to-end-tests/src/bin/dhcp-server.rs @@ -0,0 +1,125 @@ +//! This is a dirt simple DHCP server for handing out addresses in a given +//! range. Leases do not expire. If the server runs out of addresses, it +//! panics. This is a stopgap program to hand out addresses to VMs in CI. It's +//! in no way meant to be a generic DHCP server solution. + +use anyhow::Result; +use clap::Parser; +use dhcproto::{ + v4::{ + self, Decodable, Decoder, DhcpOptions, Encodable, Message, Opcode, + OptionCode, + }, + Encoder, +}; +use end_to_end_tests::helpers::cli::oxide_cli_style; +use macaddr::MacAddr6; +use std::{ + collections::HashMap, + net::{Ipv4Addr, SocketAddrV4, UdpSocket}, +}; + +#[derive(Parser, Debug)] +#[clap(version, about, long_about = None, styles = oxide_cli_style())] +struct Cli { + /// First address in DHCP range. + begin: Ipv4Addr, + /// Last address in DHCP range. + end: Ipv4Addr, + /// Default router to advertise. + router: Ipv4Addr, + /// Server address to advertise.
+ server: Ipv4Addr, +} + +pub fn main() -> Result<()> { + let cli = Cli::parse(); + let mut current = cli.begin; + let mut assignments = HashMap::<MacAddr6, Ipv4Addr>::new(); + + let sock = UdpSocket::bind("0.0.0.0:67")?; + loop { + let mut buf = [0; 8192]; + let (n, src) = sock.recv_from(&mut buf)?; + + let mut msg = match Message::decode(&mut Decoder::new(&buf[..n])) { + Ok(msg) => msg, + Err(e) => { + eprintln!("message decode error {e}"); + continue; + } + }; + + println!("request: {msg:#?}"); + + if msg.opcode() != Opcode::BootRequest { + continue; + } + + let mac: [u8; 6] = msg.chaddr()[0..6].try_into().unwrap(); + let mac = MacAddr6::from(mac); + + let ip = match assignments.get(&mac) { + Some(ip) => *ip, + None => { + assignments.insert(mac, current); + let ip = current; + current = Ipv4Addr::from(u32::from(current) + 1); + if u32::from(current) > u32::from(cli.end) { + panic!("address exhaustion"); + } + ip + } + }; + + let mut opts = DhcpOptions::new(); + match msg.opts().get(OptionCode::MessageType) { + Some(v4::DhcpOption::MessageType(v4::MessageType::Discover)) => { + opts.insert(v4::DhcpOption::MessageType( + v4::MessageType::Offer, + )); + } + Some(v4::DhcpOption::MessageType(v4::MessageType::Request)) => { + opts.insert(v4::DhcpOption::MessageType(v4::MessageType::Ack)); + } + Some(mtype) => eprintln!("unexpected message type {mtype:?}"), + None => { + eprintln!("no message type"); + } + }; + // hardcode to /24 + opts.insert(v4::DhcpOption::SubnetMask(Ipv4Addr::new( + 255, 255, 255, 0, + ))); + // hardcode to something stable + opts.insert(v4::DhcpOption::DomainNameServer(vec![Ipv4Addr::new( + 1, 1, 1, 1, + )])); + opts.insert(v4::DhcpOption::ServerIdentifier(cli.server)); + // just something big enough to last CI runs + opts.insert(v4::DhcpOption::AddressLeaseTime(60 * 60 * 24 * 30)); + opts.insert(v4::DhcpOption::Router(vec![cli.router])); + if let Some(opt) = msg.opts().get(OptionCode::ClientIdentifier) { + opts.insert(opt.clone()); + } + + msg.set_opcode(Opcode::BootReply); + msg.set_siaddr(cli.server); + msg.set_yiaddr(ip); + msg.set_opts(opts); + + let mut buf = Vec::new(); + let mut e = Encoder::new(&mut buf); + if let Err(e) = msg.encode(&mut e) { + eprintln!("encode reply error: {e}"); + continue; + } + + // always broadcast replies + let dst = + SocketAddrV4::new(Ipv4Addr::new(255, 255, 255, 255), src.port()); + if let Err(e) = sock.send_to(&buf, dst) { + eprintln!("send reply error: {e}"); + } + } +} diff --git a/end-to-end-tests/src/helpers/cli.rs b/end-to-end-tests/src/helpers/cli.rs new file mode 100644 index 0000000000..049ab5a8b7 --- /dev/null +++ b/end-to-end-tests/src/helpers/cli.rs @@ -0,0 +1,22 @@ +/// An Oxide color theme for your clap-based CLIs +pub fn oxide_cli_style() -> clap::builder::Styles { + clap::builder::Styles::styled() + .header(anstyle::Style::new().bold().underline().fg_color(Some( + anstyle::Color::Rgb(anstyle::RgbColor(245, 207, 101)), + ))) + .literal(anstyle::Style::new().bold().fg_color(Some( + anstyle::Color::Rgb(anstyle::RgbColor(72, 213, 151)), + ))) + .invalid(anstyle::Style::new().bold().fg_color(Some( + anstyle::Color::Rgb(anstyle::RgbColor(72, 213, 151)), + ))) + .valid(anstyle::Style::new().bold().fg_color(Some( + anstyle::Color::Rgb(anstyle::RgbColor(72, 213, 151)), + ))) + .usage(anstyle::Style::new().bold().fg_color(Some( + anstyle::Color::Rgb(anstyle::RgbColor(245, 207, 101)), + ))) + .error(anstyle::Style::new().bold().fg_color(Some( + anstyle::Color::Rgb(anstyle::RgbColor(232, 104, 134)), + ))) +} diff --git
a/end-to-end-tests/src/helpers/icmp.rs b/end-to-end-tests/src/helpers/icmp.rs new file mode 100644 index 0000000000..42617801d8 --- /dev/null +++ b/end-to-end-tests/src/helpers/icmp.rs @@ -0,0 +1,306 @@ +use colored::*; +use internet_checksum::Checksum; +use serde::{Deserialize, Serialize}; +use socket2::{Domain, Protocol, SockAddr, Socket, Type}; +use std::collections::BTreeMap; +use std::mem::MaybeUninit; +use std::net::{Ipv4Addr, SocketAddrV4}; +use std::sync::{Arc, Mutex}; +use std::thread::{sleep, spawn}; +use std::time::{Duration, Instant}; + +const HIDE_CURSOR: &str = "\x1b[?25l"; +const SHOW_CURSOR: &str = "\x1b[?25h"; +const MOVE_CURSOR_UP: &str = "\x1b[A"; + +const ICMP_ECHO_TYPE: u8 = 8; +const ICMP_ECHO_CODE: u8 = 0; + +#[derive(Debug, Serialize, Deserialize)] +struct EchoRequest { + typ: u8, + code: u8, + checksum: u16, + identifier: u16, + sequence_number: u16, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct Report { + pub v4: Vec<Ping4State>, +} + +/// Run a ping test against the provided destination addresses, with the +/// specified time-to-live (ttl) at a given rate in packets per second +/// (pps) for the specified duration. +pub fn ping4_test_run( + dst: &[Ipv4Addr], + ttl: u32, + pps: usize, + duration: Duration, +) -> Report { + let p = Pinger4::new(ttl); + for dst in dst { + // use a random number for the ICMP id + p.add_target(rand::random(), *dst, pps, duration); + } + // Use an ASCII code to hide the blinking cursor as it makes the output hard + // to read. + print!("{HIDE_CURSOR}"); + p.clone().show(); + // wait for the test to conclude plus a bit of buffer time for packets in + // flight. + sleep(duration + Duration::from_millis(250)); + for _ in 0..p.targets.lock().unwrap().len() { + println!(); + } + // turn the blinky cursor back on + print!("{SHOW_CURSOR}"); + + // return a report to the caller + let v4 = p.targets.lock().unwrap().values().copied().collect(); + Report { v4 } +} + +struct Pinger4 { + sock: Socket, + targets: Mutex<BTreeMap<u16, Ping4State>>, +} + +/// Running results for an IPv4 ping test. +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +pub struct Ping4State { + /// Destination address of the ping test. + pub dest: Ipv4Addr, + /// Low water mark for ping round trip times. + pub low: Duration, + /// High water mark for ping round trip times. + pub high: Duration, + /// Summation of ping round trip times. + pub sum: Duration, + /// The last recorded ping round trip time. + pub current: Option<Duration>, + /// The number of ICMP packets considered lost. Does not start ticking + /// until at least one reply has been received. + pub lost: usize, + /// The number of packets sent. + pub tx_count: u16, + /// The number of packets received. + pub rx_count: u16, + /// The last time a packet was sent. + #[serde(skip)] + pub sent: Option<Instant>, + /// The transmit counter value when we received the first reply.
+ #[serde(skip)] + pub first: u16, +} + +impl Ping4State { + fn new(addr: Ipv4Addr) -> Self { + Self { + dest: addr, + low: Duration::default(), + high: Duration::default(), + sum: Duration::default(), + current: None, + lost: 0, + tx_count: 0, + rx_count: 0, + sent: None, + first: 0, + } + } +} + +impl Pinger4 { + fn new(ttl: u32) -> Arc<Self> { + let sock = Socket::new(Domain::IPV4, Type::RAW, Some(Protocol::ICMPV4)) + .unwrap(); + sock.set_ttl(ttl).unwrap(); + let s = Arc::new(Self { sock, targets: Mutex::new(BTreeMap::new()) }); + s.clone().rx(); + s.clone().count_lost(); + s + } + + fn show(self: Arc<Self>) { + println!( + "{:15} {:7} {:7} {:7} {:7} {:7} {:7} {}", + "addr".dimmed(), + "low".dimmed(), + "avg".dimmed(), + "high".dimmed(), + "last".dimmed(), + "sent".dimmed(), + "received".dimmed(), + "lost".dimmed() + ); + // run the reporting on a background thread + spawn(move || loop { + // print a status line for each target + for (_id, t) in self.targets.lock().unwrap().iter() { + println!( + "{:15} {:7} {:7} {:7} {:7} {:7} {:7} {:<7}", + t.dest.to_string().cyan(), + format!("{:.3}", (t.low.as_micros() as f32 / 1000.0)), + if t.rx_count == 0 { + format!("{:.3}", 0.0) + } else { + format!( + "{:.3}", + (t.sum.as_micros() as f32 + / 1000.0 + / t.rx_count as f32) + ) + }, + format!("{:.3}", (t.high.as_micros() as f32 / 1000.0)), + match t.current { + Some(dt) => + format!("{:.3}", (dt.as_micros() as f32 / 1000.0)), + None => format!("{:.3}", 0.0), + }, + t.tx_count.to_string(), + t.rx_count.to_string(), + if t.lost == 0 { + t.lost.to_string().green() + } else { + t.lost.to_string().red() + }, + ); + } + // move the cursor back to the top for another round of reporting + for _ in 0..self.targets.lock().unwrap().len() { + print!("{MOVE_CURSOR_UP}"); + } + print!("\r"); + + sleep(Duration::from_millis(100)); + }); + } + + fn add_target( + self: &Arc<Self>, + id: u16, + addr: Ipv4Addr, + pps: usize, + duration: Duration, + ) { + self.targets.lock().unwrap().insert(id, Ping4State::new(addr)); + let interval = Duration::from_secs_f64(1.0 / pps as f64); + self.clone().tx(id, addr, interval, duration); + } + + fn tx( + self: Arc<Self>, + id: u16, + dst: Ipv4Addr, + interval: Duration, + duration: Duration, + ) { + let mut seq = 0u16; + let stop = Instant::now() + duration; + // send ICMP test packets on a background thread + spawn(move || loop { + if Instant::now() >= stop { + break; + } + let mut c = Checksum::new(); + c.add_bytes(&[ICMP_ECHO_TYPE, ICMP_ECHO_CODE]); + c.add_bytes(&id.to_be_bytes()); + c.add_bytes(&seq.to_be_bytes()); + let pkt = EchoRequest { + typ: ICMP_ECHO_TYPE, + code: ICMP_ECHO_CODE, + checksum: u16::from_be_bytes(c.checksum()), + identifier: id, + sequence_number: seq, + }; + let msg = ispf::to_bytes_be(&pkt).unwrap(); + + match self.targets.lock().unwrap().get_mut(&id) { + Some(ref mut tgt) => { + tgt.sent = Some(Instant::now()); + tgt.tx_count = seq; + let sa: SockAddr = SocketAddrV4::new(dst, 0).into(); + self.sock.send_to(&msg, &sa).unwrap(); + } + None => continue, + } + + seq += 1; + sleep(interval); + }); + } + + // At the end of the day this is not strictly necessary for the final + // report. But it's really nice for interactive use to have a live + // ticker for lost packet count. + fn count_lost(self: Arc<Self>) { + spawn(move || loop { + for (_, tgt) in self.targets.lock().unwrap().iter_mut() { + // Only start considering packets lost after the first packet + // is received.
This allows the remote endpoint time to come + // online without considering initial packets lost while it's + // coming up. + if tgt.first != 0 { + tgt.lost = tgt + .tx_count + .saturating_sub(tgt.first) + .saturating_sub(tgt.rx_count) + as usize; + } + } + sleep(Duration::from_millis(10)); + }); + } + + fn rx(self: Arc<Self>) { + // Spawn a background thread to receive ICMP replies and do the + // necessary accounting. + spawn(move || loop { + let mut ubuf = [MaybeUninit::new(0); 10240]; + if let Ok((sz, _)) = self.sock.recv_from(&mut ubuf) { + let buf = unsafe { &slice_assume_init_ref(&ubuf[..sz]) }; + let msg: EchoRequest = match ispf::from_bytes_be(&buf[20..sz]) { + Ok(msg) => msg, + Err(_) => { + continue; + } + }; + // correlate the ICMP id with a target + match self.targets.lock().unwrap().get_mut(&msg.identifier) { + Some(ref mut target) => match target.sent { + Some(ref mut sent) => { + let t1 = Instant::now(); + let dt = t1 - *sent; + target.current = Some(dt); + if target.low == Duration::ZERO || dt < target.low { + target.low = dt; + } + if dt > target.high { + target.high = dt; + } + target.sum += dt; + target.current = Some(dt); + target.rx_count += 1; + if target.first == 0 { + target.first = target.tx_count; + } + } + None => { + println!("no sent"); + } + }, + None => { + println!("no target {}", msg.identifier); + } + } + } + }); + } +} + +// TODO: Use `MaybeUninit::slice_assume_init_ref` once it has stabilized +unsafe fn slice_assume_init_ref<T>(slice: &[MaybeUninit<T>]) -> &[T] { + unsafe { &*(slice as *const [MaybeUninit<T>] as *const [T]) } +} diff --git a/end-to-end-tests/src/helpers/mod.rs b/end-to-end-tests/src/helpers/mod.rs index db03973555..b7cd6d5574 100644 --- a/end-to-end-tests/src/helpers/mod.rs +++ b/end-to-end-tests/src/helpers/mod.rs @@ -1,4 +1,6 @@ +pub mod cli; pub mod ctx; +pub mod icmp; use self::ctx::nexus_addr; use anyhow::{bail, Result}; diff --git a/end-to-end-tests/src/instance_launch.rs b/end-to-end-tests/src/instance_launch.rs index f27261e82d..1aae46fe98 100644 --- a/end-to-end-tests/src/instance_launch.rs +++ b/end-to-end-tests/src/instance_launch.rs @@ -92,6 +92,7 @@ async fn instance_launch() -> Result<()> { .first() .context("no external IPs")? .clone(); + let ExternalIp::Ephemeral { ip: ip_addr } = ip_addr else { anyhow::bail!("IP bound to instance was not ephemeral as required.") }; diff --git a/illumos-utils/src/running_zone.rs b/illumos-utils/src/running_zone.rs index 02302347cd..0dd8f85e4e 100644 --- a/illumos-utils/src/running_zone.rs +++ b/illumos-utils/src/running_zone.rs @@ -912,10 +912,16 @@ impl RunningZone { Ok(()) } + /// Return a reference to the links for this zone. pub fn links(&self) -> &Vec<Link> { &self.inner.links } + /// Return a mutable reference to the links for this zone. + pub fn links_mut(&mut self) -> &mut Vec<Link> { + &mut self.inner.links + } + /// Return the running processes associated with all the SMF services this /// zone is intended to run. pub fn service_processes( diff --git a/installinator/src/dispatch.rs b/installinator/src/dispatch.rs index 9bec14664c..1fcf351a9b 100644 --- a/installinator/src/dispatch.rs +++ b/installinator/src/dispatch.rs @@ -151,13 +151,13 @@ struct InstallOpts { #[clap(long)] install_on_gimlet: bool, - //TODO(ry) this probably needs to get plumbed somewhere instead of relying + //TODO this probably needs to get plumbed somewhere instead of relying //on a default. /// The first gimlet data link to use.
#[clap(long, default_value = "cxgbe0")] data_link0: String, - //TODO(ry) this probably needs to get plumbed somewhere instead of relying + //TODO this probably needs to get plumbed somewhere instead of relying //on a default. /// The second gimlet data link to use. #[clap(long, default_value = "cxgbe1")] diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 45d1c34382..0712ba6743 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -35,6 +35,7 @@ hyper.workspace = true illumos-utils.workspace = true internal-dns.workspace = true ipnetwork.workspace = true +itertools.workspace = true macaddr.workspace = true mime_guess.workspace = true # Not under "dev-dependencies"; these also need to be implemented for @@ -104,7 +105,6 @@ diesel.workspace = true dns-server.workspace = true expectorate.workspace = true hyper-rustls.workspace = true -itertools.workspace = true gateway-messages.workspace = true gateway-test-utils.workspace = true hubtools.workspace = true diff --git a/nexus/db-model/src/deployment.rs b/nexus/db-model/src/deployment.rs index a1f285fbef..e9a650812b 100644 --- a/nexus/db-model/src/deployment.rs +++ b/nexus/db-model/src/deployment.rs @@ -15,6 +15,7 @@ use crate::{ipv6, Generation, MacAddr, Name, SqlU16, SqlU32, SqlU8}; use chrono::{DateTime, Utc}; use ipnetwork::IpNetwork; use nexus_types::deployment::BlueprintTarget; +use omicron_common::api::internal::shared::NetworkInterface; use uuid::Uuid; /// See [`nexus_types::deployment::Blueprint`]. @@ -249,7 +250,7 @@ impl BpOmicronZoneNic { pub fn into_network_interface_for_zone( self, zone_id: Uuid, - ) -> Result { + ) -> Result { let zone_nic = OmicronZoneNic::from(self); zone_nic.into_network_interface_for_zone(zone_id) } diff --git a/nexus/db-model/src/external_ip.rs b/nexus/db-model/src/external_ip.rs index 0f484f7610..337e7ef2a7 100644 --- a/nexus/db-model/src/external_ip.rs +++ b/nexus/db-model/src/external_ip.rs @@ -22,6 +22,7 @@ use nexus_types::external_api::views; use omicron_common::address::NUM_SOURCE_NAT_PORTS; use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadata; +use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; use sled_agent_client::types::InstanceExternalIpBody; @@ -34,7 +35,7 @@ impl_enum_type!( #[diesel(postgres_type(name = "ip_kind", schema = "public"))] pub struct IpKindEnum; - #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq, Eq, Deserialize, Serialize)] + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[diesel(sql_type = IpKindEnum)] pub enum IpKind; @@ -48,7 +49,7 @@ impl_enum_type!( #[diesel(postgres_type(name = "ip_attach_state"))] pub struct IpAttachStateEnum; - #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq, Eq, Deserialize, Serialize)] + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] #[diesel(sql_type = IpAttachStateEnum)] pub enum IpAttachState; @@ -95,10 +96,11 @@ impl std::fmt::Display for IpKind { Selectable, Queryable, Insertable, - Deserialize, - Serialize, PartialEq, Eq, + Serialize, + Deserialize, + JsonSchema, )] #[diesel(table_name = external_ip)] pub struct ExternalIp { @@ -125,6 +127,7 @@ pub struct ExternalIp { // Only Some(_) for instance Floating IPs pub project_id: Option, pub state: IpAttachState, + pub is_probe: bool, } /// A view type constructed from `ExternalIp` used to represent Floating IP @@ -171,6 +174,7 @@ pub struct IncompleteExternalIp { time_created: 
DateTime<Utc>, kind: IpKind, is_service: bool, + is_probe: bool, parent_id: Option<Uuid>, pool_id: Uuid, project_id: Option<Uuid>, @@ -195,6 +199,7 @@ impl IncompleteExternalIp { time_created: Utc::now(), kind, is_service: false, + is_probe: false, parent_id: Some(instance_id), pool_id, project_id: None, @@ -214,6 +219,30 @@ impl IncompleteExternalIp { kind, is_service: false, parent_id: None, + is_probe: false, pool_id, project_id: None, explicit_ip: None, explicit_port_range: None, state: kind.initial_state(), } } + + pub fn for_ephemeral_probe( + id: Uuid, + probe_id: Uuid, + pool_id: Uuid, + ) -> Self { + let kind = IpKind::Ephemeral; + Self { + id, + name: None, + description: None, + time_created: Utc::now(), + kind, + is_service: false, + is_probe: true, + parent_id: Some(probe_id), pool_id, project_id: None, explicit_ip: None, @@ -237,6 +266,7 @@ impl IncompleteExternalIp { time_created: Utc::now(), kind, is_service: false, + is_probe: false, parent_id: None, pool_id, project_id: Some(project_id), @@ -262,6 +292,7 @@ impl IncompleteExternalIp { time_created: Utc::now(), kind, is_service: false, + is_probe: false, parent_id: None, pool_id, project_id: Some(project_id), @@ -286,6 +317,7 @@ impl IncompleteExternalIp { time_created: Utc::now(), kind: IpKind::Floating, is_service: true, + is_probe: false, parent_id: Some(service_id), pool_id, project_id: None, @@ -317,6 +349,7 @@ impl IncompleteExternalIp { time_created: Utc::now(), kind, is_service: true, + is_probe: false, parent_id: Some(service_id), pool_id, project_id: None, @@ -341,6 +374,7 @@ impl IncompleteExternalIp { time_created: Utc::now(), kind, is_service: true, + is_probe: false, parent_id: Some(service_id), pool_id, project_id: None, @@ -359,6 +393,7 @@ impl IncompleteExternalIp { time_created: Utc::now(), kind, is_service: true, + is_probe: false, parent_id: Some(service_id), pool_id, project_id: None, @@ -392,6 +427,10 @@ impl IncompleteExternalIp { &self.is_service } + pub fn is_probe(&self) -> &bool { + &self.is_probe + } + pub fn parent_id(&self) -> &Option<Uuid> { &self.parent_id } diff --git a/nexus/db-model/src/inventory.rs b/nexus/db-model/src/inventory.rs index 0992eb60b5..2f69fd998c 100644 --- a/nexus/db-model/src/inventory.rs +++ b/nexus/db-model/src/inventory.rs @@ -28,6 +28,7 @@ use ipnetwork::IpNetwork; use nexus_types::inventory::{ BaseboardId, Caboose, Collection, PowerState, RotPage, RotSlot, }; +use omicron_common::api::internal::shared::NetworkInterface; use uuid::Uuid; // See [`nexus_types::inventory::PowerState`].
@@ -872,7 +873,7 @@ impl InvOmicronZoneNic { pub fn into_network_interface_for_zone( self, zone_id: Uuid, - ) -> Result<nexus_types::inventory::NetworkInterface, anyhow::Error> { + ) -> Result<NetworkInterface, anyhow::Error> { let zone_nic = OmicronZoneNic::from(self); zone_nic.into_network_interface_for_zone(zone_id) } diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index 07c9f5eec6..5c89134b78 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -45,6 +45,7 @@ mod network_interface; mod oximeter_info; mod physical_disk; mod physical_disk_kind; +mod probe; mod producer_endpoint; mod project; mod semver_version; @@ -145,6 +146,7 @@ pub use network_interface::*; pub use oximeter_info::*; pub use physical_disk::*; pub use physical_disk_kind::*; +pub use probe::*; pub use producer_endpoint::*; pub use project::*; pub use quota::*; diff --git a/nexus/db-model/src/network_interface.rs b/nexus/db-model/src/network_interface.rs index 72752ae3f8..fdcfcbf588 100644 --- a/nexus/db-model/src/network_interface.rs +++ b/nexus/db-model/src/network_interface.rs @@ -14,7 +14,7 @@ use db_macros::Resource; use diesel::AsChangeset; use nexus_types::external_api::params; use nexus_types::identity::Resource; -use omicron_common::api::external; +use omicron_common::api::{external, internal}; use uuid::Uuid; /// The max number of interfaces that may be associated with a resource, @@ -35,6 +35,7 @@ impl_enum_type! { Instance => b"instance" Service => b"service" + Probe => b"probe" } /// Generic Network Interface DB model. @@ -63,6 +64,41 @@ pub struct NetworkInterface { pub primary: bool, } +impl NetworkInterface { + pub fn into_internal( + self, + subnet: external::IpNet, + ) -> internal::shared::NetworkInterface { + internal::shared::NetworkInterface { + id: self.id(), + kind: match self.kind { + NetworkInterfaceKind::Instance => { + internal::shared::NetworkInterfaceKind::Instance { + id: self.parent_id, + } + } + NetworkInterfaceKind::Service => { + internal::shared::NetworkInterfaceKind::Service { + id: self.parent_id, + } + } + NetworkInterfaceKind::Probe => { + internal::shared::NetworkInterfaceKind::Probe { + id: self.parent_id, + } + } + }, + name: self.name().clone(), + ip: self.ip.ip(), + mac: self.mac.into(), + subnet, + vni: external::Vni::try_from(0).unwrap(), + primary: self.primary, + slot: self.slot.try_into().unwrap(), + } + } +} + /// Instance Network Interface DB model. /// /// The underlying "table" (`instance_network_interface`) is actually a view @@ -244,6 +280,13 @@ impl IncompleteNetworkInterface { ))); } } + NetworkInterfaceKind::Probe => { + if !mac.is_guest() { + return Err(external::Error::invalid_request(format!( + "invalid MAC address {mac} for probe NIC", + ))); + } + } NetworkInterfaceKind::Service => { if !mac.is_system() { return Err(external::Error::invalid_request(format!( @@ -312,6 +355,26 @@ impl IncompleteNetworkInterface { Some(slot), ) } + + pub fn new_probe( + interface_id: Uuid, + probe_id: Uuid, + subnet: VpcSubnet, + identity: external::IdentityMetadataCreateParams, + ip: Option<std::net::IpAddr>, + mac: Option<external::MacAddr>, + ) -> Result<Self, external::Error> { + Self::new( + interface_id, + NetworkInterfaceKind::Probe, + probe_id, + subnet, + identity, + ip, + mac, + None, + ) + } } /// Describes a set of updates for the [`NetworkInterface`] model.
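One behavioral note on the validation in the network_interface.rs change above: probe NICs are checked exactly like instance NICs, i.e. the MAC must come from the guest range, while service NICs must come from the system range. Below is a minimal standalone sketch of that rule with simplified stand-ins; Mac and its single flag model the real MacAddr range predicates, and none of this is the actual nexus code.

// Illustrative sketch only: simplified stand-ins for the kind-based MAC
// validation added above.
#[derive(Clone, Copy, Debug)]
enum NicKind {
    Instance,
    Service,
    Probe,
}

// Stand-in for a MAC address type whose guest and system ranges are
// disjoint; a single flag takes the place of the real range check.
struct Mac {
    guest: bool,
}

impl Mac {
    fn is_guest(&self) -> bool {
        self.guest
    }
    fn is_system(&self) -> bool {
        !self.guest
    }
}

// Probe NICs are validated like instance NICs: both must carry guest-range
// MACs, while service NICs must carry system-range MACs.
fn validate_mac(kind: NicKind, mac: &Mac) -> Result<(), String> {
    match kind {
        NicKind::Instance | NicKind::Probe if !mac.is_guest() => {
            Err(format!("invalid MAC for {kind:?} NIC"))
        }
        NicKind::Service if !mac.is_system() => {
            Err(format!("invalid MAC for {kind:?} NIC"))
        }
        _ => Ok(()),
    }
}

fn main() {
    assert!(validate_mac(NicKind::Probe, &Mac { guest: true }).is_ok());
    assert!(validate_mac(NicKind::Probe, &Mac { guest: false }).is_err());
    assert!(validate_mac(NicKind::Service, &Mac { guest: true }).is_err());
}

The same guest/system split shows up again further down in NextMacAddress::new, which allocates probe MACs from the guest range.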
diff --git a/nexus/db-model/src/omicron_zone_config.rs b/nexus/db-model/src/omicron_zone_config.rs index f4726ccd92..ce3127a9b3 100644 --- a/nexus/db-model/src/omicron_zone_config.rs +++ b/nexus/db-model/src/omicron_zone_config.rs @@ -18,6 +18,9 @@ use crate::{ipv6, MacAddr, Name, SqlU16, SqlU32, SqlU8}; use anyhow::{anyhow, bail, ensure, Context}; use ipnetwork::IpNetwork; use nexus_types::inventory::OmicronZoneType; +use omicron_common::api::internal::shared::{ + NetworkInterface, NetworkInterfaceKind, +}; use uuid::Uuid; #[derive(Debug)] @@ -410,9 +413,7 @@ impl OmicronZoneNic { ensure!( matches!( nic.kind, - nexus_types::inventory::NetworkInterfaceKind::Service( - id - ) if id == zone.id + NetworkInterfaceKind::Service{ id } if id == zone.id ), "expected zone's NIC kind to be \"service\" and the \ id to match the zone's id ({})", @@ -424,7 +425,7 @@ impl OmicronZoneNic { name: Name::from(nic.name.clone()), ip: IpNetwork::from(nic.ip), mac: MacAddr::from(nic.mac), - subnet: IpNetwork::from(nic.subnet.clone()), + subnet: IpNetwork::from(nic.subnet), vni: SqlU32::from(u32::from(nic.vni)), is_primary: nic.primary, slot: SqlU8::from(nic.slot), @@ -437,13 +438,11 @@ impl OmicronZoneNic { pub(crate) fn into_network_interface_for_zone( self, zone_id: Uuid, - ) -> anyhow::Result<nexus_types::inventory::NetworkInterface> { - Ok(nexus_types::inventory::NetworkInterface { + ) -> anyhow::Result<NetworkInterface> { + Ok(NetworkInterface { id: self.id, ip: self.ip.ip(), - kind: nexus_types::inventory::NetworkInterfaceKind::Service( - zone_id, - ), + kind: NetworkInterfaceKind::Service { id: zone_id }, mac: *self.mac, name: self.name.into(), primary: self.is_primary, diff --git a/nexus/db-model/src/probe.rs b/nexus/db-model/src/probe.rs new file mode 100644 index 0000000000..be3576dfa0 --- /dev/null +++ b/nexus/db-model/src/probe.rs @@ -0,0 +1,50 @@ +use crate::schema::probe; +use db_macros::Resource; +use nexus_types::external_api::params; +use nexus_types::identity::Resource; +use omicron_common::api::external; +use omicron_common::api::external::IdentityMetadataCreateParams; +use serde::Deserialize; +use serde::Serialize; +use uuid::Uuid; + +#[derive( + Queryable, + Insertable, + Selectable, + Clone, + Debug, + Resource, + Serialize, + Deserialize, +)] +#[diesel(table_name = probe)] +pub struct Probe { + #[diesel(embed)] + pub identity: ProbeIdentity, + + pub project_id: Uuid, + pub sled: Uuid, +} + +impl Probe { + pub fn from_create(p: &params::ProbeCreate, project_id: Uuid) -> Self { + Self { + identity: ProbeIdentity::new( + Uuid::new_v4(), + IdentityMetadataCreateParams { + name: p.identity.name.clone(), + description: p.identity.description.clone(), + }, + ), + project_id, + sled: p.sled, + } + } +} + +impl Into<external::Probe> for Probe { + fn into(self) -> external::Probe { + external::Probe { identity: self.identity().clone(), sled: self.sled } + } +} diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 09bc963936..e2b918d805 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -13,7 +13,7 @@ use omicron_common::api::external::SemverVersion; /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(39, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(40, 0, 0); table! { disk (id) { @@ -591,6 +591,7 @@ table! { project_id -> Nullable<Uuid>, state -> crate::IpAttachStateEnum, + is_probe -> Bool, } } @@ -1520,6 +1521,19 @@ table! { } } +table!
{ + probe (id) { + id -> Uuid, + name -> Text, + description -> Text, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable<Timestamptz>, + project_id -> Uuid, + sled -> Uuid, + } +} + table! { db_metadata (singleton) { singleton -> Bool, diff --git a/nexus/db-model/src/unsigned.rs b/nexus/db-model/src/unsigned.rs index b4e9db2308..920cad1cff 100644 --- a/nexus/db-model/src/unsigned.rs +++ b/nexus/db-model/src/unsigned.rs @@ -7,6 +7,7 @@ use diesel::deserialize::{self, FromSql}; use diesel::pg::Pg; use diesel::serialize::{self, ToSql}; use diesel::sql_types; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::convert::TryFrom; @@ -76,6 +77,7 @@ where FromSqlRow, Serialize, Deserialize, + JsonSchema, )] #[diesel(sql_type = sql_types::Int4)] #[repr(transparent)] diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml index e673003036..a33ca24c84 100644 --- a/nexus/db-queries/Cargo.toml +++ b/nexus/db-queries/Cargo.toml @@ -35,6 +35,7 @@ pq-sys = "*" rand.workspace = true ref-cast.workspace = true samael.workspace = true +schemars.workspace = true serde.workspace = true serde_json.workspace = true serde_urlencoded.workspace = true diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index f561a024e8..017d2f22d2 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -46,6 +46,7 @@ use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; +use omicron_common::api::external::NameOrId; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use ref_cast::RefCast; @@ -71,6 +72,42 @@ impl DataStore { self.allocate_external_ip(opctx, data).await } + /// Create an Ephemeral IP address for a probe. + pub async fn allocate_probe_ephemeral_ip( + &self, + opctx: &OpContext, + ip_id: Uuid, + probe_id: Uuid, + pool_name: Option<NameOrId>, + ) -> CreateResult<ExternalIp> { + let pool = match pool_name { + Some(NameOrId::Name(name)) => { + let (.., pool) = LookupPath::new(opctx, &self) + .ip_pool_name(&name.into()) + .fetch_for(authz::Action::CreateChild) + .await?; + pool + } + Some(NameOrId::Id(id)) => { + let (.., pool) = LookupPath::new(opctx, &self) + .ip_pool_id(id) + .fetch_for(authz::Action::CreateChild) + .await?; + pool + } + // If no name given, use the default pool + None => { + let (.., pool) = self.ip_pools_fetch_default(&opctx).await?; + pool + } + }; + + let pool_id = pool.identity.id; + let data = + IncompleteExternalIp::for_ephemeral_probe(ip_id, probe_id, pool_id); + self.allocate_external_ip(opctx, data).await + } + /// Create an Ephemeral IP address for an instance. /// /// For consistency between instance create and External IP attach/detach @@ -725,6 +762,7 @@ impl DataStore { diesel::update(dsl::external_ip) .filter(dsl::time_deleted.is_null()) .filter(dsl::is_service.eq(false)) + .filter(dsl::is_probe.eq(false)) .filter(dsl::parent_id.eq(instance_id)) .filter(dsl::kind.ne(IpKind::Floating)) .set(( @@ -736,7 +774,31 @@ .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } - /// Detach all Floating IP address from their parent instance. + /// Delete all external IP addresses associated with the provided probe + /// ID.
+ /// + /// This method returns the number of records deleted, rather than the usual + /// `DeleteResult`. That's mostly useful for tests, but could be important + /// if callers have some invariants they'd like to check. + pub async fn deallocate_external_ip_by_probe_id( + &self, + opctx: &OpContext, + probe_id: Uuid, + ) -> Result<usize, Error> { + use db::schema::external_ip::dsl; + let now = Utc::now(); + diesel::update(dsl::external_ip) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::is_probe.eq(true)) + .filter(dsl::parent_id.eq(probe_id)) + .filter(dsl::kind.eq(IpKind::Ephemeral)) + .set(dsl::time_deleted.eq(now)) + .execute_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// Detach an individual Floating IP address from its parent instance. /// /// As in `deallocate_external_ip_by_instance_id`, this method returns the /// number of records altered, rather than an `UpdateResult`. @@ -774,6 +836,7 @@ use db::schema::external_ip::dsl; dsl::external_ip .filter(dsl::is_service.eq(false)) + .filter(dsl::is_probe.eq(false)) .filter(dsl::parent_id.eq(instance_id)) .filter(dsl::time_deleted.is_null()) .select(ExternalIp::as_select()) @@ -796,6 +859,23 @@ .find(|v| v.kind == IpKind::Ephemeral)) } + /// Fetch all external IP addresses of any kind for the provided probe + pub async fn probe_lookup_external_ips( + &self, + opctx: &OpContext, + probe_id: Uuid, + ) -> LookupResult<Vec<ExternalIp>> { + use db::schema::external_ip::dsl; + dsl::external_ip + .filter(dsl::is_probe.eq(true)) + .filter(dsl::parent_id.eq(probe_id)) + .filter(dsl::time_deleted.is_null()) + .select(ExternalIp::as_select()) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// Fetch all Floating IP addresses for the provided project.
pub async fn floating_ips_list( &self, diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index b164186fdf..475fb27df1 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -70,6 +70,7 @@ mod ipv4_nat_entry; mod network_interface; mod oximeter; mod physical_disk; +mod probe; mod project; mod quota; mod rack; @@ -106,6 +107,7 @@ pub use db_metadata::{ pub use dns::DnsVersionUpdateBuilder; pub use instance::InstanceAndActiveVmm; pub use inventory::DataStoreInventoryTest; +pub use probe::ProbeInfo; pub use rack::RackInit; pub use silo::Discoverability; pub use switch_port::SwitchPortSettingsCombinedResult; @@ -1681,6 +1683,7 @@ mod test { first_port: crate::db::model::SqlU16(0), last_port: crate::db::model::SqlU16(10), state: nexus_db_model::IpAttachState::Attached, + is_probe: false, }) .collect::<Vec<_>>(); diesel::insert_into(dsl::external_ip) @@ -1743,6 +1746,7 @@ mod test { first_port: crate::db::model::SqlU16(0), last_port: crate::db::model::SqlU16(10), state: nexus_db_model::IpAttachState::Attached, + is_probe: false, }; diesel::insert_into(dsl::external_ip) .values(ip.clone()) @@ -1814,6 +1818,7 @@ mod test { first_port: crate::db::model::SqlU16(0), last_port: crate::db::model::SqlU16(10), state: nexus_db_model::IpAttachState::Attached, + is_probe: false, }; // Combinations of NULL and non-NULL for: diff --git a/nexus/db-queries/src/db/datastore/network_interface.rs b/nexus/db-queries/src/db/datastore/network_interface.rs index f2782e8f67..1bccca4e97 100644 --- a/nexus/db-queries/src/db/datastore/network_interface.rs +++ b/nexus/db-queries/src/db/datastore/network_interface.rs @@ -35,11 +35,11 @@ use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; +use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use ref_cast::RefCast; -use sled_agent_client::types as sled_client_types; use uuid::Uuid; /// OPTE requires information that's currently split across the network @@ -59,8 +59,10 @@ struct NicInfo { slot: i16, } -impl From<NicInfo> for sled_client_types::NetworkInterface { - fn from(nic: NicInfo) -> sled_client_types::NetworkInterface { +impl From<NicInfo> for omicron_common::api::internal::shared::NetworkInterface { + fn from( + nic: NicInfo, + ) -> omicron_common::api::internal::shared::NetworkInterface { let ip_subnet = if nic.ip.is_ipv4() { external::IpNet::V4(nic.ipv4_block.0) } else { @@ -68,19 +70,22 @@ impl From<NicInfo> for sled_client_types::NetworkInterface { let kind = match nic.kind { NetworkInterfaceKind::Instance => { - sled_client_types::NetworkInterfaceKind::Instance(nic.parent_id) + omicron_common::api::internal::shared::NetworkInterfaceKind::Instance{ id: nic.parent_id } } NetworkInterfaceKind::Service => { - sled_client_types::NetworkInterfaceKind::Service(nic.parent_id) + omicron_common::api::internal::shared::NetworkInterfaceKind::Service{ id: nic.parent_id } + } + NetworkInterfaceKind::Probe => { + omicron_common::api::internal::shared::NetworkInterfaceKind::Probe{ id: nic.parent_id } } }; - sled_client_types::NetworkInterface { + omicron_common::api::internal::shared::NetworkInterface { id: nic.id, kind, name: nic.name.into(), ip: nic.ip.ip(), mac: nic.mac.0, - subnet: sled_client_types::IpNet::from(ip_subnet),
+ subnet: ip_subnet, vni: nic.vni.0, primary: nic.primary, slot: u8::try_from(nic.slot).unwrap(), @@ -108,6 +113,14 @@ impl DataStore { self.instance_create_network_interface_raw(&opctx, interface).await } + pub async fn probe_create_network_interface( + &self, + opctx: &OpContext, + interface: IncompleteNetworkInterface, + ) -> Result<NetworkInterface, network_interface::InsertError> { + self.create_network_interface_raw(&opctx, interface).await + } + pub(crate) async fn instance_create_network_interface_raw( &self, opctx: &OpContext, @@ -271,6 +284,33 @@ impl DataStore { Ok(()) } + /// Delete all network interfaces attached to the given probe. + pub async fn probe_delete_all_network_interfaces( + &self, + opctx: &OpContext, + probe_id: Uuid, + ) -> DeleteResult { + use db::schema::network_interface::dsl; + let now = Utc::now(); + diesel::update(dsl::network_interface) + .filter(dsl::parent_id.eq(probe_id)) + .filter(dsl::kind.eq(NetworkInterfaceKind::Probe)) + .filter(dsl::time_deleted.is_null()) + .set(dsl::time_deleted.eq(now)) + .execute_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::Probe, + LookupType::ById(probe_id), + ), + ) + })?; + Ok(()) + } + /// Delete an `InstanceNetworkInterface` attached to a provided instance. /// /// Note that the primary interface for an instance cannot be deleted if @@ -313,7 +353,8 @@ &self, opctx: &OpContext, partial_query: BoxedQuery<db::schema::network_interface::table>, - ) -> ListResultVec<sled_client_types::NetworkInterface> { + ) -> ListResultVec<omicron_common::api::internal::shared::NetworkInterface> + { use db::schema::network_interface; use db::schema::vpc; use db::schema::vpc_subnet; @@ -349,7 +390,7 @@ .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; Ok(rows .into_iter() - .map(sled_client_types::NetworkInterface::from) + .map(omicron_common::api::internal::shared::NetworkInterface::from) .collect()) } @@ -359,7 +400,8 @@ &self, opctx: &OpContext, authz_instance: &authz::Instance, - ) -> ListResultVec<sled_client_types::NetworkInterface> { + ) -> ListResultVec<omicron_common::api::internal::shared::NetworkInterface> + { opctx.authorize(authz::Action::ListChildren, authz_instance).await?; use db::schema::network_interface; @@ -375,13 +417,31 @@ .await } + pub async fn derive_probe_network_interface_info( + &self, + opctx: &OpContext, + probe_id: Uuid, + ) -> ListResultVec<omicron_common::api::internal::shared::NetworkInterface> + { + use db::schema::network_interface; + self.derive_network_interface_info( + opctx, + network_interface::table + .filter(network_interface::parent_id.eq(probe_id)) + .filter(network_interface::kind.eq(NetworkInterfaceKind::Probe)) + .into_boxed(), + ) + .await + } + /// Return information about all VNICs connected to a VPC required /// for the sled agent to instantiate firewall rules via OPTE. pub async fn derive_vpc_network_interface_info( &self, opctx: &OpContext, authz_vpc: &authz::Vpc, - ) -> ListResultVec<sled_client_types::NetworkInterface> { + ) -> ListResultVec<omicron_common::api::internal::shared::NetworkInterface> + { opctx.authorize(authz::Action::ListChildren, authz_vpc).await?; use db::schema::network_interface; @@ -400,7 +460,8 @@ &self, opctx: &OpContext, authz_subnet: &authz::VpcSubnet, - ) -> ListResultVec<sled_client_types::NetworkInterface> { + ) -> ListResultVec<omicron_common::api::internal::shared::NetworkInterface> + { opctx.authorize(authz::Action::ListChildren, authz_subnet).await?; use db::schema::network_interface; @@ -443,6 +504,25 @@ .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + /// Get network interface associated with a given probe.
+ pub async fn probe_get_network_interface( + &self, + opctx: &OpContext, + probe_id: Uuid, + ) -> LookupResult<NetworkInterface> { + use db::schema::network_interface::dsl; + + dsl::network_interface + .filter(dsl::time_deleted.is_null()) + .filter(dsl::parent_id.eq(probe_id)) + .select(NetworkInterface::as_select()) + .first_async::<NetworkInterface>( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + /// Update a network interface associated with a given instance. pub async fn instance_update_network_interface( &self, diff --git a/nexus/db-queries/src/db/datastore/probe.rs b/nexus/db-queries/src/db/datastore/probe.rs new file mode 100644 index 0000000000..f1e737e353 --- /dev/null +++ b/nexus/db-queries/src/db/datastore/probe.rs @@ -0,0 +1,390 @@ +use std::net::IpAddr; + +use crate::authz; +use crate::context::OpContext; +use crate::db; +use crate::db::datastore::DataStoreConnection; +use crate::db::error::public_error_from_diesel; +use crate::db::error::ErrorHandler; +use crate::db::lookup::LookupPath; +use crate::db::model::Name; +use crate::db::pagination::paginated; +use async_bb8_diesel::AsyncRunQueryDsl; +use chrono::Utc; +use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; +use nexus_db_model::IncompleteNetworkInterface; +use nexus_db_model::Probe; +use nexus_db_model::VpcSubnet; +use nexus_types::external_api::params; +use nexus_types::identity::Resource; +use omicron_common::api::external::http_pagination::PaginatedBy; +use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DataPageParams; +use omicron_common::api::external::DeleteResult; +use omicron_common::api::external::IdentityMetadataCreateParams; +use omicron_common::api::external::ListResultVec; +use omicron_common::api::external::LookupResult; +use omicron_common::api::external::LookupType; +use omicron_common::api::external::NameOrId; +use omicron_common::api::external::ResourceType; +use omicron_common::api::internal::shared::NetworkInterface; +use ref_cast::RefCast; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +#[derive(Debug, Clone, JsonSchema, Serialize, Deserialize)] +pub struct ProbeInfo { + pub id: Uuid, + pub name: Name, + sled: Uuid, + pub external_ips: Vec<ProbeExternalIp>, + pub interface: NetworkInterface, +} + +#[derive(Debug, Clone, JsonSchema, Serialize, Deserialize)] +pub struct ProbeExternalIp { + ip: IpAddr, + first_port: u16, + last_port: u16, + kind: IpKind, +} + +impl From<nexus_db_model::ExternalIp> for ProbeExternalIp { + fn from(value: nexus_db_model::ExternalIp) -> Self { + Self { + ip: value.ip.ip(), + first_port: value.first_port.0, + last_port: value.last_port.0, + kind: value.kind.into(), + } + } +} + +#[derive(Debug, Clone, JsonSchema, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum IpKind { + Snat, + Floating, + Ephemeral, +} + +impl From<nexus_db_model::IpKind> for IpKind { + fn from(value: nexus_db_model::IpKind) -> Self { + match value { + nexus_db_model::IpKind::SNat => Self::Snat, + nexus_db_model::IpKind::Ephemeral => Self::Ephemeral, + nexus_db_model::IpKind::Floating => Self::Floating, + } + } +} + +impl super::DataStore { + /// List the probes for the given project.
+ pub async fn probe_list( + &self, + opctx: &OpContext, + authz_project: &authz::Project, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec<ProbeInfo> { + opctx.authorize(authz::Action::ListChildren, authz_project).await?; + + use db::schema::probe::dsl; + use db::schema::vpc_subnet::dsl as vpc_subnet_dsl; + + let pool = self.pool_connection_authorized(opctx).await?; + + let probes = match pagparams { + PaginatedBy::Id(pagparams) => { + paginated(dsl::probe, dsl::id, &pagparams) + } + PaginatedBy::Name(pagparams) => paginated( + dsl::probe, + dsl::name, + &pagparams.map_name(|n| Name::ref_cast(n)), + ), + } + .filter(dsl::project_id.eq(authz_project.id())) + .filter(dsl::time_deleted.is_null()) + .select(Probe::as_select()) + .load_async(&*pool) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + let mut result = Vec::with_capacity(probes.len()); + + for probe in probes.into_iter() { + let external_ips = self + .probe_lookup_external_ips(opctx, probe.id()) + .await? + .into_iter() + .map(Into::into) + .collect(); + + let interface = + self.probe_get_network_interface(opctx, probe.id()).await?; + + let vni = self.resolve_vpc_to_vni(opctx, interface.vpc_id).await?; + + let db_subnet = vpc_subnet_dsl::vpc_subnet + .filter(vpc_subnet_dsl::id.eq(interface.subnet_id)) + .select(VpcSubnet::as_select()) + .first_async(&*pool) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })?; + + let mut interface: NetworkInterface = + interface.into_internal(db_subnet.ipv4_block.0.into()); + + interface.vni = vni.0; + + result.push(ProbeInfo { + id: probe.id(), + name: probe.name().clone().into(), + sled: probe.sled, + interface, + external_ips, + }) + } + + Ok(result) + } + + async fn resolve_probe_info( + &self, + opctx: &OpContext, + probe: &Probe, + pool: &DataStoreConnection<'_>, + ) -> LookupResult<ProbeInfo> { + use db::schema::vpc_subnet::dsl as vpc_subnet_dsl; + + let external_ips = self + .probe_lookup_external_ips(opctx, probe.id()) + .await? + .into_iter() + .map(Into::into) + .collect(); + + let interface = + self.probe_get_network_interface(opctx, probe.id()).await?; + + let db_subnet = vpc_subnet_dsl::vpc_subnet + .filter(vpc_subnet_dsl::id.eq(interface.subnet_id)) + .select(VpcSubnet::as_select()) + .first_async(&**pool) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + let vni = self.resolve_vpc_to_vni(opctx, interface.vpc_id).await?; + + let mut interface: NetworkInterface = + interface.into_internal(db_subnet.ipv4_block.0.into()); + interface.vni = vni.0; + + Ok(ProbeInfo { + id: probe.id(), + name: probe.name().clone().into(), + sled: probe.sled, + interface, + external_ips, + }) + } + + /// List the probes for a given sled. This is used by sled agents for + /// determining what probes they should be running.
+ pub async fn probe_list_for_sled( + &self, + sled: Uuid, + opctx: &OpContext, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec<ProbeInfo> { + use db::schema::probe::dsl; + + let pool = self.pool_connection_authorized(opctx).await?; + + let probes = paginated(dsl::probe, dsl::id, pagparams) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::sled.eq(sled)) + .select(Probe::as_select()) + .load_async(&*pool) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + let mut result = Vec::with_capacity(probes.len()); + + for probe in probes.into_iter() { + result.push(self.resolve_probe_info(opctx, &probe, &pool).await?); + } + + Ok(result) + } + + /// Get information about a particular probe given its name or id. + pub async fn probe_get( + &self, + opctx: &OpContext, + authz_project: &authz::Project, + name_or_id: &NameOrId, + ) -> LookupResult<ProbeInfo> { + use db::schema::probe; + use db::schema::probe::dsl; + let pool = self.pool_connection_authorized(opctx).await?; + + let name_or_id = name_or_id.clone(); + + let probe = match name_or_id { + NameOrId::Name(name) => dsl::probe + .filter(probe::name.eq(name.to_string())) + .filter(probe::time_deleted.is_null()) + .filter(probe::project_id.eq(authz_project.id())) + .select(Probe::as_select()) + .limit(1) + .first_async::<Probe>(&*pool) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::Probe, + LookupType::ByName(name.to_string()), + ), + ) + }), + NameOrId::Id(id) => dsl::probe + .filter(probe::id.eq(id)) + .filter(probe::project_id.eq(authz_project.id())) + .select(Probe::as_select()) + .limit(1) + .first_async::<Probe>(&*pool) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::Probe, + LookupType::ById(id), + ), + ) + }), + }?; + + self.resolve_probe_info(opctx, &probe, &pool).await + } + + /// Add a probe to the data store.
+ pub async fn probe_create( + &self, + opctx: &OpContext, + authz_project: &authz::Project, + new_probe: &params::ProbeCreate, + ) -> CreateResult<Probe> { + //TODO in transaction + use db::schema::probe::dsl; + let pool = self.pool_connection_authorized(opctx).await?; + + let probe = Probe::from_create(new_probe, authz_project.id()); + + let _eip = self + .allocate_probe_ephemeral_ip( + opctx, + Uuid::new_v4(), + probe.id(), + new_probe.ip_pool.clone().map(Into::into), + ) + .await?; + + let default_name = omicron_common::api::external::Name::try_from( + "default".to_string(), + ) + .unwrap(); + let internal_default_name = db::model::Name::from(default_name.clone()); + + let (.., db_subnet) = LookupPath::new(opctx, self) + .project_id(authz_project.id()) + .vpc_name(&internal_default_name) + .vpc_subnet_name(&internal_default_name) + .fetch() + .await?; + + let incomplete = IncompleteNetworkInterface::new_probe( + Uuid::new_v4(), + probe.id(), + db_subnet, + IdentityMetadataCreateParams { + name: probe.name().clone(), + description: format!( + "default primary interface for {}", + probe.name(), + ), + }, + None, // Request IP address assignment + None, // Request MAC address assignment + )?; + + let _ifx = self + .probe_create_network_interface(opctx, incomplete) + .await + .map_err(|e| { + omicron_common::api::external::Error::InternalError { + internal_message: format!( + "create network interface: {e:?}" + ), + } + })?; + + let result = diesel::insert_into(dsl::probe) + .values(probe.clone()) + .returning(Probe::as_returning()) + .get_result_async(&*pool) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(result) + } + + /// Remove a probe from the data store. + pub async fn probe_delete( + &self, + opctx: &OpContext, + authz_project: &authz::Project, + name_or_id: &NameOrId, + ) -> DeleteResult { + use db::schema::probe; + use db::schema::probe::dsl; + let pool = self.pool_connection_authorized(opctx).await?; + + let name_or_id = name_or_id.clone(); + + //TODO in transaction + let id = match name_or_id { + NameOrId::Name(name) => dsl::probe + .filter(probe::name.eq(name.to_string())) + .filter(probe::time_deleted.is_null()) + .filter(probe::project_id.eq(authz_project.id())) + .select(probe::id) + .limit(1) + .first_async::<Uuid>(&*pool) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })?, + NameOrId::Id(id) => id, + }; + + self.deallocate_external_ip_by_probe_id(opctx, id).await?; + + self.probe_delete_all_network_interfaces(opctx, id).await?; + + diesel::update(dsl::probe) + .filter(dsl::id.eq(id)) + .filter(dsl::project_id.eq(authz_project.id())) + .set(dsl::time_deleted.eq(Utc::now())) + .execute_async(&*pool) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(()) + } +} diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs index cd972c0941..8c5d5f4f46 100644 --- a/nexus/db-queries/src/db/datastore/vpc.rs +++ b/nexus/db-queries/src/db/datastore/vpc.rs @@ -1212,6 +1212,30 @@ impl DataStore { ) }) } + + /// Look up a VNI by VPC. + pub async fn resolve_vpc_to_vni( + &self, + opctx: &OpContext, + vpc_id: Uuid, + ) -> LookupResult<Vni> { + use db::schema::vpc::dsl; + dsl::vpc + .filter(dsl::id.eq(vpc_id)) + .filter(dsl::time_deleted.is_null()) + .select(dsl::vni) + .get_result_async(&*self.pool_connection_authorized(opctx).await?)
+ .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::Vpc, + LookupType::ByCompositeId("VNI".to_string()), + ), + ) + }) + } } #[cfg(test)] @@ -1228,8 +1252,6 @@ mod tests { use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintTarget; - use nexus_types::deployment::NetworkInterface; - use nexus_types::deployment::NetworkInterfaceKind; use nexus_types::deployment::OmicronZoneConfig; use nexus_types::deployment::OmicronZoneType; use nexus_types::deployment::OmicronZonesConfig; @@ -1241,6 +1263,8 @@ mod tests { use omicron_common::api::external::IpNet; use omicron_common::api::external::MacAddr; use omicron_common::api::external::Vni; + use omicron_common::api::internal::shared::NetworkInterface; + use omicron_common::api::internal::shared::NetworkInterfaceKind; use omicron_test_utils::dev; use slog::info; use std::collections::BTreeMap; @@ -1558,13 +1582,15 @@ mod tests { external_ip: "::1".parse().unwrap(), nic: NetworkInterface { id: nic.identity.id, - kind: NetworkInterfaceKind::Service(service.id()), + kind: NetworkInterfaceKind::Service { + id: service.id(), + }, name: format!("test-nic-{}", nic.identity.id) .parse() .unwrap(), ip: nic.ip.unwrap(), mac: nic.mac.unwrap(), - subnet: IpNet::from(*NEXUS_OPTE_IPV4_SUBNET).into(), + subnet: IpNet::from(*NEXUS_OPTE_IPV4_SUBNET), vni: Vni::SERVICES_VNI, primary: true, slot: nic.slot.unwrap(), diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index 739c0b0809..0502450121 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -132,6 +132,7 @@ const MAX_PORT: u16 = u16::MAX; /// CAST(candidate_first_port AS INT4) AS first_port, /// CAST(candidate_last_port AS INT4) AS last_port, /// AS project_id, +/// AS is_probe, /// AS state /// FROM /// SELECT * FROM ( @@ -419,6 +420,12 @@ impl NextExternalIp { )?; out.push_sql(" AS "); out.push_identifier(dsl::state::NAME)?; + out.push_sql(", "); + + // is_probe flag + out.push_bind_param::(self.ip.is_probe())?; + out.push_sql(" AS "); + out.push_identifier(dsl::is_probe::NAME)?; out.push_sql(" FROM ("); self.push_address_sequence_subquery(out.reborrow())?; diff --git a/nexus/db-queries/src/db/queries/network_interface.rs b/nexus/db-queries/src/db/queries/network_interface.rs index f6ce3e31e3..c0fc18aca1 100644 --- a/nexus/db-queries/src/db/queries/network_interface.rs +++ b/nexus/db-queries/src/db/queries/network_interface.rs @@ -159,6 +159,9 @@ impl InsertError { InsertError::InterfaceAlreadyExists(_name, NetworkInterfaceKind::Service) => { unimplemented!("service network interface") } + InsertError::InterfaceAlreadyExists(_name, NetworkInterfaceKind::Probe) => { + unimplemented!("probe network interface") + } InsertError::NoAvailableIpAddresses => { external::Error::invalid_request( "No available IP addresses for interface", @@ -408,6 +411,9 @@ fn decode_database_error( NetworkInterfaceKind::Service => { external::ResourceType::ServiceNetworkInterface } + NetworkInterfaceKind::Probe => { + external::ResourceType::ProbeNetworkInterface + } }; InsertError::External(error::public_error_from_diesel( err, @@ -647,7 +653,7 @@ impl NextMacShifts { impl NextMacAddress { pub fn new(vpc_id: Uuid, kind: NetworkInterfaceKind) -> Self { let (base, max_shift, min_shift) = match kind { - NetworkInterfaceKind::Instance => { + NetworkInterfaceKind::Instance | 
NetworkInterfaceKind::Probe => { let NextMacShifts { base, min_shift, max_shift } = NextMacShifts::for_guest(); (base.into(), max_shift, min_shift) @@ -2730,6 +2736,7 @@ mod tests { NetworkInterfaceKind::Service => { (inserted.mac.is_system(), "system") } + NetworkInterfaceKind::Probe => (inserted.mac.is_system(), "probe"), }; assert!( mac_in_range, diff --git a/nexus/reconfigurator/execution/src/resource_allocation.rs b/nexus/reconfigurator/execution/src/resource_allocation.rs index 8ca44df39e..83a484baa4 100644 --- a/nexus/reconfigurator/execution/src/resource_allocation.rs +++ b/nexus/reconfigurator/execution/src/resource_allocation.rs @@ -15,12 +15,12 @@ use nexus_db_queries::db::fixed_data::vpc_subnet::DNS_VPC_SUBNET; use nexus_db_queries::db::fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; use nexus_db_queries::db::fixed_data::vpc_subnet::NTP_VPC_SUBNET; use nexus_db_queries::db::DataStore; -use nexus_types::deployment::NetworkInterface; -use nexus_types::deployment::NetworkInterfaceKind; use nexus_types::deployment::OmicronZoneType; use nexus_types::deployment::OmicronZonesConfig; use nexus_types::deployment::SourceNatConfig; use omicron_common::api::external::IdentityMetadataCreateParams; +use omicron_common::api::internal::shared::NetworkInterface; +use omicron_common::api::internal::shared::NetworkInterfaceKind; use slog::info; use slog::warn; use std::collections::BTreeMap; @@ -345,6 +345,7 @@ impl<'a> ResourceAllocator<'a> { bail!("invalid NIC kind (expected service, got instance)") } NetworkInterfaceKind::Service { .. } => (), + NetworkInterfaceKind::Probe { .. } => (), } // Only attempt to allocate `nic` if it isn't already assigned to this @@ -546,7 +547,7 @@ mod tests { external_ips.next().expect("exhausted external_ips"); let nexus_nic = NetworkInterface { id: Uuid::new_v4(), - kind: NetworkInterfaceKind::Service(nexus_id), + kind: NetworkInterfaceKind::Service { id: nexus_id }, name: "test-nexus".parse().expect("bad name"), ip: NEXUS_OPTE_IPV4_SUBNET .iter() @@ -554,7 +555,7 @@ mod tests { .unwrap() .into(), mac: MacAddr::random_system(), - subnet: IpNet::from(*NEXUS_OPTE_IPV4_SUBNET).into(), + subnet: IpNet::from(*NEXUS_OPTE_IPV4_SUBNET), vni: Vni::SERVICES_VNI, primary: true, slot: 0, @@ -566,7 +567,7 @@ mod tests { external_ips.next().expect("exhausted external_ips"); let dns_nic = NetworkInterface { id: Uuid::new_v4(), - kind: NetworkInterfaceKind::Service(dns_id), + kind: NetworkInterfaceKind::Service { id: dns_id }, name: "test-external-dns".parse().expect("bad name"), ip: DNS_OPTE_IPV4_SUBNET .iter() @@ -574,7 +575,7 @@ mod tests { .unwrap() .into(), mac: MacAddr::random_system(), - subnet: IpNet::from(*DNS_OPTE_IPV4_SUBNET).into(), + subnet: IpNet::from(*DNS_OPTE_IPV4_SUBNET), vni: Vni::SERVICES_VNI, primary: true, slot: 0, @@ -589,7 +590,7 @@ mod tests { }; let ntp_nic = NetworkInterface { id: Uuid::new_v4(), - kind: NetworkInterfaceKind::Service(ntp_id), + kind: NetworkInterfaceKind::Service { id: ntp_id }, name: "test-external-ntp".parse().expect("bad name"), ip: NTP_OPTE_IPV4_SUBNET .iter() @@ -597,7 +598,7 @@ mod tests { .unwrap() .into(), mac: MacAddr::random_system(), - subnet: IpNet::from(*NTP_OPTE_IPV4_SUBNET).into(), + subnet: IpNet::from(*NTP_OPTE_IPV4_SUBNET), vni: Vni::SERVICES_VNI, primary: true, slot: 0, @@ -888,9 +889,14 @@ mod tests { "invalid NIC kind (expected service, got instance)" ) } - NetworkInterfaceKind::Service(id) => { + NetworkInterfaceKind::Probe { .. 
+            } => {
+                panic!(
+                    "invalid NIC kind (expected service, got probe)"
+                )
+            }
+            NetworkInterfaceKind::Service { id } => {
                 let id = *id;
-                nic.kind = NetworkInterfaceKind::Instance(id);
+                nic.kind = NetworkInterfaceKind::Instance { id };
             }
         }
         "invalid NIC kind".to_string()
diff --git a/nexus/reconfigurator/planning/src/blueprint_builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder.rs
index d541e112d5..26d0d1e23f 100644
--- a/nexus/reconfigurator/planning/src/blueprint_builder.rs
+++ b/nexus/reconfigurator/planning/src/blueprint_builder.rs
@@ -13,8 +13,6 @@ use ipnet::IpAdd;
 use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES;
 use nexus_inventory::now_db_precision;
 use nexus_types::deployment::Blueprint;
-use nexus_types::deployment::NetworkInterface;
-use nexus_types::deployment::NetworkInterfaceKind;
 use nexus_types::deployment::OmicronZoneConfig;
 use nexus_types::deployment::OmicronZoneDataset;
 use nexus_types::deployment::OmicronZoneType;
@@ -35,6 +33,8 @@ use omicron_common::api::external::Generation;
 use omicron_common::api::external::IpNet;
 use omicron_common::api::external::MacAddr;
 use omicron_common::api::external::Vni;
+use omicron_common::api::internal::shared::NetworkInterface;
+use omicron_common::api::internal::shared::NetworkInterfaceKind;
 use slog::o;
 use slog::Logger;
 use std::collections::BTreeMap;
@@ -532,14 +532,14 @@ impl<'a> BlueprintBuilder<'a> {
                     .next()
                     .ok_or(Error::ExhaustedNexusIps)?
                     .into(),
-                IpNet::from(*NEXUS_OPTE_IPV4_SUBNET).into(),
+                IpNet::from(*NEXUS_OPTE_IPV4_SUBNET),
             ),
             IpAddr::V6(_) => (
                 self.nexus_v6_ips
                     .next()
                     .ok_or(Error::ExhaustedNexusIps)?
                     .into(),
-                IpNet::from(*NEXUS_OPTE_IPV6_SUBNET).into(),
+                IpNet::from(*NEXUS_OPTE_IPV6_SUBNET),
             ),
         };
         let mac = self
@@ -548,7 +548,7 @@
             .ok_or(Error::NoSystemMacAddressAvailable)?;
         NetworkInterface {
             id: Uuid::new_v4(),
-            kind: NetworkInterfaceKind::Service(nexus_id),
+            kind: NetworkInterfaceKind::Service { id: nexus_id },
             name: format!("nexus-{nexus_id}").parse().unwrap(),
             ip,
             mac,
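
Two changes recur throughout these hunks: `NetworkInterfaceKind` moves from tuple variants (`Service(id)`) to struct variants (`Service { id }`), and a new `Probe` variant joins `Instance` and `Service`. The OpenAPI schemas later in this patch show each kind serializing as `{ "type": "service", "id": "..." }`, which is consistent with serde's internally tagged representation. A minimal sketch of that shape follows; it assumes the `serde`, `serde_json`, and `uuid` crates (with uuid's `v4` and `serde` features) and is not the actual omicron definition, which carries more derives.

```rust
use serde::{Deserialize, Serialize};
use uuid::Uuid;

// Struct variants give the named `id` field seen in the generated JSON.
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
enum NetworkInterfaceKind {
    /// A vNIC attached to a guest instance
    Instance { id: Uuid },
    /// A vNIC associated with an internal service
    Service { id: Uuid },
    /// A vNIC associated with a probe
    Probe { id: Uuid },
}

fn main() {
    let kind = NetworkInterfaceKind::Probe { id: Uuid::new_v4() };
    // Prints {"type":"probe","id":"..."}, the shape in the OpenAPI schemas.
    println!("{}", serde_json::to_string(&kind).unwrap());
}
```

The struct variants matter because serde's internally tagged representation needs a map-like payload to embed the `type` tag into; a plain tuple variant around a bare `Uuid` would not serialize under this scheme.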
diff --git a/nexus/src/app/instance_network.rs b/nexus/src/app/instance_network.rs
index c0bc5d237b..eb5f83470f 100644
--- a/nexus/src/app/instance_network.rs
+++ b/nexus/src/app/instance_network.rs
@@ -22,11 +22,13 @@ use omicron_common::api::external::Error;
 use omicron_common::api::external::Ipv4Net;
 use omicron_common::api::external::Ipv6Net;
 use omicron_common::api::internal::nexus;
+use omicron_common::api::internal::shared::NetworkInterface;
 use omicron_common::api::internal::shared::SwitchLocation;
 use sled_agent_client::types::DeleteVirtualNetworkInterfaceHost;
 use sled_agent_client::types::SetVirtualNetworkInterfaceHost;
 use std::collections::HashSet;
 use std::str::FromStr;
+use std::sync::Arc;
 use uuid::Uuid;

 impl super::Nexus {
@@ -452,11 +454,105 @@ impl super::Nexus {
         Ok(nat_entries)
     }

+    // The logic of this function closely follows
+    // `instance_ensure_dpd_config`, but the mechanics differ enough to
+    // justify a separate function.
+    pub(crate) async fn probe_ensure_dpd_config(
+        &self,
+        opctx: &OpContext,
+        probe_id: Uuid,
+        sled_ip_address: std::net::Ipv6Addr,
+        ip_index_filter: Option<usize>,
+        dpd_client: &Arc<dpd_client::Client>,
+    ) -> Result<(), Error> {
+        let log = &self.log;
+
+        // All external IPs map to the primary network interface, so find
+        // that interface. If there is no such interface, there's no way to
+        // route traffic destined to those IPs, so there's nothing to
+        // configure and it's safe to return early.
+        let network_interface = match self
+            .db_datastore
+            .derive_probe_network_interface_info(&opctx, probe_id)
+            .await?
+            .into_iter()
+            .find(|interface| interface.primary)
+        {
+            Some(interface) => interface,
+            None => {
+                info!(log, "probe has no primary network interface";
+                    "probe_id" => %probe_id);
+                return Ok(());
+            }
+        };
+
+        let mac_address =
+            macaddr::MacAddr6::from_str(&network_interface.mac.to_string())
+                .map_err(|e| {
+                    Error::internal_error(&format!(
+                        "failed to convert mac address: {e}"
+                    ))
+                })?;
+
+        info!(log, "looking up probe's external IPs";
+            "probe_id" => %probe_id);
+
+        let ips = self
+            .db_datastore
+            .probe_lookup_external_ips(&opctx, probe_id)
+            .await?;
+
+        if let Some(wanted_index) = ip_index_filter {
+            if ips.get(wanted_index).is_none() {
+                return Err(Error::internal_error(&format!(
+                    "failed to find external ip address at index: {}",
+                    wanted_index
+                )));
+            }
+        }
+
+        let sled_address =
+            Ipv6Net(Ipv6Network::new(sled_ip_address, 128).unwrap());
+
+        for target_ip in ips
+            .iter()
+            .enumerate()
+            .filter(|(index, _)| {
+                if let Some(wanted_index) = ip_index_filter {
+                    *index == wanted_index
+                } else {
+                    true
+                }
+            })
+            .map(|(_, ip)| ip)
+        {
+            // For each external ip, add a nat entry to the database
+            self.ensure_nat_entry(
+                target_ip,
+                sled_address,
+                &network_interface,
+                mac_address,
+                opctx,
+            )
+            .await?;
+        }
+
+        // Notify dendrite that there are changes for it to reconcile.
+        // In the event of a failure to notify dendrite, we'll log an error
+        // and rely on dendrite's RPW timer to catch it up.
+        if let Err(e) = dpd_client.ipv4_nat_trigger_update().await {
+            error!(self.log, "failed to notify dendrite of nat updates"; "error" => ?e);
+        };
+
+        Ok(())
+    }
+
     async fn ensure_nat_entry(
         &self,
         target_ip: &nexus_db_model::ExternalIp,
         sled_address: Ipv6Net,
-        network_interface: &sled_agent_client::types::NetworkInterface,
+        network_interface: &NetworkInterface,
         mac_address: macaddr::MacAddr6,
         opctx: &OpContext,
     ) -> Result {
@@ -675,6 +771,87 @@
         Ok(())
     }

+    // The logic of this function closely follows
+    // `instance_delete_dpd_config`, but the mechanics differ enough to
+    // justify a separate function.
+    pub(crate) async fn probe_delete_dpd_config(
+        &self,
+        opctx: &OpContext,
+        probe_id: Uuid,
+    ) -> Result<(), Error> {
+        let log = &self.log;
+
+        info!(log, "deleting probe dpd configuration";
+            "probe_id" => %probe_id);
+
+        let external_ips = self
+            .db_datastore
+            .probe_lookup_external_ips(opctx, probe_id)
+            .await?;
+
+        let mut errors = vec![];
+        for entry in external_ips {
+            // Soft delete the NAT entry
+            match self
+                .db_datastore
+                .ipv4_nat_delete_by_external_ip(&opctx, &entry)
+                .await
+            {
+                Ok(_) => Ok(()),
+                Err(err) => match err {
+                    Error::ObjectNotFound { ..
+                    } => {
+                        warn!(log, "no matching nat entries to soft delete");
+                        Ok(())
+                    }
+                    _ => {
+                        let message = format!(
+                            "failed to delete nat entry due to error: {err:?}"
+                        );
+                        error!(log, "{}", message);
+                        Err(Error::internal_error(&message))
+                    }
+                },
+            }?;
+        }
+
+        let boundary_switches =
+            self.boundary_switches(&self.opctx_alloc).await?;
+
+        for switch in &boundary_switches {
+            debug!(&self.log, "notifying dendrite of updates";
+                "probe_id" => %probe_id,
+                "switch" => switch.to_string());
+
+            let client_result = self.dpd_clients.get(switch).ok_or_else(|| {
+                Error::internal_error(&format!(
+                    "unable to find dendrite client for {switch}"
+                ))
+            });
+
+            let dpd_client = match client_result {
+                Ok(client) => client,
+                Err(new_error) => {
+                    errors.push(new_error);
+                    continue;
+                }
+            };
+
+            // Notify dendrite that there are changes for it to reconcile.
+            // In the event of a failure to notify dendrite, we'll log an error
+            // and rely on dendrite's RPW timer to catch it up.
+            if let Err(e) = dpd_client.ipv4_nat_trigger_update().await {
+                error!(self.log, "failed to notify dendrite of nat updates"; "error" => ?e);
+            };
+        }
+
+        if let Some(e) = errors.into_iter().next() {
+            return Err(e);
+        }
+
+        Ok(())
+    }
+
     /// Deletes an instance's OPTE V2P mappings and the boundary switch NAT
     /// entries for its external IPs.
     ///
diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs
index 781e39ac83..68f57d31f3 100644
--- a/nexus/src/app/mod.rs
+++ b/nexus/src/app/mod.rs
@@ -55,6 +55,7 @@ mod ip_pool;
 mod metrics;
 mod network_interface;
 mod oximeter;
+mod probe;
 mod project;
 mod quota;
 mod rack;
diff --git a/nexus/src/app/probe.rs b/nexus/src/app/probe.rs
new file mode 100644
index 0000000000..0fce9d3431
--- /dev/null
+++ b/nexus/src/app/probe.rs
@@ -0,0 +1,109 @@
+use nexus_db_model::Probe;
+use nexus_db_queries::authz;
+use nexus_db_queries::context::OpContext;
+use nexus_db_queries::db::datastore::ProbeInfo;
+use nexus_db_queries::db::lookup;
+use nexus_types::external_api::params;
+use nexus_types::identity::Resource;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::{
+    http_pagination::PaginatedBy, CreateResult, DataPageParams, DeleteResult,
+    ListResultVec, LookupResult, NameOrId,
+};
+use uuid::Uuid;
+
+impl super::Nexus {
+    /// List the probes in the given project.
+    pub(crate) async fn probe_list(
+        &self,
+        opctx: &OpContext,
+        project_lookup: &lookup::Project<'_>,
+        pagparams: &PaginatedBy<'_>,
+    ) -> ListResultVec<ProbeInfo> {
+        let (.., authz_project) =
+            project_lookup.lookup_for(authz::Action::ListChildren).await?;
+        self.db_datastore.probe_list(opctx, &authz_project, pagparams).await
+    }
+
+    /// List the probes for the given sled. This is used by sled agents to
+    /// determine what probes they should be running.
+    pub(crate) async fn probe_list_for_sled(
+        &self,
+        opctx: &OpContext,
+        pagparams: &DataPageParams<'_, Uuid>,
+        sled: Uuid,
+    ) -> ListResultVec<ProbeInfo> {
+        self.db_datastore.probe_list_for_sled(sled, opctx, pagparams).await
+    }
+
+    /// Get info about a particular probe.
+    pub(crate) async fn probe_get(
+        &self,
+        opctx: &OpContext,
+        project_lookup: &lookup::Project<'_>,
+        name_or_id: &NameOrId,
+    ) -> LookupResult<ProbeInfo> {
+        let (.., authz_project) =
+            project_lookup.lookup_for(authz::Action::CreateChild).await?;
+        self.db_datastore.probe_get(opctx, &authz_project, &name_or_id).await
+    }
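
Creation, described in the doc comment below, is a persist-first flow: the probe record and its supporting resources are written to the datastore, and only then is NAT state pushed to each boundary switch. A toy sketch of that ordering follows, with hypothetical stand-ins (`persist`, `configure`, `ConfigError`) rather than real omicron calls.

```rust
/// Hypothetical stand-in for any error the flow can produce.
#[derive(Debug)]
struct ConfigError(String);

/// Persist the record first, then push per-switch configuration. The order
/// matters: a record with missing switch state is visible and retryable,
/// while switch state for a record that failed to persist would be orphaned.
fn create_probe(
    persist: impl FnOnce() -> Result<u64, ConfigError>,
    switches: &[&str],
    mut configure: impl FnMut(&str) -> Result<(), ConfigError>,
) -> Result<u64, ConfigError> {
    let id = persist()?;
    for switch in switches {
        configure(switch)?;
    }
    Ok(id)
}

fn main() {
    let id = create_probe(
        || Ok(42),
        &["switch0", "switch1"],
        |s| {
            println!("pushed NAT config to {s}");
            Ok(())
        },
    )
    .expect("create failed");
    println!("created probe {id}");
}
```

Persist-first also fits the error handling seen above: when notifying dendrite fails, the code logs and relies on dendrite's RPW timer, since the durable record is the source of truth to converge on.

+
+    /// Create a probe. This adds the probe to the data store and sets up the
+    /// NAT state on the switch. Actual launching of the probe is done by the
+    /// target sled agent asynchronously.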
+    pub(crate) async fn probe_create(
+        &self,
+        opctx: &OpContext,
+        project_lookup: &lookup::Project<'_>,
+        new_probe_params: &params::ProbeCreate,
+    ) -> CreateResult<Probe> {
+        let (.., authz_project) =
+            project_lookup.lookup_for(authz::Action::CreateChild).await?;
+
+        let probe = self
+            .db_datastore
+            .probe_create(opctx, &authz_project, new_probe_params)
+            .await?;
+
+        let (.., sled) =
+            self.sled_lookup(opctx, &new_probe_params.sled)?.fetch().await?;
+
+        let boundary_switches =
+            self.boundary_switches(&self.opctx_alloc).await?;
+
+        for switch in &boundary_switches {
+            let dpd_client = self.dpd_clients.get(switch).ok_or_else(|| {
+                Error::internal_error(&format!(
+                    "could not find dpd client for {switch}"
+                ))
+            })?;
+            self.probe_ensure_dpd_config(
+                opctx,
+                probe.id(),
+                sled.ip.into(),
+                None,
+                dpd_client,
+            )
+            .await?;
+        }
+
+        Ok(probe)
+    }
+
+    /// Delete a probe. This deletes the probe from the data store and tears
+    /// down the associated NAT state.
+    pub(crate) async fn probe_delete(
+        &self,
+        opctx: &OpContext,
+        project_lookup: &lookup::Project<'_>,
+        name_or_id: NameOrId,
+    ) -> DeleteResult {
+        let probe = self.probe_get(opctx, project_lookup, &name_or_id).await?;
+
+        self.probe_delete_dpd_config(opctx, probe.id).await?;
+
+        let (.., authz_project) =
+            project_lookup.lookup_for(authz::Action::CreateChild).await?;
+        self.db_datastore.probe_delete(opctx, &authz_project, &name_or_id).await
+    }
+}
diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs
index a137f19434..4030fce31d 100644
--- a/nexus/src/app/rack.rs
+++ b/nexus/src/app/rack.rs
@@ -760,8 +760,9 @@ impl super::Nexus {
             ntp_servers: Vec::new(), //TODO
             rack_network_config: Some(RackNetworkConfigV1 {
                 rack_subnet: subnet,
-                //TODO(ry) you are here. We need to remove these too. They are
-                // inconsistent with a generic set of addresses on ports.
+                //TODO: We need to remove these. They are inconsistent with
+                // a generic set of addresses on ports that may not be
+                // contiguous.
                 infra_ip_first: Ipv4Addr::UNSPECIFIED,
                 infra_ip_last: Ipv4Addr::UNSPECIFIED,
                 ports,
diff --git a/nexus/src/app/sagas/project_create.rs b/nexus/src/app/sagas/project_create.rs
index 40acc822c0..b31dd821f0 100644
--- a/nexus/src/app/sagas/project_create.rs
+++ b/nexus/src/app/sagas/project_create.rs
@@ -245,7 +245,6 @@ mod test {
             .filter(dsl::collection_type.eq(nexus_db_queries::db::model::CollectionTypeProvisioned::Project.to_string()))
             // ignore built-in services project
             .filter(dsl::id.ne(*SERVICES_PROJECT_ID))
-
             .select(VirtualProvisioningCollection::as_select())
             .get_results_async::<VirtualProvisioningCollection>(&conn)
             .await
diff --git a/nexus/src/app/switch_port.rs b/nexus/src/app/switch_port.rs
index b9f0f94fa0..fc9ad2866a 100644
--- a/nexus/src/app/switch_port.rs
+++ b/nexus/src/app/switch_port.rs
@@ -40,9 +40,9 @@ impl super::Nexus {
     ) -> CreateResult<SwitchPortSettings> {
         opctx.authorize(authz::Action::Modify, &authz::FLEET).await?;

-        //TODO(ry) race conditions on exists check versus update/create.
-        //         Normally I would use a DB lock here, but not sure what
-        //         the Omicron way of doing things here is.
+        //TODO: race condition between the exists check and update/create
+        //      below. A DB lock would normally close this window, but it is
+        //      not clear what the idiomatic Omicron approach is.
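
The TODO just above is a textbook time-of-check-to-time-of-use race: between the exists check and the create, another caller can insert the same row. A hedged sketch of the usual shape of the fix follows, using an in-memory map under a single lock as a stand-in for what a unique constraint (or `INSERT ... ON CONFLICT`) provides in CockroachDB; none of these names are omicron APIs.

```rust
use std::collections::HashMap;
use std::sync::Mutex;

/// An in-memory stand-in for the settings table.
struct Store {
    settings: Mutex<HashMap<String, u32>>,
}

impl Store {
    /// Atomic create-if-absent: no other caller can slip in between the
    /// lookup and the insert because both happen under one lock.
    fn create_if_absent(&self, name: &str, value: u32) -> Result<u32, String> {
        let mut map = self.settings.lock().unwrap();
        if map.contains_key(name) {
            return Err(format!("settings {name:?} already exist"));
        }
        map.insert(name.to_string(), value);
        Ok(value)
    }
}

fn main() {
    let store = Store { settings: Mutex::new(HashMap::new()) };
    assert!(store.create_if_absent("portofino", 1).is_ok());
    assert!(store.create_if_absent("portofino", 2).is_err());
}
```

The hunk then continues with the exists check in question: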
         match self
             .db_datastore
diff --git a/nexus/src/app/vpc.rs b/nexus/src/app/vpc.rs
index 3a6278053a..44b676b853 100644
--- a/nexus/src/app/vpc.rs
+++ b/nexus/src/app/vpc.rs
@@ -30,10 +30,10 @@ use omicron_common::api::external::NameOrId;
 use omicron_common::api::external::UpdateResult;
 use omicron_common::api::external::VpcFirewallRuleUpdateParams;
 use omicron_common::api::internal::nexus::HostIdentifier;
-use sled_agent_client::types::NetworkInterface;

 use futures::future::join_all;
 use ipnetwork::IpNetwork;
+use omicron_common::api::internal::shared::NetworkInterface;
 use std::collections::{HashMap, HashSet};
 use std::net::IpAddr;
 use std::sync::Arc;
diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index 8fe02a0fd8..aa037e072f 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -15,7 +15,6 @@ use super::{
 };
 use crate::external_api::shared;
 use crate::ServerContext;
-use dropshot::EmptyScanParams;
 use dropshot::HttpError;
 use dropshot::HttpResponseAccepted;
 use dropshot::HttpResponseCreated;
@@ -34,18 +33,20 @@ use dropshot::{
     channel, endpoint, WebsocketChannelResult, WebsocketConnection,
 };
 use dropshot::{ApiDescription, StreamingBody};
+use dropshot::{ApiEndpoint, EmptyScanParams};
 use ipnetwork::IpNetwork;
-use nexus_db_queries::authz;
 use nexus_db_queries::db;
 use nexus_db_queries::db::identity::Resource;
 use nexus_db_queries::db::lookup::ImageLookup;
 use nexus_db_queries::db::lookup::ImageParentLookup;
 use nexus_db_queries::db::model::Name;
+use nexus_db_queries::{authz, db::datastore::ProbeInfo};
 use nexus_types::external_api::shared::BfdStatus;
 use omicron_common::api::external::http_pagination::data_page_params_for;
 use omicron_common::api::external::http_pagination::marker_for_name;
 use omicron_common::api::external::http_pagination::marker_for_name_or_id;
 use omicron_common::api::external::http_pagination::name_or_id_pagination;
+use omicron_common::api::external::http_pagination::PaginatedBy;
 use omicron_common::api::external::http_pagination::PaginatedById;
 use omicron_common::api::external::http_pagination::PaginatedByName;
 use omicron_common::api::external::http_pagination::PaginatedByNameOrId;
@@ -69,6 +70,7 @@ use omicron_common::api::external::InstanceNetworkInterface;
 use omicron_common::api::external::InternalContext;
 use omicron_common::api::external::LoopbackAddress;
 use omicron_common::api::external::NameOrId;
+use omicron_common::api::external::Probe;
 use omicron_common::api::external::RouterRoute;
 use omicron_common::api::external::RouterRouteKind;
 use omicron_common::api::external::SwitchPort;
@@ -353,12 +355,40 @@ pub(crate) fn external_api() -> NexusApiDescription {
         Ok(())
     }

+    fn register_experimental<T>(
+        api: &mut NexusApiDescription,
+        endpoint: T,
+    ) -> Result<(), String>
+    where
+        T: Into<ApiEndpoint<Arc<ServerContext>>>,
+    {
+        let mut ep: ApiEndpoint<Arc<ServerContext>> = endpoint.into();
+        // only one tag is allowed
+        ep.tags = vec![String::from("hidden")];
+        ep.path = String::from("/experimental") + &ep.path;
+        api.register(ep)
+    }
+
+    fn register_experimental_endpoints(
+        api: &mut NexusApiDescription,
+    ) -> Result<(), String> {
+        register_experimental(api, probe_list)?;
+        register_experimental(api, probe_view)?;
+        register_experimental(api, probe_create)?;
+        register_experimental(api, probe_delete)?;
+
+        Ok(())
+    }
+
     let conf =
         serde_json::from_str(include_str!("./tag-config.json")).unwrap();
     let mut api = NexusApiDescription::new().tag_config(conf);

     if let Err(err) = register_endpoints(&mut
api) { panic!("failed to register entrypoints: {}", err); } + if let Err(err) = register_experimental_endpoints(&mut api) { + panic!("failed to register experimental entrypoints: {}", err); + } api } @@ -5999,6 +6029,125 @@ async fn current_user_ssh_key_delete( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } +/// List instrumentation probes +#[endpoint { + method = GET, + path = "/v1/probes", + tags = ["system/probes"], +}] +async fn probe_list( + rqctx: RequestContext>, + query_params: Query>, +) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + + let nexus = &apictx.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let project_lookup = + nexus.project_lookup(&opctx, scan_params.selector.clone())?; + + let probes = + nexus.probe_list(&opctx, &project_lookup, &paginated_by).await?; + + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + probes, + &|_, p: &ProbeInfo| match paginated_by { + PaginatedBy::Id(_) => NameOrId::Id(p.id), + PaginatedBy::Name(_) => NameOrId::Name(p.name.clone().into()), + }, + )?)) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// View instrumentation probe +#[endpoint { + method = GET, + path = "/v1/probes/{probe}", + tags = ["system/probes"], +}] +async fn probe_view( + rqctx: RequestContext>, + path_params: Path, + query_params: Query, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let project_selector = query_params.into_inner(); + let project_lookup = nexus.project_lookup(&opctx, project_selector)?; + let probe = + nexus.probe_get(&opctx, &project_lookup, &path.probe).await?; + Ok(HttpResponseOk(probe)) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Create instrumentation probe +#[endpoint { + method = POST, + path = "/v1/probes", + tags = ["system/probes"], +}] +async fn probe_create( + rqctx: RequestContext>, + query_params: Query, + new_probe: TypedBody, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + + let nexus = &apictx.nexus; + let new_probe_params = &new_probe.into_inner(); + let project_selector = query_params.into_inner(); + let project_lookup = nexus.project_lookup(&opctx, project_selector)?; + let probe = nexus + .probe_create(&opctx, &project_lookup, &new_probe_params) + .await?; + Ok(HttpResponseCreated(probe.into())) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Delete instrumentation probe +#[endpoint { + method = DELETE, + path = "/v1/probes/{probe}", + tags = ["system/probes"], +}] +async fn probe_delete( + rqctx: RequestContext>, + query_params: Query, + path_params: Path, +) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = 
crate::context::op_context_for_external_api(&rqctx).await?; + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let project_selector = query_params.into_inner(); + let project_lookup = nexus.project_lookup(&opctx, project_selector)?; + nexus.probe_delete(&opctx, &project_lookup, path.probe).await?; + Ok(HttpResponseDeleted()) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + #[cfg(test)] mod test { use super::external_api; diff --git a/nexus/src/external_api/tag-config.json b/nexus/src/external_api/tag-config.json index 3bc8006cee..6974906507 100644 --- a/nexus/src/external_api/tag-config.json +++ b/nexus/src/external_api/tag-config.json @@ -86,6 +86,12 @@ "url": "http://docs.oxide.computer/api/vpcs" } }, + "system/probes": { + "description": "Probes for testing network connectivity", + "external_docs": { + "url": "http://docs.oxide.computer/api/probes" + } + }, "system/status": { "description": "Endpoints related to system health", "external_docs": { diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs index e298935fee..7f8211dc8e 100644 --- a/nexus/src/internal_api/http_entrypoints.rs +++ b/nexus/src/internal_api/http_entrypoints.rs @@ -25,6 +25,7 @@ use dropshot::ResultsPage; use dropshot::TypedBody; use hyper::Body; use nexus_db_model::Ipv4NatEntryView; +use nexus_db_queries::db::datastore::ProbeInfo; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintMetadata; use nexus_types::deployment::BlueprintTarget; @@ -94,6 +95,8 @@ pub(crate) fn internal_api() -> NexusApiDescription { api.register(sled_list_uninitialized)?; api.register(sled_add)?; + api.register(probes_get)?; + Ok(()) } @@ -635,7 +638,7 @@ struct RpwNatQueryParam { /// change or until the `limit` is reached. If there are no changes, an /// empty vec is returned. #[endpoint { - method = GET, + method = GET, path = "/nat/ipv4/changeset/{from_gen}" }] async fn ipv4_nat_changeset( @@ -864,3 +867,33 @@ async fn sled_add( }; apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await } + +/// Path parameters for probes +#[derive(Deserialize, JsonSchema)] +struct ProbePathParam { + sled: Uuid, +} + +/// Get all the probes associated with a given sled. 
+#[endpoint { + method = GET, + path = "/probes/{sled}" +}] +async fn probes_get( + rqctx: RequestContext>, + path_params: Path, + query_params: Query, +) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let query = query_params.into_inner(); + let path = path_params.into_inner(); + let nexus = &apictx.nexus; + let opctx = crate::context::op_context_for_internal_api(&rqctx).await; + let pagparams = data_page_params_for(&rqctx, &query)?; + Ok(HttpResponseOk( + nexus.probe_list_for_sled(&opctx, &pagparams, path.sled).await?, + )) + }; + apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await +} diff --git a/nexus/tests/integration_tests/mod.rs b/nexus/tests/integration_tests/mod.rs index 4b68a6c4f2..84b867252f 100644 --- a/nexus/tests/integration_tests/mod.rs +++ b/nexus/tests/integration_tests/mod.rs @@ -23,6 +23,7 @@ mod metrics; mod oximeter; mod pantry; mod password_login; +mod probe; mod projects; mod quotas; mod rack; diff --git a/nexus/tests/integration_tests/oximeter.rs b/nexus/tests/integration_tests/oximeter.rs index 7dc453d713..9663e10fa0 100644 --- a/nexus/tests/integration_tests/oximeter.rs +++ b/nexus/tests/integration_tests/oximeter.rs @@ -152,7 +152,7 @@ async fn test_oximeter_reregistration() { // Timeouts for checks const POLL_INTERVAL: Duration = Duration::from_millis(100); - const POLL_DURATION: Duration = Duration::from_secs(30); + const POLL_DURATION: Duration = Duration::from_secs(60); // We must have at exactly one timeseries, with at least one sample. let timeseries = diff --git a/nexus/tests/integration_tests/probe.rs b/nexus/tests/integration_tests/probe.rs new file mode 100644 index 0000000000..71a695bf8c --- /dev/null +++ b/nexus/tests/integration_tests/probe.rs @@ -0,0 +1,127 @@ +use dropshot::HttpErrorResponseBody; +use http::{Method, StatusCode}; +use nexus_db_queries::db::datastore::ProbeInfo; +use nexus_test_utils::{ + http_testing::{AuthnMode, NexusRequest}, + resource_helpers::{create_default_ip_pool, create_project}, + SLED_AGENT_UUID, +}; +use nexus_test_utils_macros::nexus_test; +use nexus_types::external_api::params::ProbeCreate; +use omicron_common::api::external::{IdentityMetadataCreateParams, Probe}; + +type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + +#[nexus_test] +async fn test_probe_basic_crud(ctx: &ControlPlaneTestContext) { + let client = &ctx.external_client; + + create_default_ip_pool(&client).await; + create_project(&client, "nebula").await; + + let probes = NexusRequest::iter_collection_authn::( + client, + "/experimental/v1/probes?project=nebula", + "", + None, + ) + .await + .expect("Failed to list probes") + .all_items; + + assert_eq!(probes.len(), 0, "Expected zero probes"); + + let params = ProbeCreate { + identity: IdentityMetadataCreateParams { + name: "class1".parse().unwrap(), + description: "subspace relay probe".to_owned(), + }, + ip_pool: None, + sled: SLED_AGENT_UUID.parse().unwrap(), + }; + + let created: Probe = NexusRequest::objects_post( + client, + "/experimental/v1/probes?project=nebula", + ¶ms, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + let probes = NexusRequest::iter_collection_authn::( + client, + "/experimental/v1/probes?project=nebula", + "", + None, + ) + .await + .expect("Failed to list probes") + .all_items; + + assert_eq!(probes.len(), 1, "Expected one probe"); + assert_eq!(probes[0].id, created.identity.id); + + let error: HttpErrorResponseBody = 
NexusRequest::expect_failure( + client, + StatusCode::NOT_FOUND, + Method::GET, + "/experimental/v1/probes/class2?project=nebula", + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + assert_eq!(error.message, "not found: probe with name \"class2\""); + + NexusRequest::object_get( + client, + "/experimental/v1/probes/class1?project=nebula", + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("failed to view probe") + .parsed_body::() + .expect("failed to parse probe info"); + + let fetched: ProbeInfo = NexusRequest::object_get( + client, + "/experimental/v1/probes/class1?project=nebula", + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + assert_eq!(fetched.id, created.identity.id); + + NexusRequest::object_delete( + client, + "/experimental/v1/probes/class1?project=nebula", + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); + + let probes = NexusRequest::iter_collection_authn::( + client, + "/experimental/v1/probes?project=nebula", + "", + None, + ) + .await + .expect("Failed to list probes after delete") + .all_items; + + assert_eq!(probes.len(), 0, "Expected zero probes"); +} diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index ecffadcb4d..3b954ec6ec 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -26,6 +26,10 @@ device_access_token POST /device/token device_auth_confirm POST /device/confirm device_auth_request POST /device/auth logout POST /v1/logout +probe_create POST /experimental/v1/probes +probe_delete DELETE /experimental/v1/probes/{probe} +probe_list GET /experimental/v1/probes +probe_view GET /experimental/v1/probes/{probe} API operations found with tag "images" OPERATION ID METHOD URL PATH diff --git a/nexus/tests/output/uncovered-authz-endpoints.txt b/nexus/tests/output/uncovered-authz-endpoints.txt index d76d9c5495..d19c7970d0 100644 --- a/nexus/tests/output/uncovered-authz-endpoints.txt +++ b/nexus/tests/output/uncovered-authz-endpoints.txt @@ -1,8 +1,12 @@ API endpoints with no coverage in authz tests: +probe_delete (delete "/experimental/v1/probes/{probe}") +probe_list (get "/experimental/v1/probes") +probe_view (get "/experimental/v1/probes/{probe}") ping (get "/v1/ping") device_auth_request (post "/device/auth") device_auth_confirm (post "/device/confirm") device_access_token (post "/device/token") +probe_create (post "/experimental/v1/probes") login_saml (post "/login/{silo_name}/saml/{provider_name}") login_local (post "/v1/login/{silo_name}/local") logout (post "/v1/logout") diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index ac950d2ca3..7dcb843b7f 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -15,8 +15,6 @@ use crate::external_api::views::SledPolicy; use crate::external_api::views::SledState; use crate::inventory::Collection; -pub use crate::inventory::NetworkInterface; -pub use crate::inventory::NetworkInterfaceKind; pub use crate::inventory::OmicronZoneConfig; pub use crate::inventory::OmicronZoneDataset; pub use crate::inventory::OmicronZoneType; diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 31cb1d3e5c..c6ae47d27c 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -80,6 +80,7 @@ path_param!(ProviderPath, provider, "SAML identity provider"); 
path_param!(IpPoolPath, pool, "IP pool"); path_param!(SshKeyPath, ssh_key, "SSH key"); path_param!(AddressLotPath, address_lot, "address lot"); +path_param!(ProbePath, probe, "probe"); id_path_param!(GroupPath, group_id, "group"); @@ -2042,3 +2043,21 @@ pub struct UpdatesGetRepositoryParams { /// The version to get. pub system_version: SemverVersion, } + +// Probes + +/// Create time parameters for probes. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct ProbeCreate { + #[serde(flatten)] + pub identity: IdentityMetadataCreateParams, + pub sled: Uuid, + pub ip_pool: Option, +} + +/// List probes with an optional name or id. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +pub struct ProbeListSelector { + /// A name or id to use when selecting a probe. + pub name_or_id: Option, +} diff --git a/nexus/types/src/inventory.rs b/nexus/types/src/inventory.rs index 57fc7bd647..92985081dc 100644 --- a/nexus/types/src/inventory.rs +++ b/nexus/types/src/inventory.rs @@ -17,11 +17,11 @@ pub use gateway_client::types::PowerState; pub use gateway_client::types::RotSlot; pub use gateway_client::types::SpType; use omicron_common::api::external::ByteCount; +pub use omicron_common::api::internal::shared::NetworkInterface; +pub use omicron_common::api::internal::shared::NetworkInterfaceKind; pub use omicron_common::api::internal::shared::SourceNatConfig; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -pub use sled_agent_client::types::NetworkInterface; -pub use sled_agent_client::types::NetworkInterfaceKind; pub use sled_agent_client::types::OmicronZoneConfig; pub use sled_agent_client::types::OmicronZoneDataset; pub use sled_agent_client::types::OmicronZoneType; diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index fb3ff976ae..9e18c7d6bc 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -667,6 +667,75 @@ } } }, + "/probes/{sled}": { + "get": { + "summary": "Get all the probes associated with a given sled.", + "operationId": "probes_get", + "parameters": [ + { + "in": "path", + "name": "sled", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_ProbeInfo", + "type": "array", + "items": { + "$ref": "#/components/schemas/ProbeInfo" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, "/racks/{rack_id}/initialization-complete": { "put": { "summary": "Report that the Rack Setup Service initialization is complete", @@ -4358,14 +4427,31 @@ } ] }, + "IpKind": { + "type": "string", + "enum": [ + "snat", + "floating", + "ephemeral" + ] + }, "IpNet": { - "description": "IpNet\n\n
JSON schema\n\n```json { \"oneOf\": [ { \"title\": \"v4\", \"allOf\": [ { \"$ref\": \"#/components/schemas/Ipv4Net\" } ] }, { \"title\": \"v6\", \"allOf\": [ { \"$ref\": \"#/components/schemas/Ipv6Net\" } ] } ] } ```
", - "anyOf": [ + "oneOf": [ { - "$ref": "#/components/schemas/Ipv4Net" + "title": "v4", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv4Net" + } + ] }, { - "$ref": "#/components/schemas/Ipv6Net" + "title": "v6", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Net" + } + ] } ] }, @@ -4457,8 +4543,11 @@ ] }, "Ipv4Net": { - "description": "An IPv4 subnet, including prefix and subnet mask\n\n
JSON schema\n\n```json { \"title\": \"An IPv4 subnet\", \"description\": \"An IPv4 subnet, including prefix and subnet mask\", \"examples\": [ \"192.168.1.0/24\" ], \"type\": \"string\", \"pattern\": \"^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$\" } ```
", - "type": "string" + "example": "192.168.1.0/24", + "title": "An IPv4 subnet", + "description": "An IPv4 subnet, including prefix and subnet mask", + "type": "string", + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" }, "Ipv4Network": { "type": "string", @@ -4483,8 +4572,11 @@ ] }, "Ipv6Net": { - "description": "An IPv6 subnet, including prefix and subnet mask\n\n
JSON schema\n\n```json { \"title\": \"An IPv6 subnet\", \"description\": \"An IPv6 subnet, including prefix and subnet mask\", \"examples\": [ \"fd12:3456::/64\" ], \"type\": \"string\", \"pattern\": \"^([fF][dD])[0-9a-fA-F]{2}:(([0-9a-fA-F]{1,4}:){6}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,6}:)([0-9a-fA-F]{1,4})?\\\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$\" } ```
", - "type": "string" + "example": "fd12:3456::/64", + "title": "An IPv6 subnet", + "description": "An IPv6 subnet, including prefix and subnet mask", + "type": "string", + "pattern": "^([fF][dD])[0-9a-fA-F]{2}:(([0-9a-fA-F]{1,4}:){6}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,6}:)([0-9a-fA-F]{1,4})?\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" }, "Ipv6Network": { "type": "string", @@ -4825,7 +4917,7 @@ "maxLength": 63 }, "NetworkInterface": { - "description": "Information required to construct a virtual network interface\n\n
JSON schema\n\n```json { \"description\": \"Information required to construct a virtual network interface\", \"type\": \"object\", \"required\": [ \"id\", \"ip\", \"kind\", \"mac\", \"name\", \"primary\", \"slot\", \"subnet\", \"vni\" ], \"properties\": { \"id\": { \"type\": \"string\", \"format\": \"uuid\" }, \"ip\": { \"type\": \"string\", \"format\": \"ip\" }, \"kind\": { \"$ref\": \"#/components/schemas/NetworkInterfaceKind\" }, \"mac\": { \"$ref\": \"#/components/schemas/MacAddr\" }, \"name\": { \"$ref\": \"#/components/schemas/Name\" }, \"primary\": { \"type\": \"boolean\" }, \"slot\": { \"type\": \"integer\", \"format\": \"uint8\", \"minimum\": 0.0 }, \"subnet\": { \"$ref\": \"#/components/schemas/IpNet\" }, \"vni\": { \"$ref\": \"#/components/schemas/Vni\" } } } ```
", + "description": "Information required to construct a virtual network interface", "type": "object", "properties": { "id": { @@ -4873,7 +4965,7 @@ ] }, "NetworkInterfaceKind": { - "description": "The type of network interface\n\n
JSON schema\n\n```json { \"description\": \"The type of network interface\", \"oneOf\": [ { \"description\": \"A vNIC attached to a guest instance\", \"type\": \"object\", \"required\": [ \"id\", \"type\" ], \"properties\": { \"id\": { \"type\": \"string\", \"format\": \"uuid\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"instance\" ] } } }, { \"description\": \"A vNIC associated with an internal service\", \"type\": \"object\", \"required\": [ \"id\", \"type\" ], \"properties\": { \"id\": { \"type\": \"string\", \"format\": \"uuid\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"service\" ] } } } ] } ```
", + "description": "The type of network interface", "oneOf": [ { "description": "A vNIC attached to a guest instance", @@ -4914,6 +5006,26 @@ "id", "type" ] + }, + { + "description": "A vNIC associated with a probe", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "probe" + ] + } + }, + "required": [ + "id", + "type" + ] } ] }, @@ -5504,6 +5616,66 @@ "speed400_g" ] }, + "ProbeExternalIp": { + "type": "object", + "properties": { + "first_port": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "ip": { + "type": "string", + "format": "ip" + }, + "kind": { + "$ref": "#/components/schemas/IpKind" + }, + "last_port": { + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "first_port", + "ip", + "kind", + "last_port" + ] + }, + "ProbeInfo": { + "type": "object", + "properties": { + "external_ips": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ProbeExternalIp" + } + }, + "id": { + "type": "string", + "format": "uuid" + }, + "interface": { + "$ref": "#/components/schemas/NetworkInterface" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "sled": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "external_ips", + "id", + "interface", + "name", + "sled" + ] + }, "ProducerEndpoint": { "description": "Information announced by a metric server, used so that clients can contact it and collect available metric data from it.", "type": "object", diff --git a/openapi/nexus.json b/openapi/nexus.json index 3d31331a90..cacd875acc 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -101,6 +101,206 @@ } } }, + "/experimental/v1/probes": { + "get": { + "tags": [ + "hidden" + ], + "summary": "List instrumentation probes", + "operationId": "probe_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProbeInfoResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "project" + ] + } + }, + "post": { + "tags": [ + "hidden" + ], + "summary": "Create instrumentation probe", + "operationId": "probe_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProbeCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Probe" + 
} + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/experimental/v1/probes/{probe}": { + "get": { + "tags": [ + "hidden" + ], + "summary": "View instrumentation probe", + "operationId": "probe_view", + "parameters": [ + { + "in": "path", + "name": "probe", + "description": "Name or ID of the probe", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProbeInfo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "hidden" + ], + "summary": "Delete instrumentation probe", + "operationId": "probe_delete", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "probe", + "description": "Name or ID of the probe", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/login/{silo_name}/saml/{provider_name}": { "post": { "tags": [ @@ -13210,6 +13410,14 @@ } ] }, + "IpKind": { + "type": "string", + "enum": [ + "snat", + "floating", + "ephemeral" + ] + }, "IpNet": { "oneOf": [ { @@ -13914,6 +14122,119 @@ } ] }, + "NetworkInterface": { + "description": "Information required to construct a virtual network interface", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "ip": { + "type": "string", + "format": "ip" + }, + "kind": { + "$ref": "#/components/schemas/NetworkInterfaceKind" + }, + "mac": { + "$ref": "#/components/schemas/MacAddr" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "primary": { + "type": "boolean" + }, + "slot": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "subnet": { + "$ref": "#/components/schemas/IpNet" + }, + "vni": { + "$ref": "#/components/schemas/Vni" + } + }, + "required": [ + "id", + "ip", + "kind", + "mac", + "name", + "primary", + "slot", + "subnet", + "vni" + ] + }, + "NetworkInterfaceKind": { + "description": "The type of network interface", + "oneOf": [ + { + "description": "A vNIC attached to a guest instance", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "instance" + ] + } + }, + "required": [ + "id", + "type" + ] + }, + { + "description": "A vNIC associated with an internal service", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "service" + ] + } + }, + "required": [ + "id", + "type" + ] + }, + { + "description": "A vNIC associated with a probe", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "probe" + ] + } + }, + "required": [ + "id", + "type" + ] + } + ] + }, 
"Password": { "title": "A password used to authenticate a user", "description": "Passwords may be subject to additional constraints.", @@ -14019,6 +14340,161 @@ "ok" ] }, + "Probe": { + "description": "Identity-related metadata that's included in nearly all public API objects", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "sled": { + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "name", + "sled", + "time_created", + "time_modified" + ] + }, + "ProbeCreate": { + "description": "Create time parameters for probes.", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "ip_pool": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "sled": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "description", + "name", + "sled" + ] + }, + "ProbeExternalIp": { + "type": "object", + "properties": { + "first_port": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "ip": { + "type": "string", + "format": "ip" + }, + "kind": { + "$ref": "#/components/schemas/IpKind" + }, + "last_port": { + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "first_port", + "ip", + "kind", + "last_port" + ] + }, + "ProbeInfo": { + "type": "object", + "properties": { + "external_ips": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ProbeExternalIp" + } + }, + "id": { + "type": "string", + "format": "uuid" + }, + "interface": { + "$ref": "#/components/schemas/NetworkInterface" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "sled": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "external_ips", + "id", + "interface", + "name", + "sled" + ] + }, + "ProbeInfoResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/ProbeInfo" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, "Project": { "description": "View of a Project", "type": "object", @@ -16503,6 +16979,12 @@ "storage" ] }, + "Vni": { + "description": "A Geneve Virtual Network Identifier", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, "Vpc": { "description": "View of a VPC", "type": "object", @@ -17452,6 +17934,13 @@ "url": "http://docs.oxide.computer/api/system-networking" } }, + { + "name": "system/probes", + "description": "Probes for testing network connectivity", + "externalDocs": { + "url": "http://docs.oxide.computer/api/probes" + } + }, { "name": "system/silos", "description": "Silos represent a 
logical partition of users and resources.", diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index 238b5832ca..43fac710fc 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -5670,6 +5670,26 @@ "id", "type" ] + }, + { + "description": "A vNIC associated with a probe", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "probe" + ] + } + }, + "required": [ + "id", + "type" + ] } ] }, diff --git a/package-manifest.toml b/package-manifest.toml index c6f39d2ecd..b7e96935f1 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -626,6 +626,15 @@ output.type = "zone" output.intermediate_only = true setup_hint = "Run `./tools/ci_download_transceiver_control` to download the necessary binaries" +[package.thundermuffin] +service_name = "thundermuffin" +source.type = "prebuilt" +source.repo = "thundermuffin" +source.commit = "a4a6108d7c9aac2464a0b6898e88132a8f701a13" +source.sha256 = "dc55a2accd33a347df4cbdc0026cbaccea2c004940c3fec8cadcdd633d440dfa" +output.type = "zone" +output.intermediate_only = true + # To package and install the asic variant of the switch, do: # # $ cargo run --release --bin omicron-package -- -t default target create -i standard -m gimlet -s asic @@ -740,3 +749,11 @@ source.type = "local" source.rust.binary_names = ["oxlog"] source.rust.release = true output.type = "tarball" + +[package.probe] +service_name = "probe" +source.type = "composite" +source.packages = [ + "thundermuffin.tar.gz", +] +output.type = "zone" diff --git a/schema/all-zone-requests.json b/schema/all-zone-requests.json index 8c324a15bd..e37fbfde59 100644 --- a/schema/all-zone-requests.json +++ b/schema/all-zone-requests.json @@ -302,6 +302,26 @@ ] } } + }, + { + "description": "A vNIC associated with a probe", + "type": "object", + "required": [ + "id", + "type" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "probe" + ] + } + } } ] }, diff --git a/schema/all-zones-requests.json b/schema/all-zones-requests.json index 7a07e2f9ae..0ac9e760a8 100644 --- a/schema/all-zones-requests.json +++ b/schema/all-zones-requests.json @@ -186,6 +186,26 @@ ] } } + }, + { + "description": "A vNIC associated with a probe", + "type": "object", + "required": [ + "id", + "type" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "probe" + ] + } + } } ] }, diff --git a/schema/crdb/40.0.0/up1.sql b/schema/crdb/40.0.0/up1.sql new file mode 100644 index 0000000000..7fc8c01713 --- /dev/null +++ b/schema/crdb/40.0.0/up1.sql @@ -0,0 +1,10 @@ +CREATE TABLE IF NOT EXISTS omicron.public.probe ( + id UUID NOT NULL PRIMARY KEY, + name STRING(63) NOT NULL, + description STRING(512) NOT NULL, + time_created TIMESTAMPTZ NOT NULL, + time_modified TIMESTAMPTZ NOT NULL, + time_deleted TIMESTAMPTZ, + project_id UUID NOT NULL, + sled UUID NOT NULL +); diff --git a/schema/crdb/40.0.0/up2.sql b/schema/crdb/40.0.0/up2.sql new file mode 100644 index 0000000000..6c070463a4 --- /dev/null +++ b/schema/crdb/40.0.0/up2.sql @@ -0,0 +1,4 @@ +CREATE UNIQUE INDEX IF NOT EXISTS lookup_probe_by_name ON omicron.public.probe ( + name +) WHERE + time_deleted IS NULL; diff --git a/schema/crdb/40.0.0/up3.sql b/schema/crdb/40.0.0/up3.sql new file mode 100644 index 0000000000..3b71ba4313 --- /dev/null +++ b/schema/crdb/40.0.0/up3.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.external_ip ADD COLUMN IF 
NOT EXISTS is_probe BOOL NOT NULL DEFAULT false; diff --git a/schema/crdb/40.0.0/up4.sql b/schema/crdb/40.0.0/up4.sql new file mode 100644 index 0000000000..6c989cf8c8 --- /dev/null +++ b/schema/crdb/40.0.0/up4.sql @@ -0,0 +1 @@ +ALTER TYPE omicron.public.network_interface_kind ADD VALUE IF NOT EXISTS 'probe'; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 79b6131d85..27faf3f79f 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3548,6 +3548,26 @@ SELECT deleted FROM interleaved_versions; +CREATE TABLE IF NOT EXISTS omicron.public.probe ( + id UUID NOT NULL PRIMARY KEY, + name STRING(63) NOT NULL, + description STRING(512) NOT NULL, + time_created TIMESTAMPTZ NOT NULL, + time_modified TIMESTAMPTZ NOT NULL, + time_deleted TIMESTAMPTZ, + project_id UUID NOT NULL, + sled UUID NOT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS lookup_probe_by_name ON omicron.public.probe ( + name +) WHERE + time_deleted IS NULL; + +ALTER TABLE omicron.public.external_ip ADD COLUMN IF NOT EXISTS is_probe BOOL NOT NULL DEFAULT false; + +ALTER TYPE omicron.public.network_interface_kind ADD VALUE IF NOT EXISTS 'probe'; + INSERT INTO omicron.public.db_metadata ( singleton, time_created, @@ -3555,7 +3575,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '39.0.0', NULL) + ( TRUE, NOW(), NOW(), '40.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/rss-service-plan-v2.json b/schema/rss-service-plan-v2.json index 10d8f8ab95..ee0b21af81 100644 --- a/schema/rss-service-plan-v2.json +++ b/schema/rss-service-plan-v2.json @@ -274,6 +274,26 @@ ] } } + }, + { + "description": "A vNIC associated with a probe", + "type": "object", + "required": [ + "id", + "type" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "probe" + ] + } + } } ] }, diff --git a/sled-agent/src/lib.rs b/sled-agent/src/lib.rs index bfc23b248d..a4686bdb88 100644 --- a/sled-agent/src/lib.rs +++ b/sled-agent/src/lib.rs @@ -30,6 +30,7 @@ mod long_running_tasks; mod metrics; mod nexus; pub mod params; +mod probe_manager; mod profile; pub mod rack_setup; pub mod server; diff --git a/sled-agent/src/params.rs b/sled-agent/src/params.rs index d192f745f6..f30c910efc 100644 --- a/sled-agent/src/params.rs +++ b/sled-agent/src/params.rs @@ -742,7 +742,7 @@ impl From for sled_agent_client::types::OmicronZoneType { domain, ntp_servers, snat_cfg, - nic: nic.into(), + nic: nic, }, OmicronZoneType::Clickhouse { address, dataset } => { Other::Clickhouse { @@ -778,7 +778,7 @@ impl From for sled_agent_client::types::OmicronZoneType { dataset: dataset.into(), http_address: http_address.to_string(), dns_address: dns_address.to_string(), - nic: nic.into(), + nic: nic, }, OmicronZoneType::InternalDns { dataset, @@ -815,7 +815,7 @@ impl From for sled_agent_client::types::OmicronZoneType { external_ip, external_tls, internal_address: internal_address.to_string(), - nic: nic.into(), + nic: nic, }, OmicronZoneType::Oximeter { address } => { Other::Oximeter { address: address.to_string() } diff --git a/sled-agent/src/probe_manager.rs b/sled-agent/src/probe_manager.rs new file mode 100644 index 0000000000..8481dc4b79 --- /dev/null +++ b/sled-agent/src/probe_manager.rs @@ -0,0 +1,383 @@ +use crate::nexus::NexusClientWithResolver; +use anyhow::{anyhow, Result}; +use illumos_utils::dladm::Etherstub; +use illumos_utils::link::VnicAllocator; +use illumos_utils::opte::params::VpcFirewallRule; +use 
illumos_utils::opte::{DhcpCfg, PortManager};
+use illumos_utils::running_zone::{RunningZone, ZoneBuilderFactory};
+use illumos_utils::zone::Zones;
+use nexus_client::types::{ProbeExternalIp, ProbeInfo};
+use omicron_common::api::external::{
+    VpcFirewallRuleAction, VpcFirewallRuleDirection, VpcFirewallRulePriority,
+    VpcFirewallRuleStatus,
+};
+use omicron_common::api::internal::shared::NetworkInterface;
+use rand::prelude::SliceRandom;
+use rand::SeedableRng;
+use sled_storage::dataset::ZONE_DATASET;
+use sled_storage::manager::StorageHandle;
+use slog::{error, warn, Logger};
+use std::collections::{HashMap, HashSet};
+use std::hash::{Hash, Hasher};
+use std::sync::Arc;
+use std::time::Duration;
+use tokio::sync::Mutex;
+use tokio::task::JoinHandle;
+use tokio::time::sleep;
+use uuid::Uuid;
+use zone::Zone;
+
+/// Prefix used for probe zone names
+const PROBE_ZONE_PREFIX: &str = "oxz_probe";
+
+/// How long to wait between check-ins with nexus
+const RECONCILIATION_INTERVAL: Duration = Duration::from_secs(1);
+
+/// The scope to use when allocating VNICs
+const VNIC_ALLOCATOR_SCOPE: &str = "probe";
+
+/// The probe manager periodically asks nexus what probes it should be running.
+/// It checks the probes it should be running versus the probes it's actually
+/// running and reconciles any differences.
+pub(crate) struct ProbeManager {
+    inner: Arc<ProbeManagerInner>,
+}
+
+pub(crate) struct ProbeManagerInner {
+    join_handle: Mutex<Option<JoinHandle<()>>>,
+    nexus_client: NexusClientWithResolver,
+    log: Logger,
+    sled_id: Uuid,
+    vnic_allocator: VnicAllocator<Etherstub>,
+    storage: StorageHandle,
+    port_manager: PortManager,
+    running_probes: Mutex<HashMap<Uuid, RunningZone>>,
+}
+
+impl ProbeManager {
+    pub(crate) fn new(
+        sled_id: Uuid,
+        nexus_client: NexusClientWithResolver,
+        etherstub: Etherstub,
+        storage: StorageHandle,
+        port_manager: PortManager,
+        log: Logger,
+    ) -> Self {
+        Self {
+            inner: Arc::new(ProbeManagerInner {
+                join_handle: Mutex::new(None),
+                vnic_allocator: VnicAllocator::new(
+                    VNIC_ALLOCATOR_SCOPE,
+                    etherstub,
+                ),
+                running_probes: Mutex::new(HashMap::new()),
+                nexus_client,
+                log,
+                sled_id,
+                storage,
+                port_manager,
+            }),
+        }
+    }
+
+    pub(crate) async fn run(&self) {
+        self.inner.run().await;
+    }
+}
+
+/// State information about a probe. This is a common representation that
+/// captures elements from both the nexus and running-zone representation of a
+/// probe.
+#[derive(Debug, Clone)]
+struct ProbeState {
+    /// Id as determined by nexus
+    id: Uuid,
+    /// Runtime state on this sled
+    status: zone::State,
+    /// The external IP addresses the probe has been assigned.
+    external_ips: Vec<ProbeExternalIp>,
+    /// The probe's networking interface.
+    interface: Option<NetworkInterface>,
+}
+
+impl PartialEq for ProbeState {
+    fn eq(&self, other: &Self) -> bool {
+        self.id.eq(&other.id)
+    }
+}
+
+impl Eq for ProbeState {}
+
+impl Hash for ProbeState {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.id.hash(state)
+    }
+}
+
+/// Translate from the nexus API `ProbeInfo` into a `ProbeState`
+impl From<ProbeInfo> for ProbeState {
+    fn from(value: ProbeInfo) -> Self {
+        Self {
+            id: value.id,
+            status: zone::State::Running,
+            external_ips: value.external_ips,
+            interface: Some(value.interface),
+        }
+    }
+}
+
+/// Translate from running zone state into a `ProbeState`
+impl TryFrom<Zone> for ProbeState {
+    type Error = String;
+    fn try_from(value: Zone) -> std::result::Result<Self, Self::Error> {
+        Ok(Self {
+            id: value
+                .name()
+                .strip_prefix(&format!("{PROBE_ZONE_PREFIX}_"))
+                .ok_or(String::from("not a probe prefix"))?
+                .parse()
+                .map_err(|e| format!("invalid uuid: {e}"))?,
+            status: value.state(),
+            external_ips: Vec::new(),
+            interface: None,
+        })
+    }
+}
+
+impl ProbeManagerInner {
+    /// Run the probe manager. If it's already running this is a no-op.
+    async fn run(self: &Arc<Self>) {
+        let mut join_handle = self.join_handle.lock().await;
+        if join_handle.is_none() {
+            *join_handle = Some(self.clone().reconciler())
+        }
+    }
+
+    /// Run the reconciler loop on a background thread.
+    fn reconciler(self: Arc<Self>) -> JoinHandle<()> {
+        tokio::spawn(async move {
+            loop {
+                sleep(RECONCILIATION_INTERVAL).await;
+
+                // Collect the target and current state. Use set operations
+                // to determine what probes need to be added, removed and/or
+                // modified.
+
+                let target = match self.target_state().await {
+                    Ok(state) => state,
+                    Err(e) => {
+                        error!(self.log, "get target probe state: {e}");
+                        continue;
+                    }
+                };
+
+                let current = match self.current_state().await {
+                    Ok(state) => state,
+                    Err(e) => {
+                        error!(self.log, "get current probe state: {e}");
+                        continue;
+                    }
+                };
+
+                self.add(target.difference(&current)).await;
+                self.remove(current.difference(&target)).await;
+                self.check(current.intersection(&target)).await;
+            }
+        })
+    }
+
+    /// Add a set of probes to this sled.
+    async fn add<'a, I>(self: &Arc<Self>, probes: I)
+    where
+        I: Iterator<Item = &'a ProbeState>,
+    {
+        for probe in probes {
+            info!(self.log, "adding probe {}", probe.id);
+            if let Err(e) = self.add_probe(probe).await {
+                error!(self.log, "add probe: {e}");
+            }
+        }
+    }
+
+    /// Add a probe to this sled. This sets up resources for the probe zone
+    /// such as storage and networking. Then it configures, installs and
+    /// boots the probe zone.
+    async fn add_probe(self: &Arc<Self>, probe: &ProbeState) -> Result<()> {
+        let mut rng = rand::rngs::StdRng::from_entropy();
+        let root = self
+            .storage
+            .get_latest_resources()
+            .await
+            .all_u2_mountpoints(ZONE_DATASET)
+            .choose(&mut rng)
+            .ok_or_else(|| anyhow!("u2 not found"))?
+            .clone();
+
+        let nic = probe
+            .interface
+            .as_ref()
+            .ok_or(anyhow!("no interface specified for probe"))?;
+
+        let eip = probe
+            .external_ips
+            .get(0)
+            .ok_or(anyhow!("expected an external ip"))?;
+
+        let port = self.port_manager.create_port(
+            &nic,
+            None,
+            Some(eip.ip),
+            &[], // floating ips
+            &[VpcFirewallRule {
+                status: VpcFirewallRuleStatus::Enabled,
+                direction: VpcFirewallRuleDirection::Inbound,
+                targets: vec![nic.clone()],
+                filter_hosts: None,
+                filter_ports: None,
+                filter_protocols: None,
+                action: VpcFirewallRuleAction::Allow,
+                priority: VpcFirewallRulePriority(100),
+            }],
+            DhcpCfg::default(),
+        )?;
+
+        let installed_zone = ZoneBuilderFactory::default()
+            .builder()
+            .with_log(self.log.clone())
+            .with_underlay_vnic_allocator(&self.vnic_allocator)
+            .with_zone_root_path(&root)
+            .with_zone_image_paths(&["/opt/oxide".into()])
+            .with_zone_type("probe")
+            .with_unique_name(probe.id)
+            .with_datasets(&[])
+            .with_filesystems(&[])
+            .with_data_links(&[])
+            .with_devices(&[])
+            .with_opte_ports(vec![port])
+            .with_links(vec![])
+            .with_limit_priv(vec![])
+            .install()
+            .await?;
+
+        info!(self.log, "installed probe {}", probe.id);
+
+        //TODO SMF properties for probe services?
+
+        let rz = RunningZone::boot(installed_zone).await?;
+        rz.ensure_address_for_port("overlay", 0).await?;
+        info!(self.log, "started probe {}", probe.id);
+
+        self.running_probes.lock().await.insert(probe.id, rz);
+
+        Ok(())
+    }
+
+    /// Remove a set of probes from this sled.
+    async fn remove<'a, I>(self: &Arc<Self>, probes: I)
+    where
+        I: Iterator<Item = &'a ProbeState>,
+    {
+        for probe in probes {
+            info!(self.log, "removing probe {}", probe.id);
+            self.remove_probe(probe.id).await;
+        }
+    }
+
+    /// Remove a probe from this sled. This tears down the zone and its
+    /// network resources.
+    async fn remove_probe(self: &Arc<Self>, id: Uuid) {
+        match self.running_probes.lock().await.remove(&id) {
+            Some(mut running_zone) => {
+                for l in running_zone.links_mut() {
+                    if let Err(e) = l.delete() {
+                        error!(self.log, "delete probe link {}: {e}", l.name());
+                    }
+                }
+                running_zone.release_opte_ports();
+                if let Err(e) = running_zone.stop().await {
+                    error!(self.log, "stop probe: {e}")
+                }
+                // TODO are there storage resources that need to be cleared
+                // out here too?
+            }
+            None => {
+                warn!(self.log, "attempt to stop non-running probe: {id}")
+            }
+        }
+    }
+
+    /// Check that probes that should be running are running, and with the
+    /// correct configuration.
+    async fn check<'a, I>(self: &Arc<Self>, probes: I)
+    where
+        I: Iterator<Item = &'a ProbeState>,
+    {
+        for probe in probes {
+            if probe.status == zone::State::Running {
+                continue;
+            }
+            warn!(
+                self.log,
+                "probe {} found in unexpected state {:?}",
+                probe.id,
+                probe.status
+            )
+            //TODO somehow handle the hooligans here?
+        }
+    }
+
+    /// Collect target probe state from the nexus internal API.
+    async fn target_state(self: &Arc<Self>) -> Result<HashSet<ProbeState>> {
+        Ok(self
+            .nexus_client
+            .client()
+            .probes_get(
+                &self.sled_id,
+                None, //limit
+                None, //page token
+                None, //sort by
+            )
+            .await?
+            .into_inner()
+            .into_iter()
+            .map(Into::into)
+            .collect())
+    }
+
+    /// Collect the current probe state from the running zones on this sled.
+    async fn current_state(self: &Arc<Self>) -> Result<HashSet<ProbeState>> {
+        Ok(Zones::get()
+            .await?
+            .into_iter()
+            .filter_map(|z| ProbeState::try_from(z).ok())
+            .collect())
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use uuid::Uuid;
+
+    #[test]
+    fn probe_state_set_ops() {
+        let a = ProbeState {
+            id: Uuid::new_v4(),
+            status: zone::State::Configured,
+            external_ips: Vec::new(),
+            interface: None,
+        };
+
+        let mut b = a.clone();
+        b.status = zone::State::Running;
+
+        let target = HashSet::from([a]);
+        let current = HashSet::from([b]);
+
+        let to_add = target.difference(&current);
+        let to_remove = current.difference(&target);
+
+        assert_eq!(to_add.count(), 0);
+        assert_eq!(to_remove.count(), 0);
+    }
+}
diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs
index 8f737f879b..4a21a6fe89 100644
--- a/sled-agent/src/sled_agent.rs
+++ b/sled-agent/src/sled_agent.rs
@@ -25,6 +25,7 @@ use crate::params::{
     OmicronZonesConfig, SledRole, TimeSync, VpcFirewallRule,
     ZoneBundleMetadata, Zpool,
 };
+use crate::probe_manager::ProbeManager;
 use crate::services::{self, ServiceManager};
 use crate::storage_monitor::UnderlayAccess;
 use crate::updates::{ConfigUpdates, UpdateManager};
@@ -309,6 +310,9 @@ struct SledAgentInner {
     // Handle to the traffic manager for writing OS updates to our boot disks.
     boot_disk_os_writer: BootDiskOsWriter,
+
+    // Component of Sled Agent responsible for managing instrumentation probes.
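+    // The manager is started once via `run()` during sled-agent startup;
+    // after that it wakes every RECONCILIATION_INTERVAL, asks Nexus for the
+    // target probe set, lists the local probe zones, and reconciles the
+    // difference (see probe_manager.rs above). Storing it here keeps the
+    // reconciler alive for the lifetime of the agent.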
+    probes: ProbeManager,
 }
 
 impl SledAgentInner {
@@ -571,6 +575,15 @@ impl SledAgent {
             nexus_notifier_task.run().await;
         });
 
+        let probes = ProbeManager::new(
+            request.body.id,
+            nexus_client.clone(),
+            etherstub.clone(),
+            storage_manager.clone(),
+            port_manager.clone(),
+            log.new(o!("component" => "ProbeManager")),
+        );
+
         let sled_agent = SledAgent {
             inner: Arc::new(SledAgentInner {
                 id: request.body.id,
@@ -578,6 +591,7 @@ impl SledAgent {
                 start_request: request,
                 storage: long_running_task_handles.storage_manager.clone(),
                 instances,
+                probes,
                 hardware: long_running_task_handles.hardware_manager.clone(),
                 updates,
                 port_manager,
@@ -593,6 +607,8 @@ impl SledAgent {
             log: log.clone(),
         };
 
+        sled_agent.inner.probes.run().await;
+
         // We immediately add a notification to the request queue about our
         // existence. If inspection of the hardware later informs us that we're
         // actually running on a scrimlet, that's fine, the updated value will
diff --git a/tools/ci_download_maghemite_mgd b/tools/ci_download_maghemite_mgd
index 9890e4505e..bf6be1d5b1 100755
--- a/tools/ci_download_maghemite_mgd
+++ b/tools/ci_download_maghemite_mgd
@@ -29,6 +29,8 @@ PACKAGE_BASE_URL="$ARTIFACT_URL/$REPO/image/$COMMIT"
 
 function main
 {
+    rm -rf "$DOWNLOAD_DIR/root"
+
     #
     # Process command-line arguments. We generally don't expect any, but
     # we allow callers to specify a value to override OSTYPE, just for
diff --git a/tools/ci_download_maghemite_openapi b/tools/ci_download_maghemite_openapi
index 56ce640a76..7255e57cf4 100755
--- a/tools/ci_download_maghemite_openapi
+++ b/tools/ci_download_maghemite_openapi
@@ -15,10 +15,10 @@ TARGET_DIR="out"
 # Location where intermediate artifacts are downloaded / unpacked.
 DOWNLOAD_DIR="$TARGET_DIR/downloads"
 
-
 function main
 {
+    rm -rf "$DOWNLOAD_DIR/root"
+
     if [[ $# != 0 ]]; then
         echo "unexpected arguments" >&2
         exit 2
diff --git a/tools/ci_download_thundermuffin b/tools/ci_download_thundermuffin
new file mode 100755
index 0000000000..014d1b30b2
--- /dev/null
+++ b/tools/ci_download_thundermuffin
@@ -0,0 +1,153 @@
+#!/bin/bash
+
+#
+# ci_download_thundermuffin: fetches the thundermuffin binary tarball package,
+# unpacks it, and creates a copy, all in the current directory
+#
+
+set -o pipefail
+set -o xtrace
+set -o errexit
+
+SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+ARG0="$(basename "${BASH_SOURCE[0]}")"
+
+source "$SOURCE_DIR/thundermuffin_checksums"
+source "$SOURCE_DIR/thundermuffin_version"
+
+TARGET_DIR="out"
+# Location where intermediate artifacts are downloaded / unpacked.
+DOWNLOAD_DIR="$TARGET_DIR/downloads"
+# Location where the final thundermuffin directory should end up.
+DEST_DIR="./$TARGET_DIR/thundermuffin"
+BIN_DIR="$DEST_DIR/root/opt/oxide/thundermuffin/bin"
+
+ARTIFACT_URL="https://buildomat.eng.oxide.computer/public/file"
+
+REPO='oxidecomputer/thundermuffin'
+PACKAGE_BASE_URL="$ARTIFACT_URL/$REPO/image/$COMMIT"
+
+function main
+{
+    rm -rf "$DOWNLOAD_DIR/root"
+
+    #
+    # Process command-line arguments. We generally don't expect any, but
+    # we allow callers to specify a value to override OSTYPE, just for
+    # testing.
+    #
+    if [[ $# != 0 ]]; then
+        CIDL_OS="$1"
+        shift
+    else
+        CIDL_OS="$OSTYPE"
+    fi
+
+    if [[ $# != 0 ]]; then
+        echo "unexpected arguments" >&2
+        exit 2
+    fi
+
+    # Configure this program
+    configure_os "$CIDL_OS"
+
+    CIDL_SHA256FUNC="do_sha256sum"
+    TARBALL_FILENAME="thundermuffin.tar.gz"
+    PACKAGE_URL="$PACKAGE_BASE_URL/$TARBALL_FILENAME"
+    TARBALL_FILE="$DOWNLOAD_DIR/$TARBALL_FILENAME"
+
+    # Download the file.
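+    # (fetch_and_verify, defined below, skips the network fetch when a
+    # tarball with a matching sha256 is already present in $DOWNLOAD_DIR,
+    # so re-runs of this script are cheap.)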
+ echo "URL: $PACKAGE_URL" + echo "Local file: $TARBALL_FILE" + + mkdir -p "$DOWNLOAD_DIR" + mkdir -p "$DEST_DIR" + + fetch_and_verify + + do_untar "$TARBALL_FILE" + + do_assemble + + $SET_BINARIES +} + +function fail +{ + echo "$ARG0: $@" >&2 + exit 1 +} + +function configure_os +{ + echo "current directory: $PWD" + echo "configuring based on OS: \"$1\"" + case "$1" in + solaris*) + SET_BINARIES="" + ;; + *) + echo "WARNING: binaries for $1 are not published by thundermuffin" + SET_BINARIES="unsupported_os" + ;; + esac +} + +function do_download_curl +{ + curl --silent --show-error --fail --location --output "$2" "$1" +} + +function do_sha256sum +{ + sha256sum < "$1" | awk '{print $1}' +} + +function do_untar +{ + tar xzf "$1" -C "$DOWNLOAD_DIR" +} + +function do_assemble +{ + rm -r "$DEST_DIR" || true + mkdir "$DEST_DIR" + cp -r "$DOWNLOAD_DIR/root" "$DEST_DIR/root" +} + +function fetch_and_verify +{ + local DO_DOWNLOAD="true" + if [[ -f "$TARBALL_FILE" ]]; then + # If the file exists with a valid checksum, we can skip downloading. + calculated_sha256="$($CIDL_SHA256FUNC "$TARBALL_FILE")" || \ + fail "failed to calculate sha256sum" + if [[ "$calculated_sha256" == "$CIDL_SHA256" ]]; then + DO_DOWNLOAD="false" + fi + fi + + if [ "$DO_DOWNLOAD" == "true" ]; then + echo "Downloading..." + do_download_curl "$PACKAGE_URL" "$TARBALL_FILE" || \ + fail "failed to download file" + + # Verify the sha256sum. + calculated_sha256="$($CIDL_SHA256FUNC "$TARBALL_FILE")" || \ + fail "failed to calculate sha256sum" + if [[ "$calculated_sha256" != "$CIDL_SHA256" ]]; then + fail "sha256sum mismatch \ + (expected $CIDL_SHA256, found $calculated_sha256)" + fi + fi + +} + +function unsupported_os +{ + mkdir -p "$BIN_DIR" + echo "echo 'unsupported os' && exit 1" >> "$BIN_DIR/thundermuffin" + chmod +x "$BIN_DIR/thundermuffin" +} + +main "$@" diff --git a/tools/install_builder_prerequisites.sh b/tools/install_builder_prerequisites.sh index 5fa8ec11ba..09b8a27677 100755 --- a/tools/install_builder_prerequisites.sh +++ b/tools/install_builder_prerequisites.sh @@ -217,6 +217,9 @@ retry ./tools/ci_download_maghemite_mgd # xcvradm binary which is bundled with the switch zone. retry ./tools/ci_download_transceiver_control +# Download thundermuffin. This is required to launch network probes. 
+retry ./tools/ci_download_thundermuffin + # Validate the PATH: expected_in_path=( 'pg_config' diff --git a/tools/thundermuffin_checksums b/tools/thundermuffin_checksums new file mode 100644 index 0000000000..5e10539bdd --- /dev/null +++ b/tools/thundermuffin_checksums @@ -0,0 +1 @@ +CIDL_SHA256="dc55a2accd33a347df4cbdc0026cbaccea2c004940c3fec8cadcdd633d440dfa" diff --git a/tools/thundermuffin_version b/tools/thundermuffin_version new file mode 100644 index 0000000000..cbca739f5c --- /dev/null +++ b/tools/thundermuffin_version @@ -0,0 +1 @@ +COMMIT="a4a6108d7c9aac2464a0b6898e88132a8f701a13" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index e70c7c329e..5efbb6c1f1 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -14,26 +14,26 @@ publish = false ### BEGIN HAKARI SECTION [dependencies] -ahash = { version = "0.8.7" } -aho-corasick = { version = "1.0.4" } -anyhow = { version = "1.0.75", features = ["backtrace"] } +ahash = { version = "0.8.8" } +aho-corasick = { version = "1.1.2" } +anyhow = { version = "1.0.79", features = ["backtrace"] } base16ct = { version = "0.2.0", default-features = false, features = ["alloc"] } bit-set = { version = "0.5.3" } bit-vec = { version = "0.6.3" } bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1.3.2" } -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["serde"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["serde"] } bstr-6f8ce4dd05d13bba = { package = "bstr", version = "0.2.17" } -bstr-dff4ba8e3ae991db = { package = "bstr", version = "1.6.0" } +bstr-dff4ba8e3ae991db = { package = "bstr", version = "1.9.0" } byteorder = { version = "1.5.0" } bytes = { version = "1.5.0", features = ["serde"] } -chrono = { version = "0.4.31", features = ["alloc", "serde"] } +chrono = { version = "0.4.34", features = ["serde"] } cipher = { version = "0.4.4", default-features = false, features = ["block-padding", "zeroize"] } -clap = { version = "4.5.0", features = ["cargo", "derive", "env", "wrap_help"] } -clap_builder = { version = "4.5.0", default-features = false, features = ["cargo", "color", "env", "std", "suggestions", "usage", "wrap_help"] } +clap = { version = "4.5.1", features = ["cargo", "derive", "env", "wrap_help"] } +clap_builder = { version = "4.5.1", default-features = false, features = ["cargo", "color", "env", "std", "suggestions", "usage", "wrap_help"] } console = { version = "0.15.8" } -const-oid = { version = "0.9.5", default-features = false, features = ["db", "std"] } -crossbeam-epoch = { version = "0.9.15" } -crossbeam-utils = { version = "0.8.16" } +const-oid = { version = "0.9.6", default-features = false, features = ["db", "std"] } +crossbeam-epoch = { version = "0.9.18" } +crossbeam-utils = { version = "0.8.19" } crossterm = { version = "0.27.0", features = ["event-stream", "serde"] } crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } der = { version = "0.7.8", default-features = false, features = ["derive", "flagset", "oid", "pem", "std"] } @@ -52,12 +52,12 @@ futures-task = { version = "0.3.30", default-features = false, features = ["std" futures-util = { version = "0.3.30", features = ["channel", "io", "sink"] } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "2739c18e80697aa6bc235c935176d14b4d757ee9", features = ["std"] } generic-array = { version = "0.14.7", 
default-features = false, features = ["more_lengths", "zeroize"] } -getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] } +getrandom = { version = "0.2.12", default-features = false, features = ["js", "rdrand", "std"] } group = { version = "0.13.0", default-features = false, features = ["alloc"] } hashbrown = { version = "0.14.3", features = ["raw"] } hex = { version = "0.4.3", features = ["serde"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } -hyper = { version = "0.14.27", features = ["full"] } +hyper = { version = "0.14.28", features = ["full"] } indexmap = { version = "2.2.5", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } ipnetwork = { version = "0.20.0", features = ["schemars"] } @@ -67,12 +67,12 @@ lazy_static = { version = "1.4.0", default-features = false, features = ["spin_n libc = { version = "0.2.153", features = ["extra_traits"] } log = { version = "0.4.20", default-features = false, features = ["std"] } managed = { version = "0.8.0", default-features = false, features = ["alloc", "map"] } -memchr = { version = "2.6.3" } +memchr = { version = "2.7.1" } nom = { version = "7.1.3" } num-bigint = { version = "0.4.4", features = ["rand"] } num-integer = { version = "0.1.46", features = ["i128"] } -num-iter = { version = "0.1.43", default-features = false, features = ["i128"] } -num-traits = { version = "0.2.16", features = ["i128", "libm"] } +num-iter = { version = "0.1.44", default-features = false, features = ["i128"] } +num-traits = { version = "0.2.18", features = ["i128", "libm"] } openapiv3 = { version = "2.0.0", default-features = false, features = ["skip_serializing_defaults"] } pem-rfc7468 = { version = "0.7.0", default-features = false, features = ["std"] } petgraph = { version = "0.6.4", features = ["serde-1"] } @@ -83,33 +83,32 @@ proc-macro2 = { version = "1.0.78" } rand = { version = "0.8.5" } rand_chacha = { version = "0.3.1", default-features = false, features = ["std"] } regex = { version = "1.10.3" } -regex-automata = { version = "0.4.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } +regex-automata = { version = "0.4.5", default-features = false, features = ["dfa-onepass", "dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } regex-syntax = { version = "0.8.2" } -reqwest = { version = "0.11.24", features = ["blocking", "json", "rustls-tls", "stream"] } +reqwest = { version = "0.11.24", features = ["blocking", "cookies", "json", "rustls-tls", "stream"] } ring = { version = "0.17.8", features = ["std"] } schemars = { version = "0.8.16", features = ["bytes", "chrono", "uuid1"] } semver = { version = "1.0.22", features = ["serde"] } serde = { version = "1.0.197", features = ["alloc", "derive", "rc"] } serde_json = { version = "1.0.114", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } -similar = { version = "2.3.0", features = ["inline", "unicode"] } +similar = { version = "2.4.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } -socket2 = { version = "0.5.5", default-features = false, features = ["all"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } subtle = { version = "2.5.0" } syn-dff4ba8e3ae991db = { package = "syn", version = 
"1.0.109", features = ["extra-traits", "fold", "full", "visit"] } syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.51", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } -time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } +time = { version = "0.3.34", features = ["formatting", "local-offset", "macros", "parsing"] } tokio = { version = "1.36.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } tokio-stream = { version = "0.1.14", features = ["net"] } tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } toml = { version = "0.7.8" } toml_edit-3c51e837cfc5589a = { package = "toml_edit", version = "0.22.6", features = ["serde"] } -tracing = { version = "0.1.37", features = ["log"] } +tracing = { version = "0.1.40", features = ["log"] } trust-dns-proto = { version = "0.22.0" } -unicode-bidi = { version = "0.3.13" } +unicode-bidi = { version = "0.3.15" } unicode-normalization = { version = "0.1.22" } usdt = { version = "0.3.5" } usdt-impl = { version = "0.5.0", default-features = false, features = ["asm", "des"] } @@ -120,26 +119,26 @@ zeroize = { version = "1.7.0", features = ["std", "zeroize_derive"] } zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [build-dependencies] -ahash = { version = "0.8.7" } -aho-corasick = { version = "1.0.4" } -anyhow = { version = "1.0.75", features = ["backtrace"] } +ahash = { version = "0.8.8" } +aho-corasick = { version = "1.1.2" } +anyhow = { version = "1.0.79", features = ["backtrace"] } base16ct = { version = "0.2.0", default-features = false, features = ["alloc"] } bit-set = { version = "0.5.3" } bit-vec = { version = "0.6.3" } bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1.3.2" } -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["serde"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["serde"] } bstr-6f8ce4dd05d13bba = { package = "bstr", version = "0.2.17" } -bstr-dff4ba8e3ae991db = { package = "bstr", version = "1.6.0" } +bstr-dff4ba8e3ae991db = { package = "bstr", version = "1.9.0" } byteorder = { version = "1.5.0" } bytes = { version = "1.5.0", features = ["serde"] } -chrono = { version = "0.4.31", features = ["alloc", "serde"] } +chrono = { version = "0.4.34", features = ["serde"] } cipher = { version = "0.4.4", default-features = false, features = ["block-padding", "zeroize"] } -clap = { version = "4.5.0", features = ["cargo", "derive", "env", "wrap_help"] } -clap_builder = { version = "4.5.0", default-features = false, features = ["cargo", "color", "env", "std", "suggestions", "usage", "wrap_help"] } +clap = { version = "4.5.1", features = ["cargo", "derive", "env", "wrap_help"] } +clap_builder = { version = "4.5.1", default-features = false, features = ["cargo", "color", "env", "std", "suggestions", "usage", "wrap_help"] } console = { version = "0.15.8" } -const-oid = { version = "0.9.5", default-features = false, features = ["db", "std"] } -crossbeam-epoch = { version = "0.9.15" } -crossbeam-utils = { version = "0.8.16" } +const-oid = { version = "0.9.6", default-features = false, features = ["db", "std"] } +crossbeam-epoch = { version = "0.9.18" } +crossbeam-utils = { version = "0.8.19" } crossterm = { version = "0.27.0", features = ["event-stream", "serde"] } crypto-common = { version = 
"0.1.6", default-features = false, features = ["getrandom", "std"] } der = { version = "0.7.8", default-features = false, features = ["derive", "flagset", "oid", "pem", "std"] } @@ -158,12 +157,12 @@ futures-task = { version = "0.3.30", default-features = false, features = ["std" futures-util = { version = "0.3.30", features = ["channel", "io", "sink"] } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "2739c18e80697aa6bc235c935176d14b4d757ee9", features = ["std"] } generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } -getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] } +getrandom = { version = "0.2.12", default-features = false, features = ["js", "rdrand", "std"] } group = { version = "0.13.0", default-features = false, features = ["alloc"] } hashbrown = { version = "0.14.3", features = ["raw"] } hex = { version = "0.4.3", features = ["serde"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } -hyper = { version = "0.14.27", features = ["full"] } +hyper = { version = "0.14.28", features = ["full"] } indexmap = { version = "2.2.5", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } ipnetwork = { version = "0.20.0", features = ["schemars"] } @@ -173,12 +172,12 @@ lazy_static = { version = "1.4.0", default-features = false, features = ["spin_n libc = { version = "0.2.153", features = ["extra_traits"] } log = { version = "0.4.20", default-features = false, features = ["std"] } managed = { version = "0.8.0", default-features = false, features = ["alloc", "map"] } -memchr = { version = "2.6.3" } +memchr = { version = "2.7.1" } nom = { version = "7.1.3" } num-bigint = { version = "0.4.4", features = ["rand"] } num-integer = { version = "0.1.46", features = ["i128"] } -num-iter = { version = "0.1.43", default-features = false, features = ["i128"] } -num-traits = { version = "0.2.16", features = ["i128", "libm"] } +num-iter = { version = "0.1.44", default-features = false, features = ["i128"] } +num-traits = { version = "0.2.18", features = ["i128", "libm"] } openapiv3 = { version = "2.0.0", default-features = false, features = ["skip_serializing_defaults"] } pem-rfc7468 = { version = "0.7.0", default-features = false, features = ["std"] } petgraph = { version = "0.6.4", features = ["serde-1"] } @@ -189,34 +188,33 @@ proc-macro2 = { version = "1.0.78" } rand = { version = "0.8.5" } rand_chacha = { version = "0.3.1", default-features = false, features = ["std"] } regex = { version = "1.10.3" } -regex-automata = { version = "0.4.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } +regex-automata = { version = "0.4.5", default-features = false, features = ["dfa-onepass", "dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } regex-syntax = { version = "0.8.2" } -reqwest = { version = "0.11.24", features = ["blocking", "json", "rustls-tls", "stream"] } +reqwest = { version = "0.11.24", features = ["blocking", "cookies", "json", "rustls-tls", "stream"] } ring = { version = "0.17.8", features = ["std"] } schemars = { version = "0.8.16", features = ["bytes", "chrono", "uuid1"] } semver = { version = "1.0.22", features = ["serde"] } serde = { version = "1.0.197", features = ["alloc", "derive", "rc"] } serde_json = { version = "1.0.114", features = ["raw_value", 
"unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } -similar = { version = "2.3.0", features = ["inline", "unicode"] } +similar = { version = "2.4.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } -socket2 = { version = "0.5.5", default-features = false, features = ["all"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } subtle = { version = "2.5.0" } syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.51", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } -time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } -time-macros = { version = "0.2.13", default-features = false, features = ["formatting", "parsing"] } +time = { version = "0.3.34", features = ["formatting", "local-offset", "macros", "parsing"] } +time-macros = { version = "0.2.17", default-features = false, features = ["formatting", "parsing"] } tokio = { version = "1.36.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } tokio-stream = { version = "0.1.14", features = ["net"] } tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } toml = { version = "0.7.8" } toml_edit-3c51e837cfc5589a = { package = "toml_edit", version = "0.22.6", features = ["serde"] } -tracing = { version = "0.1.37", features = ["log"] } +tracing = { version = "0.1.40", features = ["log"] } trust-dns-proto = { version = "0.22.0" } -unicode-bidi = { version = "0.3.13" } +unicode-bidi = { version = "0.3.15" } unicode-normalization = { version = "0.1.22" } usdt = { version = "0.3.5" } usdt-impl = { version = "0.5.0", default-features = false, features = ["asm", "des"] } @@ -227,45 +225,45 @@ zeroize = { version = "1.7.0", features = ["std", "zeroize_derive"] } zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [target.x86_64-unknown-linux-gnu.dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.x86_64-unknown-linux-gnu.build-dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.x86_64-apple-darwin.dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = 
"1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.x86_64-apple-darwin.build-dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.aarch64-apple-darwin.dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.aarch64-apple-darwin.build-dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } [target.x86_64-unknown-illumos.dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } @@ -274,7 +272,7 @@ toml_datetime = { version = "0.6.5", default-features = false, features = ["serd toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } [target.x86_64-unknown-illumos.build-dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } mio = { version = "0.8.11", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } From 1eaad083ca8a4363f783f550266084d84ba8c866 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Sat, 9 Mar 2024 19:20:01 -0800 Subject: [PATCH 095/157] test environment could be more realistic (#5239) --- Cargo.lock | 2 + dev-tools/omdb/tests/env.out | 3 + dev-tools/omdb/tests/successes.out | 13 +- dev-tools/omicron-dev/src/bin/omicron-dev.rs | 10 +- .../tests/output/collector_basic.txt | 4 +- .../output/collector_sled_agent_errors.txt | 2 +- .../app/background/inventory_collection.rs | 4 +- nexus/src/lib.rs | 22 +- nexus/test-interface/src/lib.rs | 1 + nexus/test-utils/Cargo.toml | 2 + nexus/test-utils/src/lib.rs | 376 +++++++++++++++--- .../tests/integration_tests/initialization.rs | 29 +- nexus/tests/integration_tests/sleds.rs | 14 +- sled-agent/src/sim/sled_agent.rs | 2 +- 14 files changed, 383 insertions(+), 101 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0cfc7b4500..db48902f75 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4648,6 +4648,7 @@ dependencies = [ "headers", "http 0.2.12", "hyper 0.14.28", + 
"illumos-utils", "internal-dns", "nexus-config", "nexus-db-queries", @@ -4664,6 +4665,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "sled-agent-client", "slog", "tokio", "tokio-util", diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out index 3e6e89d508..ef8cf1631e 100644 --- a/dev-tools/omdb/tests/env.out +++ b/dev-tools/omdb/tests/env.out @@ -3,6 +3,7 @@ termination: Exited(0) --------------------------------------------- stdout: SERIAL IP ROLE ID +sim-039be560 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED sim-b6d65341 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: @@ -268,6 +269,7 @@ termination: Exited(0) --------------------------------------------- stdout: SERIAL IP ROLE ID +sim-039be560 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED sim-b6d65341 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: @@ -281,6 +283,7 @@ termination: Exited(0) --------------------------------------------- stdout: SERIAL IP ROLE ID +sim-039be560 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED sim-b6d65341 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index fe590acf55..2da6e4dceb 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -77,12 +77,10 @@ termination: Exited(0) stdout: SERVICE INSTANCE_ID ADDR SLED_SERIAL CruciblePantry REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 -Dendrite REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 -Dendrite REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 ExternalDns REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 InternalDns REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 Nexus REDACTED_UUID_REDACTED_UUID_REDACTED [::ffff:127.0.0.1]:REDACTED_PORT sim-b6d65341 -Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 +Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-039be560 Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 --------------------------------------------- stderr: @@ -93,17 +91,19 @@ EXECUTING COMMAND: omdb ["db", "services", "list-by-sled"] termination: Exited(0) --------------------------------------------- stdout: +sled: sim-039be560 (id REDACTED_UUID_REDACTED_UUID_REDACTED) + + SERVICE INSTANCE_ID ADDR + Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT + sled: sim-b6d65341 (id REDACTED_UUID_REDACTED_UUID_REDACTED) SERVICE INSTANCE_ID ADDR CruciblePantry REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT - Dendrite REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT - Dendrite REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT ExternalDns REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT InternalDns REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT Nexus REDACTED_UUID_REDACTED_UUID_REDACTED [::ffff:127.0.0.1]:REDACTED_PORT Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT - Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT --------------------------------------------- stderr: @@ -115,6 +115,7 @@ termination: Exited(0) --------------------------------------------- stdout: SERIAL IP ROLE ID 
+sim-039be560 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED sim-b6d65341 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: diff --git a/dev-tools/omicron-dev/src/bin/omicron-dev.rs b/dev-tools/omicron-dev/src/bin/omicron-dev.rs index 5e0c6486d6..705049bdb1 100644 --- a/dev-tools/omicron-dev/src/bin/omicron-dev.rs +++ b/dev-tools/omicron-dev/src/bin/omicron-dev.rs @@ -543,10 +543,12 @@ async fn cmd_run_all(args: &RunAllArgs) -> Result<(), anyhow::Error> { cptestctx.silo_name, cptestctx.external_dns_zone_name, ); - println!( - "omicron-dev: management gateway: http://{}", - cptestctx.gateway.client.bind_address, - ); + for (location, gateway) in &cptestctx.gateway { + println!( + "omicron-dev: management gateway: http://{} ({})", + gateway.client.bind_address, location, + ); + } println!("omicron-dev: silo name: {}", cptestctx.silo_name,); println!( "omicron-dev: privileged user name: {}", diff --git a/nexus/inventory/tests/output/collector_basic.txt b/nexus/inventory/tests/output/collector_basic.txt index 4a05f09e1c..0fc1c552ab 100644 --- a/nexus/inventory/tests/output/collector_basic.txt +++ b/nexus/inventory/tests/output/collector_basic.txt @@ -71,12 +71,12 @@ rot pages found: CfpaScratch baseboard part "i86pc" serial "SimGimlet01": data_base64 "Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" sled agents found: - sled 03265caf-da7d-46c7-b1c2-39fa90ce5c65 (Gimlet) + sled 03265caf-da7d-46c7-b1c2-39fa90ce5c65 (Scrimlet) baseboard Some(BaseboardId { part_number: "sim-gimlet", serial_number: "sim-03265caf-da7d-46c7-b1c2-39fa90ce5c65" }) zone generation: Generation(3) zones found: zone 8b88a56f-3eb6-4d80-ba42-75d867bc427d type oximeter - sled 9cb9b78f-5614-440c-b66d-e8e81fab69b0 (Gimlet) + sled 9cb9b78f-5614-440c-b66d-e8e81fab69b0 (Scrimlet) baseboard Some(BaseboardId { part_number: "sim-gimlet", serial_number: "sim-9cb9b78f-5614-440c-b66d-e8e81fab69b0" }) zone generation: Generation(3) zones found: diff --git a/nexus/inventory/tests/output/collector_sled_agent_errors.txt b/nexus/inventory/tests/output/collector_sled_agent_errors.txt index aaa31fd1bb..7b9bbce84e 100644 --- a/nexus/inventory/tests/output/collector_sled_agent_errors.txt +++ b/nexus/inventory/tests/output/collector_sled_agent_errors.txt @@ -70,7 +70,7 @@ rot pages found: CfpaScratch baseboard part "i86pc" serial "SimGimlet01": data_base64 
"Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" sled agents found: - sled 9cb9b78f-5614-440c-b66d-e8e81fab69b0 (Gimlet) + sled 9cb9b78f-5614-440c-b66d-e8e81fab69b0 (Scrimlet) baseboard Some(BaseboardId { part_number: "sim-gimlet", serial_number: "sim-9cb9b78f-5614-440c-b66d-e8e81fab69b0" }) zone generation: Generation(3) zones found: diff --git a/nexus/src/app/background/inventory_collection.rs b/nexus/src/app/background/inventory_collection.rs index 27f08ec738..0666c136fc 100644 --- a/nexus/src/app/background/inventory_collection.rs +++ b/nexus/src/app/background/inventory_collection.rs @@ -314,9 +314,9 @@ mod test { page_size: NonZeroU32::new(3).unwrap(), }; - // There will be one sled agent set up as part of the test context. + // There will be two sled agents set up as part of the test context. let found_urls = db_enum.list_sled_agents().await.unwrap(); - assert_eq!(found_urls.len(), 1); + assert_eq!(found_urls.len(), 2); // Insert some sleds. let rack_id = Uuid::new_v4(); diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index 771a78f0b1..c0fba31afb 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -27,6 +27,7 @@ use dropshot::ConfigDropshot; use external_api::http_entrypoints::external_api; use internal_api::http_entrypoints::internal_api; use nexus_config::NexusConfig; +use nexus_types::external_api::views::SledProvisionPolicy; use nexus_types::internal_api::params::ServiceKind; use omicron_common::address::IpRange; use omicron_common::api::internal::shared::{ @@ -237,6 +238,7 @@ impl nexus_test_interface::NexusServer for Server { external_dns_zone_name: &str, recovery_silo: nexus_types::internal_api::params::RecoverySiloConfig, certs: Vec, + disable_sled_id: Uuid, ) -> Self { // Perform the "handoff from RSS". // @@ -302,7 +304,25 @@ impl nexus_test_interface::NexusServer for Server { .expect("Could not initialize rack"); // Start the Nexus external API. - Server::start(internal_server).await.unwrap() + let rv = Server::start(internal_server).await.unwrap(); + + // Historically, tests have assumed that there's only one provisionable + // sled, and that's convenient for a lot of purposes. Mark our second + // sled non-provisionable. 
+ let nexus = &rv.apictx().nexus; + nexus + .sled_set_provision_policy( + &opctx, + &nexus_db_queries::db::lookup::LookupPath::new( + &opctx, + nexus.datastore(), + ) + .sled_id(disable_sled_id), + SledProvisionPolicy::NonProvisionable, + ) + .await + .unwrap(); + rv } async fn get_http_server_external_address(&self) -> SocketAddr { diff --git a/nexus/test-interface/src/lib.rs b/nexus/test-interface/src/lib.rs index 0f53ac6445..10bc9e63f0 100644 --- a/nexus/test-interface/src/lib.rs +++ b/nexus/test-interface/src/lib.rs @@ -56,6 +56,7 @@ pub trait NexusServer: Send + Sync + 'static { external_dns_zone_name: &str, recovery_silo: nexus_types::internal_api::params::RecoverySiloConfig, tls_certificates: Vec, + disable_sled_id: Uuid, ) -> Self; async fn get_http_server_external_address(&self) -> SocketAddr; diff --git a/nexus/test-utils/Cargo.toml b/nexus/test-utils/Cargo.toml index e612547fa8..861527108b 100644 --- a/nexus/test-utils/Cargo.toml +++ b/nexus/test-utils/Cargo.toml @@ -20,6 +20,7 @@ gateway-test-utils.workspace = true headers.workspace = true http.workspace = true hyper.workspace = true +illumos-utils.workspace = true internal-dns.workspace = true nexus-config.workspace = true nexus-db-queries.workspace = true @@ -35,6 +36,7 @@ oximeter-producer.workspace = true serde.workspace = true serde_json.workspace = true serde_urlencoded.workspace = true +sled-agent-client.workspace = true slog.workspace = true tokio.workspace = true tokio-util.workspace = true diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 4ef77b3352..9681d9ff97 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -33,12 +33,20 @@ use nexus_types::internal_api::params::RecoverySiloConfig; use nexus_types::internal_api::params::ServiceKind; use nexus_types::internal_api::params::ServiceNic; use nexus_types::internal_api::params::ServicePutRequest; +use nexus_types::inventory::OmicronZoneConfig; +use nexus_types::inventory::OmicronZoneDataset; +use nexus_types::inventory::OmicronZoneType; +use nexus_types::inventory::OmicronZonesConfig; use omicron_common::address::DNS_OPTE_IPV4_SUBNET; use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; +use omicron_common::api::external::Generation; use omicron_common::api::external::MacAddr; +use omicron_common::api::external::Vni; use omicron_common::api::external::{IdentityMetadata, Name}; use omicron_common::api::internal::nexus::ProducerEndpoint; use omicron_common::api::internal::nexus::ProducerKind; +use omicron_common::api::internal::shared::NetworkInterface; +use omicron_common::api::internal::shared::NetworkInterfaceKind; use omicron_common::api::internal::shared::SwitchLocation; use omicron_sled_agent::sim; use omicron_test_utils::dev; @@ -65,6 +73,7 @@ pub mod http_testing; pub mod resource_helpers; pub const SLED_AGENT_UUID: &str = "b6d65341-167c-41df-9b5c-41cded99c229"; +pub const SLED_AGENT2_UUID: &str = "039be560-54cc-49e3-88df-1a29dadbf913"; pub const RACK_UUID: &str = "c19a698f-c6f9-4a17-ae30-20d711b8f7dc"; pub const SWITCH_UUID: &str = "dae4e1f1-410e-4314-bff1-fec0504be07e"; pub const OXIMETER_UUID: &str = "39e6175b-4df2-4730-b11d-cbc1e60a2e78"; @@ -88,9 +97,11 @@ pub struct ControlPlaneTestContext { pub logctx: LogContext, pub sled_agent_storage: camino_tempfile::Utf8TempDir, pub sled_agent: sim::Server, + pub sled_agent2_storage: camino_tempfile::Utf8TempDir, + pub sled_agent2: sim::Server, pub oximeter: Oximeter, pub producer: ProducerServer, - pub gateway: GatewayTestContext, + pub gateway: HashMap, pub 
dendrite: HashMap, pub mgd: HashMap, pub external_dns_zone_name: String, @@ -110,9 +121,12 @@ impl ControlPlaneTestContext { self.database.cleanup().await.unwrap(); self.clickhouse.cleanup().await.unwrap(); self.sled_agent.http_server.close().await.unwrap(); + self.sled_agent2.http_server.close().await.unwrap(); self.oximeter.close().await.unwrap(); self.producer.close().await.unwrap(); - self.gateway.teardown().await; + for (_, gateway) in self.gateway { + gateway.teardown().await; + } for (_, mut dendrite) in self.dendrite { dendrite.cleanup().await.unwrap(); } @@ -179,18 +193,18 @@ impl RackInitRequestBuilder { // Keeps track of: // - The "ServicePutRequest" (for handoff to Nexus) // - The internal DNS configuration for this service - fn add_service( + fn add_service_with_id( &mut self, + zone_id: Uuid, address: SocketAddrV6, kind: ServiceKind, service_name: internal_dns::ServiceName, sled_id: Uuid, ) { - let zone_id = Uuid::new_v4(); self.services.push(ServicePutRequest { address, kind, - service_id: Uuid::new_v4(), + service_id: zone_id, sled_id, zone_id: Some(zone_id), }); @@ -203,6 +217,22 @@ impl RackInitRequestBuilder { .expect("Failed to set up DNS for {kind}"); } + fn add_service_without_dns( + &mut self, + zone_id: Uuid, + address: SocketAddrV6, + kind: ServiceKind, + sled_id: Uuid, + ) { + self.services.push(ServicePutRequest { + address, + kind, + service_id: zone_id, + sled_id, + zone_id: Some(zone_id), + }); + } + // Keeps track of: // - The "DatasetPutRequest" (for handoff to Nexus) // - The internal DNS configuration for this service @@ -245,9 +275,11 @@ pub struct ControlPlaneTestContextBuilder<'a, N: NexusServer> { pub clickhouse: Option, pub sled_agent_storage: Option, pub sled_agent: Option, + pub sled_agent2_storage: Option, + pub sled_agent2: Option, pub oximeter: Option, pub producer: Option, - pub gateway: Option, + pub gateway: HashMap, pub dendrite: HashMap, pub mgd: HashMap, @@ -260,6 +292,8 @@ pub struct ControlPlaneTestContextBuilder<'a, N: NexusServer> { pub external_dns: Option, pub internal_dns: Option, dns_config: Option, + omicron_zones: Vec, + omicron_zones2: Vec, pub silo_name: Option, pub user_name: Option, @@ -289,9 +323,11 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { clickhouse: None, sled_agent_storage: None, sled_agent: None, + sled_agent2_storage: None, + sled_agent2: None, oximeter: None, producer: None, - gateway: None, + gateway: HashMap::new(), dendrite: HashMap::new(), mgd: HashMap::new(), nexus_internal: None, @@ -300,6 +336,8 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { external_dns: None, internal_dns: None, dns_config: None, + omicron_zones: Vec::new(), + omicron_zones2: Vec::new(), silo_name: None, user_name: None, } @@ -380,6 +418,18 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { DatasetKind::Cockroach, internal_dns::ServiceName::Cockroach, ); + let pool_name = illumos_utils::zpool::ZpoolName::new_external(zpool_id) + .to_string() + .parse() + .unwrap(); + self.omicron_zones.push(OmicronZoneConfig { + id: dataset_id, + underlay_address: *address.ip(), + zone_type: OmicronZoneType::CockroachDb { + address: address.to_string(), + dataset: OmicronZoneDataset { pool_name }, + }, + }); self.database = Some(database); } @@ -416,37 +466,40 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .as_mut() .expect("Tests expect to set a port of Clickhouse") .set_port(port); + + let pool_name = illumos_utils::zpool::ZpoolName::new_external(zpool_id) + .to_string() + 
.parse() + .unwrap(); + self.omicron_zones.push(OmicronZoneConfig { + id: dataset_id, + underlay_address: *address.ip(), + zone_type: OmicronZoneType::Clickhouse { + address: address.to_string(), + dataset: OmicronZoneDataset { pool_name }, + }, + }); } - pub async fn start_gateway(&mut self) { - // For now, this MGS is not configured to match up in any way with - // either the simulated sled agent or the Dendrite instances. It's - // useful for testing stuff unrelated to that. But at some point we - // will probably want the reported data to match up better. + pub async fn start_gateway( + &mut self, + switch_location: SwitchLocation, + port: Option, + ) { debug!(&self.logctx.log, "Starting Management Gateway"); - let gateway = gateway_test_utils::setup::test_setup( + let (mgs_config, sp_sim_config) = + gateway_test_utils::setup::load_test_config(); + let mgs_addr = + port.map(|port| SocketAddrV6::new(Ipv6Addr::LOCALHOST, port, 0, 0)); + let gateway = gateway_test_utils::setup::test_setup_with_config( self.test_name, gateway_messages::SpPort::One, + mgs_config, + &sp_sim_config, + mgs_addr, ) .await; - let fake_mgs_zone_id = Uuid::new_v4(); - let SocketAddr::V6(v6addr) = gateway.client.bind_address else { - panic!("MGS unexpectedly listening on IPv4?"); - }; - let zone = self - .rack_init_builder - .internal_dns_config - .host_zone(fake_mgs_zone_id, *v6addr.ip()) - .expect("Failed to add DNS for MGS zone"); - self.rack_init_builder - .internal_dns_config - .service_backend_zone( - internal_dns::ServiceName::ManagementGatewayService, - &zone, - v6addr.port(), - ) - .expect("Failed to add DNS for MGS service"); - self.gateway = Some(gateway); + self.gateway.insert(switch_location, gateway); } pub async fn start_dendrite(&mut self, switch_location: SwitchLocation) { @@ -466,11 +519,16 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { let config = DpdConfig { address: std::net::SocketAddr::V6(address) }; self.config.pkg.dendrite.insert(switch_location, config); - let sled_id = Uuid::parse_str(SLED_AGENT_UUID).unwrap(); - self.rack_init_builder.add_service( + let sled_id = Uuid::parse_str(match switch_location { + SwitchLocation::Switch0 => SLED_AGENT_UUID, + SwitchLocation::Switch1 => SLED_AGENT2_UUID, + }) + .unwrap(); + + self.rack_init_builder.add_service_without_dns( + sled_id, address, ServiceKind::Dendrite, - internal_dns::ServiceName::Dendrite, sled_id, ); } @@ -490,15 +548,46 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { let config = MgdConfig { address: std::net::SocketAddr::V6(address) }; self.config.pkg.mgd.insert(switch_location, config); - let sled_id = Uuid::parse_str(SLED_AGENT_UUID).unwrap(); - self.rack_init_builder.add_service( + let sled_id = Uuid::parse_str(match switch_location { + SwitchLocation::Switch0 => SLED_AGENT_UUID, + SwitchLocation::Switch1 => SLED_AGENT2_UUID, + }) + .unwrap(); + + self.rack_init_builder.add_service_without_dns( + sled_id, address, ServiceKind::Mgd, - internal_dns::ServiceName::Mgd, sled_id, ); } + pub async fn record_switch_dns(&mut self) { + let log = &self.logctx.log; + debug!(log, "Recording DNS for the switch zones"); + for (sled_id, switch_location) in &[ + (SLED_AGENT_UUID, SwitchLocation::Switch0), + (SLED_AGENT2_UUID, SwitchLocation::Switch1), + ] { + let id = sled_id.parse().unwrap(); + self.rack_init_builder + .internal_dns_config + .host_zone_switch( + id, + Ipv6Addr::LOCALHOST, + self.dendrite.get(switch_location).unwrap().port, + self.gateway + .get(switch_location) + .unwrap() + .client + 
.bind_address + .port(), + self.mgd.get(switch_location).unwrap().port, + ) + .unwrap(); + } + } + pub async fn start_oximeter(&mut self) { let log = &self.logctx.log; debug!(log, "Starting Oximeter"); @@ -585,16 +674,14 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .mac_addrs .next() .expect("ran out of MAC addresses"); - self.rack_init_builder.add_service( + let external_address = + self.config.deployment.dropshot_external.dropshot.bind_address.ip(); + let nexus_id = self.config.deployment.id; + self.rack_init_builder.add_service_with_id( + nexus_id, address, ServiceKind::Nexus { - external_address: self - .config - .deployment - .dropshot_external - .dropshot - .bind_address - .ip(), + external_address, nic: ServiceNic { id: Uuid::new_v4(), name: "nexus".parse().unwrap(), @@ -610,6 +697,32 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { sled_id, ); + self.omicron_zones.push(OmicronZoneConfig { + id: nexus_id, + underlay_address: *address.ip(), + zone_type: OmicronZoneType::Nexus { + external_dns_servers: self + .config + .deployment + .external_dns_servers + .clone(), + external_ip: external_address, + external_tls: self.config.deployment.dropshot_external.tls, + internal_address: address.to_string(), + nic: NetworkInterface { + id: Uuid::new_v4(), + ip: external_address, + kind: NetworkInterfaceKind::Service { id: nexus_id }, + mac, + name: format!("nexus-{}", nexus_id).parse().unwrap(), + primary: true, + slot: 0, + subnet: (*NEXUS_OPTE_IPV4_SUBNET).into(), + vni: Vni::SERVICES_VNI, + }, + }, + }); + self.nexus_internal = Some(nexus_internal); self.nexus_internal_addr = Some(nexus_internal_addr); } @@ -701,6 +814,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { &external_dns_zone_name, recovery_silo, tls_certificates, + SLED_AGENT2_UUID.parse().unwrap(), ) .await; @@ -729,12 +843,22 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { self.server = Some(server); } - pub async fn start_sled(&mut self, sim_mode: sim::SimMode) { + pub async fn start_sled( + &mut self, + switch_location: SwitchLocation, + sim_mode: sim::SimMode, + ) { let nexus_address = self.nexus_internal_addr.expect("Must launch Nexus first"); // Set up a single sled agent. - let sa_id = Uuid::parse_str(SLED_AGENT_UUID).unwrap(); + let sa_id: Uuid = if switch_location == SwitchLocation::Switch0 { + SLED_AGENT_UUID + } else { + SLED_AGENT2_UUID + } + .parse() + .unwrap(); let tempdir = camino_tempfile::tempdir().unwrap(); let sled_agent = start_sled_agent( self.logctx.log.new(o!( @@ -749,8 +873,40 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .await .expect("Failed to start sled agent"); - self.sled_agent = Some(sled_agent); - self.sled_agent_storage = Some(tempdir); + if switch_location == SwitchLocation::Switch0 { + self.sled_agent = Some(sled_agent); + self.sled_agent_storage = Some(tempdir); + } else { + self.sled_agent2 = Some(sled_agent); + self.sled_agent2_storage = Some(tempdir); + } + } + + pub async fn configure_sled_agent( + &mut self, + switch_location: SwitchLocation, + ) { + let (field, zones) = if switch_location == SwitchLocation::Switch0 { + (&self.sled_agent, &self.omicron_zones) + } else { + (&self.sled_agent2, &self.omicron_zones2) + }; + + // Tell our Sled Agent to report the zones that we configured. 
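+        // The matching sled agent must already be running; we hand it the
+        // zone list over its HTTP API. `Generation::new().next()` is one
+        // past the initial generation, so the simulated agent treats this
+        // config as an update and accepts it.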
+ let Some(sled_agent) = field else { + panic!("expected sled agent has not been created"); + }; + let client = sled_agent_client::Client::new( + &format!("http://{}", sled_agent.http_server.local_addr()), + self.logctx.log.clone(), + ); + client + .omicron_zones_put(&OmicronZonesConfig { + zones: zones.clone(), + generation: Generation::new().next(), + }) + .await + .expect("Failed to configure sled agent with our zones"); } // Set up the Crucible Pantry on an existing Sled Agent. @@ -768,12 +924,21 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { }; let sled_id = Uuid::parse_str(SLED_AGENT_UUID).unwrap(); - self.rack_init_builder.add_service( + let zone_id = Uuid::new_v4(); + self.rack_init_builder.add_service_with_id( + zone_id, address, ServiceKind::CruciblePantry, internal_dns::ServiceName::CruciblePantry, sled_id, ); + self.omicron_zones.push(OmicronZoneConfig { + id: zone_id, + underlay_address: *address.ip(), + zone_type: OmicronZoneType::CruciblePantry { + address: address.to_string(), + }, + }); } // Set up an external DNS server. @@ -796,7 +961,9 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .mac_addrs .next() .expect("ran out of MAC addresses"); - self.rack_init_builder.add_service( + let zone_id = Uuid::new_v4(); + self.rack_init_builder.add_service_with_id( + zone_id, dropshot_address, ServiceKind::ExternalDns { external_address: (*dns_address.ip()).into(), @@ -814,6 +981,33 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { internal_dns::ServiceName::ExternalDns, sled_id, ); + + let zpool_id = Uuid::new_v4(); + let pool_name = illumos_utils::zpool::ZpoolName::new_external(zpool_id) + .to_string() + .parse() + .unwrap(); + self.omicron_zones.push(OmicronZoneConfig { + id: zone_id, + underlay_address: *dropshot_address.ip(), + zone_type: OmicronZoneType::ExternalDns { + dataset: OmicronZoneDataset { pool_name }, + dns_address: dns_address.to_string(), + http_address: dropshot_address.to_string(), + nic: NetworkInterface { + id: Uuid::new_v4(), + ip: (*dns_address.ip()).into(), + kind: NetworkInterfaceKind::Service { id: zone_id }, + mac, + name: format!("external-dns-{}", zone_id).parse().unwrap(), + primary: true, + slot: 0, + subnet: (*DNS_OPTE_IPV4_SUBNET).into(), + vni: Vni::SERVICES_VNI, + }, + }, + }); + self.external_dns = Some(dns); } @@ -826,13 +1020,32 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { let SocketAddr::V6(address) = dns.dropshot_server.local_addr() else { panic!("Unsupported IPv4 DNS address"); }; - self.rack_init_builder.add_service( + let zone_id = Uuid::new_v4(); + self.rack_init_builder.add_service_with_id( + zone_id, address, ServiceKind::InternalDns, internal_dns::ServiceName::InternalDns, sled_id, ); + let zpool_id = Uuid::new_v4(); + let pool_name = illumos_utils::zpool::ZpoolName::new_external(zpool_id) + .to_string() + .parse() + .unwrap(); + self.omicron_zones.push(OmicronZoneConfig { + id: zone_id, + underlay_address: *address.ip(), + zone_type: OmicronZoneType::InternalDns { + dataset: OmicronZoneDataset { pool_name }, + dns_address: dns.dns_server.local_address().to_string(), + http_address: address.to_string(), + gz_address: Ipv6Addr::LOCALHOST, + gz_address_index: 0, + }, + }); + self.internal_dns = Some(dns); } @@ -846,10 +1059,12 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { clickhouse: self.clickhouse.unwrap(), sled_agent_storage: self.sled_agent_storage.unwrap(), sled_agent: self.sled_agent.unwrap(), + sled_agent2_storage: 
self.sled_agent2_storage.unwrap(), + sled_agent2: self.sled_agent2.unwrap(), oximeter: self.oximeter.unwrap(), producer: self.producer.unwrap(), logctx: self.logctx, - gateway: self.gateway.unwrap(), + gateway: self.gateway, dendrite: self.dendrite, mgd: self.mgd, external_dns_zone_name: self.external_dns_zone_name.unwrap(), @@ -873,13 +1088,16 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { if let Some(sled_agent) = self.sled_agent { sled_agent.http_server.close().await.unwrap(); } + if let Some(sled_agent2) = self.sled_agent2 { + sled_agent2.http_server.close().await.unwrap(); + } if let Some(oximeter) = self.oximeter { oximeter.close().await.unwrap(); } if let Some(producer) = self.producer { producer.close().await.unwrap(); } - if let Some(gateway) = self.gateway { + for (_, gateway) in self.gateway { gateway.teardown().await; } for (_, mut dendrite) in self.dendrite { @@ -990,8 +1208,20 @@ async fn setup_with_config_impl( Box::new(|builder| builder.start_clickhouse().boxed()), ), ( - "start_gateway", - Box::new(|builder| builder.start_gateway().boxed()), + "start_gateway_switch0", + Box::new(|builder| { + builder + .start_gateway(SwitchLocation::Switch0, None) + .boxed() + }), + ), + ( + "start_gateway_switch1", + Box::new(|builder| { + builder + .start_gateway(SwitchLocation::Switch1, None) + .boxed() + }), ), ( "start_dendrite_switch0", @@ -1017,6 +1247,10 @@ async fn setup_with_config_impl( builder.start_mgd(SwitchLocation::Switch1).boxed() }), ), + ( + "record_switch_dns", + Box::new(|builder| builder.record_switch_dns().boxed()), + ), ( "start_internal_dns", Box::new(|builder| builder.start_internal_dns().boxed()), @@ -1030,9 +1264,19 @@ async fn setup_with_config_impl( Box::new(|builder| builder.start_nexus_internal().boxed()), ), ( - "start_sled", + "start_sled1", Box::new(move |builder| { - builder.start_sled(sim_mode).boxed() + builder + .start_sled(SwitchLocation::Switch0, sim_mode) + .boxed() + }), + ), + ( + "start_sled2", + Box::new(move |builder| { + builder + .start_sled(SwitchLocation::Switch1, sim_mode) + .boxed() }), ), ( @@ -1043,6 +1287,22 @@ async fn setup_with_config_impl( "populate_internal_dns", Box::new(|builder| builder.populate_internal_dns().boxed()), ), + ( + "configure_sled_agent1", + Box::new(|builder| { + builder + .configure_sled_agent(SwitchLocation::Switch0) + .boxed() + }), + ), + ( + "configure_sled_agent2", + Box::new(|builder| { + builder + .configure_sled_agent(SwitchLocation::Switch1) + .boxed() + }), + ), ( "start_nexus_external", Box::new(|builder| { diff --git a/nexus/tests/integration_tests/initialization.rs b/nexus/tests/integration_tests/initialization.rs index b77a121080..a76aef832e 100644 --- a/nexus/tests/integration_tests/initialization.rs +++ b/nexus/tests/integration_tests/initialization.rs @@ -2,17 +2,13 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-use std::collections::HashMap; -use std::net::{Ipv6Addr, SocketAddrV6}; - -use gateway_messages::SpPort; -use gateway_test_utils::setup as mgs_setup; use nexus_config::Database; use nexus_config::InternalDns; use nexus_test_interface::NexusServer; use nexus_test_utils::{load_test_config, ControlPlaneTestContextBuilder}; use omicron_common::address::MGS_PORT; use omicron_common::api::internal::shared::SwitchLocation; +use std::collections::HashMap; use tokio::time::sleep; use tokio::time::timeout; use tokio::time::Duration; @@ -78,19 +74,6 @@ async fn test_nexus_boots_before_cockroach() { #[tokio::test] async fn test_nexus_boots_before_dendrite() { - // Start MGS + Sim SP. This is needed for the Dendrite client initialization - // inside of Nexus initialization - let (mgs_config, sp_sim_config) = mgs_setup::load_test_config(); - let mgs_addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, MGS_PORT, 0, 0); - let mgs = mgs_setup::test_setup_with_config( - "test_nexus_boots_before_dendrite", - SpPort::One, - mgs_config, - &sp_sim_config, - Some(mgs_addr), - ) - .await; - let mut config = load_test_config(); let mut builder = @@ -101,6 +84,14 @@ async fn test_nexus_boots_before_dendrite() { let log = builder.logctx.log.new(o!("component" => "test")); + // Start MGS + Sim SP. This is needed for the Dendrite client initialization + // inside of Nexus initialization. We must use MGS_PORT here because Nexus + // hardcodes it. + info!(&log, "Starting MGS"); + builder.start_gateway(SwitchLocation::Switch0, Some(MGS_PORT)).await; + builder.start_gateway(SwitchLocation::Switch1, None).await; + info!(&log, "Started MGS"); + let populate = true; builder.start_crdb(populate).await; builder.start_internal_dns().await; @@ -150,6 +141,7 @@ async fn test_nexus_boots_before_dendrite() { info!(log, "Started mgd"); info!(log, "Populating internal DNS records"); + builder.record_switch_dns().await; builder.populate_internal_dns().await; info!(log, "Populated internal DNS records"); @@ -157,7 +149,6 @@ async fn test_nexus_boots_before_dendrite() { nexus_handle.await.expect("Test: Task starting Nexus has failed"); builder.teardown().await; - mgs.teardown().await; } // Helper to ensure we perform the same setup for the positive and negative test diff --git a/nexus/tests/integration_tests/sleds.rs b/nexus/tests/integration_tests/sleds.rs index b551cf51b5..743a76be17 100644 --- a/nexus/tests/integration_tests/sleds.rs +++ b/nexus/tests/integration_tests/sleds.rs @@ -48,9 +48,9 @@ async fn sled_instance_list( async fn test_sleds_list(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - // Verify that there is one sled to begin with. + // Verify that there are two sleds to begin with. let sleds_url = "/v1/system/hardware/sleds"; - assert_eq!(sleds_list(&client, &sleds_url).await.len(), 1); + assert_eq!(sleds_list(&client, &sleds_url).await.len(), 2); // Now start a few more sled agents. let nsleds = 3; @@ -76,7 +76,7 @@ async fn test_sleds_list(cptestctx: &ControlPlaneTestContext) { // List sleds again. let sleds_found = sleds_list(&client, &sleds_url).await; - assert_eq!(sleds_found.len(), nsleds + 1); + assert_eq!(sleds_found.len(), nsleds + 2); let sledids_found = sleds_found.iter().map(|sv| sv.identity.id).collect::>(); @@ -97,9 +97,9 @@ async fn test_physical_disk_create_list_delete( let external_client = &cptestctx.external_client; let internal_client = &cptestctx.internal_client; - // Verify that there is one sled to begin with. + // Verify that there are two sleds to begin with. 
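+    // (The test harness now brings up one simulated sled agent per switch
+    // location, hence two sleds.)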
let sleds_url = "/v1/system/hardware/sleds"; - assert_eq!(sleds_list(&external_client, &sleds_url).await.len(), 1); + assert_eq!(sleds_list(&external_client, &sleds_url).await.len(), 2); // The test framework may set up some disks initially. let disks_url = @@ -140,9 +140,9 @@ async fn test_physical_disk_create_list_delete( async fn test_sled_instance_list(cptestctx: &ControlPlaneTestContext) { let external_client = &cptestctx.external_client; - // Verify that there is one sled to begin with. + // Verify that there are two sleds to begin with. let sleds_url = "/v1/system/hardware/sleds"; - assert_eq!(sleds_list(&external_client, &sleds_url).await.len(), 1); + assert_eq!(sleds_list(&external_client, &sleds_url).await.len(), 2); // Verify that there are no instances. let instances_url = diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index 483b2d6aa8..0b90bef590 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -740,7 +740,7 @@ impl SledAgent { Ok(Inventory { sled_id: self.id, sled_agent_address, - sled_role: SledRole::Gimlet, + sled_role: SledRole::Scrimlet, baseboard: self.config.hardware.baseboard.clone(), usable_hardware_threads: self.config.hardware.hardware_threads, usable_physical_ram: ByteCount::try_from( From 9dc85d649f88f2cac9e1873ce9c019a5d3664e69 Mon Sep 17 00:00:00 2001 From: Andy Fiddaman Date: Sun, 10 Mar 2024 22:18:51 +0000 Subject: [PATCH 096/157] oxide/opte-interface-setup should default to disabled (#5230) The `oxide/opte-interface-setup` service is present in all NTP zones, but only the boundary zones require an OPTE interface. sled-agent explicitly enables the service for just the boundary zones, but because the service is enabled by default it ends up attempting to start in the non-boundaries too, where it fails. ``` [ Mar 8 12:56:26 Executing start method ("/opt/oxide/zone-network-cli/bin/zone-networking opte-interface-set-up -i unknown -g unknown -p unknown"). ] note: configured to log to "/dev/stderr" error: invalid value 'unknown' for '--opte_interface ': ERROR: Missing OPTE interface For more information, try '--help'. [ Mar 8 12:56:26 Method "start" exited with status 2. ] ``` --- smf/opte-interface-setup/manifest.xml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/smf/opte-interface-setup/manifest.xml b/smf/opte-interface-setup/manifest.xml index 5b886c8a71..68641f56fc 100644 --- a/smf/opte-interface-setup/manifest.xml +++ b/smf/opte-interface-setup/manifest.xml @@ -4,7 +4,7 @@ - + - + @@ -35,7 +35,7 @@