diff --git a/common/src/ledger.rs b/common/src/ledger.rs
index ccd08661dd..18e7584262 100644
--- a/common/src/ledger.rs
+++ b/common/src/ledger.rs
@@ -135,7 +135,7 @@ impl<T: Ledgerable> Ledger<T> {
         }

         if !one_successful_write {
-            warn!(self.log, "No successful writes to ledger");
+            error!(self.log, "No successful writes to ledger");
             return Err(Error::FailedToWrite { failed_paths });
         }
         Ok(())
diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs
index 8f7d4e8c7b..b33a063e82 100644
--- a/nexus/src/app/rack.rs
+++ b/nexus/src/app/rack.rs
@@ -87,7 +87,7 @@ impl super::Nexus {
         Ok(db_rack)
     }

-    /// Marks the rack as initialized with a set of services.
+    /// Marks the rack as initialized with information supplied by RSS.
     ///
     /// This function is a no-op if the rack has already been initialized.
     pub(crate) async fn rack_initialize(
diff --git a/nexus/test-interface/src/lib.rs b/nexus/test-interface/src/lib.rs
index 67b60d4bdd..54478c0876 100644
--- a/nexus/test-interface/src/lib.rs
+++ b/nexus/test-interface/src/lib.rs
@@ -80,6 +80,10 @@ pub trait NexusServer: Send + Sync + 'static {
     // control over dataset provisioning is shifting to Nexus. There is
     // a short window where RSS controls dataset provisioning, but afterwards,
     // Nexus should be calling the shots on "when to provision datasets".
+    // Furthermore, with https://github.com/oxidecomputer/omicron/pull/5172,
+    // physical disk and zpool provisioning has already moved into Nexus. This
+    // provides a "back-door" for tests to control the set of control plane
+    // disks that are considered active.
     //
     // For test purposes, we have many situations where we want to carve up
     // zpools and datasets precisely for disk-based tests. As a result, we
diff --git a/nexus/types/src/internal_api/params.rs b/nexus/types/src/internal_api/params.rs
index d74811e60b..a811106c2c 100644
--- a/nexus/types/src/internal_api/params.rs
+++ b/nexus/types/src/internal_api/params.rs
@@ -239,7 +239,7 @@ pub struct RackInitializationRequest {
     /// "Managed" physical disks owned by the control plane
     pub physical_disks: Vec<PhysicalDiskPutRequest>,

-    /// Zpools created withing the physical disks created by the control plane.
+    /// Zpools created within the physical disks created by the control plane.
    pub zpools: Vec<ZpoolPutRequest>,

     /// Datasets on the rack which have been provisioned by RSS.
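A note on the common/src/ledger.rs hunk above: a ledger is persisted to several paths (one per M.2 device), and a commit is considered successful if at least one write lands; the hunk upgrades the log level for the case where every write fails. Below is a minimal sketch of that at-least-one-success pattern; `write_all` and the error shape are illustrative, not the actual `Ledger` API.

```rust
use std::path::PathBuf;

#[derive(Debug)]
enum Error {
    FailedToWrite { failed_paths: Vec<(PathBuf, std::io::Error)> },
}

// Write the same payload to every path; fail only if *no* write succeeds.
fn write_all(paths: &[PathBuf], contents: &[u8]) -> Result<(), Error> {
    let mut one_successful_write = false;
    let mut failed_paths = Vec::new();
    for path in paths {
        match std::fs::write(path, contents) {
            Ok(()) => one_successful_write = true,
            Err(e) => failed_paths.push((path.clone(), e)),
        }
    }
    if !one_successful_write {
        // Losing every replica is not survivable, hence the upgrade from
        // warn! to error! in the hunk above.
        return Err(Error::FailedToWrite { failed_paths });
    }
    Ok(())
}
```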
diff --git a/sled-agent/src/bootstrap/bootstore_setup.rs b/sled-agent/src/bootstrap/bootstore_setup.rs
index 9e3a2f7b12..ee9a321474 100644
--- a/sled-agent/src/bootstrap/bootstore_setup.rs
+++ b/sled-agent/src/bootstrap/bootstore_setup.rs
@@ -26,7 +26,7 @@ const BOOTSTORE_FSM_STATE_FILE: &str = "bootstore-fsm-state.json";
 const BOOTSTORE_NETWORK_CONFIG_FILE: &str = "bootstore-network-config.json";

 pub fn new_bootstore_config(
-    iter_all: &AllDisks,
+    all_disks: &AllDisks,
     baseboard: Baseboard,
     global_zone_bootstrap_ip: Ipv6Addr,
 ) -> Result<BootstoreConfig, StartError> {
@@ -37,15 +37,17 @@ pub fn new_bootstore_config(
         learn_timeout: Duration::from_secs(5),
         rack_init_timeout: Duration::from_secs(300),
         rack_secret_request_timeout: Duration::from_secs(5),
-        fsm_state_ledger_paths: bootstore_fsm_state_paths(&iter_all)?,
-        network_config_ledger_paths: bootstore_network_config_paths(&iter_all)?,
+        fsm_state_ledger_paths: bootstore_fsm_state_paths(&all_disks)?,
+        network_config_ledger_paths: bootstore_network_config_paths(
+            &all_disks,
+        )?,
     })
 }

 fn bootstore_fsm_state_paths(
-    iter_all: &AllDisks,
+    all_disks: &AllDisks,
 ) -> Result<Vec<Utf8PathBuf>, StartError> {
-    let paths: Vec<_> = iter_all
+    let paths: Vec<_> = all_disks
         .all_m2_mountpoints(CLUSTER_DATASET)
         .into_iter()
         .map(|p| p.join(BOOTSTORE_FSM_STATE_FILE))
@@ -58,9 +60,9 @@ fn bootstore_fsm_state_paths(
 }

 fn bootstore_network_config_paths(
-    iter_all: &AllDisks,
+    all_disks: &AllDisks,
 ) -> Result<Vec<Utf8PathBuf>, StartError> {
-    let paths: Vec<_> = iter_all
+    let paths: Vec<_> = all_disks
         .all_m2_mountpoints(CLUSTER_DATASET)
         .into_iter()
         .map(|p| p.join(BOOTSTORE_NETWORK_CONFIG_FILE))
diff --git a/sled-agent/src/hardware_monitor.rs b/sled-agent/src/hardware_monitor.rs
index 2b59b1df04..455a827e98 100644
--- a/sled-agent/src/hardware_monitor.rs
+++ b/sled-agent/src/hardware_monitor.rs
@@ -182,7 +182,7 @@ impl HardwareMonitor {
         //
         // Here and below, we're "dropping a future" rather than
         // awaiting it. That's intentional - the hardware monitor
-        // don't care when this work is finished, just when it's
+        // doesn't care when this work is finished, just when it's
         // enqueued.
         #[allow(clippy::let_underscore_future)]
         let _ = self
diff --git a/sled-agent/src/long_running_tasks.rs b/sled-agent/src/long_running_tasks.rs
index 8caf175beb..9b0ea7ac6c 100644
--- a/sled-agent/src/long_running_tasks.rs
+++ b/sled-agent/src/long_running_tasks.rs
@@ -229,7 +229,7 @@ async fn upsert_synthetic_disks_if_needed(
             "Upserting synthetic device to Storage Manager";
             "vdev" => vdev.to_string(),
         );
-        let disk = RawSyntheticDisk::new(vdev, i.try_into().unwrap())
+        let disk = RawSyntheticDisk::load(vdev, i.try_into().unwrap())
             .expect("Failed to parse synthetic disk")
             .into();
         storage_manager.detected_raw_disk(disk).await.await.unwrap();
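The comment fixed in the sled-agent/src/hardware_monitor.rs hunk describes a deliberate fire-and-forget pattern: the monitor awaits only the enqueueing of a request, then drops the future that would report completion. Here is a minimal sketch of that shape, assuming tokio mpsc/oneshot plumbing; the names and types are illustrative, not the HardwareMonitor API.

```rust
use tokio::sync::{mpsc, oneshot};

// Each request carries a oneshot sender that the worker fires when done.
type Request = oneshot::Sender<()>;

async fn enqueue_only(tx: &mpsc::Sender<Request>) {
    let (done_tx, done_rx) = oneshot::channel();
    // Await the enqueue itself: the request must reach the worker...
    tx.send(done_tx).await.expect("worker task has exited");
    // ...but intentionally drop the completion future instead of awaiting
    // it; the caller only cares that the work is enqueued, not finished.
    #[allow(clippy::let_underscore_future)]
    let _ = done_rx;
}
```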
diff --git a/sled-storage/src/disk.rs b/sled-storage/src/disk.rs
index 85ec758591..7383475cb9 100644
--- a/sled-storage/src/disk.rs
+++ b/sled-storage/src/disk.rs
@@ -134,7 +134,7 @@ impl SyntheticDisk {
 }

 // A synthetic disk that acts as one "found" by the hardware and that is backed
-// by a zpool
+// by a vdev.
 #[derive(Debug, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)]
 pub struct RawSyntheticDisk {
     pub path: Utf8PathBuf,
@@ -153,10 +153,12 @@ impl RawSyntheticDisk {
     ) -> Result<Self, anyhow::Error> {
         let file = std::fs::File::create(vdev.as_ref())?;
         file.set_len(length)?;
-        Self::new(vdev, slot)
+        Self::load(vdev, slot)
     }

-    pub fn new<P: AsRef<Utf8Path>>(
+    /// Treats a file at path `vdev` as a synthetic disk. The file
+    /// should already exist, and have the desired length.
+    pub fn load<P: AsRef<Utf8Path>>(
         vdev: P,
         slot: i64,
     ) -> Result<Self, anyhow::Error> {
diff --git a/sled-storage/src/resources.rs b/sled-storage/src/resources.rs
index 67211dca87..63dd65e5db 100644
--- a/sled-storage/src/resources.rs
+++ b/sled-storage/src/resources.rs
@@ -83,7 +83,7 @@ impl DisksManagementResult {
                 return true;
             }
         }
-        return false;
+        false
     }

     pub fn has_retryable_error(&self) -> bool {
@@ -94,7 +94,7 @@ impl DisksManagementResult {
                 }
             }
         }
-        return false;
+        false
     }
 }
@@ -105,7 +105,7 @@
 pub enum ManagedDisk {
     // A disk explicitly managed by the control plane.
     //
-    // This include U.2s which Nexus has told us to format and use.
+    // This includes U.2s which Nexus has told us to format and use.
     ExplicitlyManaged(Disk),

     // A disk implicitly managed by the control plane.
@@ -344,7 +344,7 @@ impl StorageResources {
         // other modifications to the underlying storage.
         for (identity, managed_disk) in &mut *disks {
             match managed_disk {
-                // This leaves the prescence of the disk still in "Self", but
+                // This leaves the presence of the disk still in "Self", but
                 // downgrades the disk to an unmanaged status.
                 ManagedDisk::ExplicitlyManaged(disk) => {
                     if self.control_plane_disks.get(identity).is_none() {
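Finally, on the sled-storage/src/resources.rs hunks: the `return false;` to `false` changes are pure style (tail expressions), and the scan logic around them is unchanged. Below is a sketch of that scan using illustrative stand-ins for the sled-storage types; the real `DiskManagementError` has more variants than shown here.

```rust
// Illustrative stand-ins; not the actual sled-storage definitions.
#[allow(dead_code)]
#[derive(Debug)]
enum DiskManagementError {
    NotFound,          // transient: the disk may simply not be detected yet
    ZpoolUuidMismatch, // permanent: needs operator intervention
}

impl DiskManagementError {
    fn retryable(&self) -> bool {
        matches!(self, DiskManagementError::NotFound)
    }
}

struct DiskStatus {
    err: Option<DiskManagementError>,
}

fn has_retryable_error(statuses: &[DiskStatus]) -> bool {
    for status in statuses {
        if let Some(err) = &status.err {
            if err.retryable() {
                return true;
            }
        }
    }
    false // tail expression, matching the style cleanup above
}

fn main() {
    let statuses = vec![
        DiskStatus { err: None },
        DiskStatus { err: Some(DiskManagementError::NotFound) },
    ];
    assert!(has_retryable_error(&statuses));
}
```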