diff --git a/Cargo.lock b/Cargo.lock index 65ac0fd91c..9021187822 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4464,6 +4464,7 @@ dependencies = [ "futures", "http", "ipnetwork", + "itertools 0.13.0", "libc", "macaddr", "mockall", @@ -10805,6 +10806,7 @@ dependencies = [ "slog", "thiserror 1.0.69", "tokio", + "tokio-stream", "uuid", ] diff --git a/illumos-utils/Cargo.toml b/illumos-utils/Cargo.toml index e1421bd3ab..69276713bb 100644 --- a/illumos-utils/Cargo.toml +++ b/illumos-utils/Cargo.toml @@ -21,6 +21,7 @@ dropshot.workspace = true futures.workspace = true http.workspace = true ipnetwork.workspace = true +itertools.workspace = true libc.workspace = true macaddr.workspace = true omicron-common.workspace = true diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index f9edb8de86..ee1ac58be9 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -9,9 +9,11 @@ use anyhow::anyhow; use anyhow::bail; use anyhow::Context; use camino::{Utf8Path, Utf8PathBuf}; +use itertools::Itertools; use omicron_common::api::external::ByteCount; use omicron_common::disk::CompressionAlgorithm; use omicron_common::disk::DiskIdentity; +use omicron_common::disk::SharedDatasetConfig; use omicron_uuid_kinds::DatasetUuid; use std::collections::BTreeMap; use std::fmt; @@ -82,25 +84,19 @@ enum EnsureFilesystemErrorRaw { /// Error returned by [`Zfs::ensure_filesystem`]. #[derive(thiserror::Error, Debug)] -#[error( - "Failed to ensure filesystem '{name}' exists at '{mountpoint:?}': {err}" -)] +#[error("Failed to ensure filesystem '{name}': {err}")] pub struct EnsureFilesystemError { name: String, - mountpoint: Mountpoint, #[source] err: EnsureFilesystemErrorRaw, } /// Error returned by [`Zfs::set_oxide_value`] #[derive(thiserror::Error, Debug)] -#[error( - "Failed to set value '{name}={value}' on filesystem {filesystem}: {err}" -)] +#[error("Failed to set values '{values}' on filesystem {filesystem}: {err}")] pub struct SetValueError { filesystem: String, - name: String, - value: String, + values: String, err: crate::ExecutionError, } @@ -242,11 +238,27 @@ impl DatasetProperties { "oxide:uuid,name,avail,used,quota,reservation,compression"; } +impl TryFrom<&DatasetProperties> for SharedDatasetConfig { + type Error = anyhow::Error; + + fn try_from( + props: &DatasetProperties, + ) -> Result<Self, Self::Error> { + Ok(SharedDatasetConfig { + compression: props.compression.parse()?, + quota: props.quota, + reservation: props.reservation, + }) + } +} + impl DatasetProperties { /// Parses dataset properties, assuming that the caller is providing the /// output of the following command as stdout: /// - /// zfs get -rpo name,property,value,source $ZFS_GET_PROPS $DATASETS + /// zfs get \ + /// [maybe depth arguments] \ + /// -Hpo name,property,value,source $ZFS_GET_PROPS $DATASETS fn parse_many( stdout: &str, ) -> Result<Vec<DatasetProperties>, anyhow::Error> { @@ -307,14 +319,16 @@ impl DatasetProperties { .parse::<u64>() .context("Failed to parse 'used'")? .try_into()?; + + // The values of "quota" and "reservation" can be either "-" or + // "0" when they are not actually set. To be cautious, we treat + // both of these values as "the value has not been set + // explicitly". As a result, setting either of these values + // explicitly to zero is indistinguishable from setting them + // with a value of "none". let quota = props .get("quota") - .filter(|(_prop, source)| { - // If a quota has not been set explicitly, it has a default - // source and a value of "zero". Rather than parsing the value - // as zero, it should be ignored.
- *source != "default" - }) + .filter(|(prop, _source)| *prop != "-" && *prop != "0") .map(|(prop, _source)| { prop.parse::().context("Failed to parse 'quota'") }) @@ -322,12 +336,7 @@ impl DatasetProperties { .and_then(|v| ByteCount::try_from(v).ok()); let reservation = props .get("reservation") - .filter(|(_prop, source)| { - // If a reservation has not been set explicitly, it has a default - // source and a value of "zero". Rather than parsing the value - // as zero, it should be ignored. - *source != "default" - }) + .filter(|(prop, _source)| *prop != "-" && *prop != "0") .map(|(prop, _source)| { prop.parse::() .context("Failed to parse 'reservation'") @@ -375,7 +384,40 @@ impl fmt::Display for PropertySource { } } -#[cfg_attr(any(test, feature = "testing"), mockall::automock, allow(dead_code))] +#[derive(Copy, Clone, Debug)] +pub enum WhichDatasets { + SelfOnly, + SelfAndChildren, +} + +fn build_zfs_set_key_value_pairs( + size_details: Option, + dataset_id: Option, +) -> Vec<(&'static str, String)> { + let mut props = Vec::new(); + if let Some(SizeDetails { quota, reservation, compression }) = size_details + { + let quota = quota + .map(|q| q.to_bytes().to_string()) + .unwrap_or_else(|| String::from("none")); + props.push(("quota", quota)); + + let reservation = reservation + .map(|r| r.to_bytes().to_string()) + .unwrap_or_else(|| String::from("none")); + props.push(("reservation", reservation)); + + let compression = compression.to_string(); + props.push(("compression", compression)); + } + + if let Some(id) = dataset_id { + props.push(("oxide:uuid", id.to_string())); + } + + props +} + impl Zfs { /// Lists all datasets within a pool or existing dataset. /// @@ -399,7 +441,9 @@ impl Zfs { } /// Get information about datasets within a list of zpools / datasets. - /// Returns properties for all input datasets and their direct children. + /// Returns properties for all input datasets, and optionally, for + /// their children (depending on the value of [WhichDatasets] is provided + /// as input). /// /// This function is similar to [Zfs::list_datasets], but provides a more /// substantial results about the datasets found. @@ -407,22 +451,30 @@ impl Zfs { /// Sorts results and de-duplicates them by name. pub fn get_dataset_properties( datasets: &[String], + which: WhichDatasets, ) -> Result, anyhow::Error> { let mut command = std::process::Command::new(ZFS); - let cmd = command.args(&[ - "get", - "-d", - "1", - "-Hpo", - "name,property,value,source", - ]); + let cmd = command.arg("get"); + match which { + WhichDatasets::SelfOnly => (), + WhichDatasets::SelfAndChildren => { + cmd.args(&["-d", "1"]); + } + } + cmd.args(&["-Hpo", "name,property,value,source"]); // Note: this is tightly coupled with the layout of DatasetProperties cmd.arg(DatasetProperties::ZFS_GET_PROPS); cmd.args(datasets); - let output = execute(cmd).with_context(|| { - format!("Failed to get dataset properties for {datasets:?}") + // We are intentionally ignoring the output status of this command. + // + // If one or more dataset doesn't exist, we can still read stdout to + // see about the ones that do exist. 
+ let output = cmd.output().map_err(|err| { anyhow!( "Failed to get dataset properties for {datasets:?}: {err:?}" ) })?; let stdout = String::from_utf8(output.stdout)?; @@ -490,23 +542,19 @@ impl Zfs { do_format: bool, encryption_details: Option<EncryptionDetails>, size_details: Option<SizeDetails>, + id: Option<DatasetUuid>, additional_options: Option<Vec<String>>, ) -> Result<(), EnsureFilesystemError> { let (exists, mounted) = Self::dataset_exists(name, &mountpoint)?; + + let props = build_zfs_set_key_value_pairs(size_details, id); if exists { - if let Some(SizeDetails { quota, reservation, compression }) = - size_details - { - // apply quota and compression mode (in case they've changed across - // sled-agent versions since creation) - Self::apply_properties( - name, - &mountpoint, - quota, - reservation, - compression, - )?; - } + Self::set_values(name, props.as_slice()).map_err(|err| { + EnsureFilesystemError { + name: name.to_string(), + err: err.err.into(), + } + })?; if encryption_details.is_none() { // If the dataset exists, we're done. Unencrypted datasets are @@ -518,14 +566,13 @@ impl Zfs { return Ok(()); } // We need to load the encryption key and mount the filesystem - return Self::mount_encrypted_dataset(name, &mountpoint); + return Self::mount_encrypted_dataset(name); } } if !do_format { return Err(EnsureFilesystemError { name: name.to_string(), - mountpoint, err: EnsureFilesystemErrorRaw::NotFoundNotFormatted, }); } @@ -561,7 +608,6 @@ impl Zfs { execute(cmd).map_err(|err| EnsureFilesystemError { name: name.to_string(), - mountpoint: mountpoint.clone(), err: err.into(), })?; @@ -574,82 +620,27 @@ impl Zfs { let cmd = command.args(["chown", "-R", &user, &mount]); execute(cmd).map_err(|err| EnsureFilesystemError { name: name.to_string(), - mountpoint: mountpoint.clone(), err: err.into(), })?; } - if let Some(SizeDetails { quota, reservation, compression }) = - size_details - { - // Apply any quota and compression mode. - Self::apply_properties( - name, - &mountpoint, - quota, - reservation, - compression, - )?; - } - - Ok(()) - } - - /// Applies the following properties to the filesystem. - /// - /// If any of the options are not supplied, a default "none" or "off" - /// value is supplied.
- fn apply_properties( - name: &str, - mountpoint: &Mountpoint, - quota: Option<ByteCount>, - reservation: Option<ByteCount>, - compression: CompressionAlgorithm, - ) -> Result<(), EnsureFilesystemError> { - let quota = quota - .map(|q| q.to_bytes().to_string()) - .unwrap_or_else(|| String::from("none")); - let reservation = reservation - .map(|r| r.to_bytes().to_string()) - .unwrap_or_else(|| String::from("none")); - let compression = compression.to_string(); - - if let Err(err) = Self::set_value(name, "quota", &quota) { - return Err(EnsureFilesystemError { - name: name.to_string(), - mountpoint: mountpoint.clone(), - // Take the execution error from the SetValueError - err: err.err.into(), - }); - } - if let Err(err) = Self::set_value(name, "reservation", &reservation) { - return Err(EnsureFilesystemError { - name: name.to_string(), - mountpoint: mountpoint.clone(), - // Take the execution error from the SetValueError - err: err.err.into(), - }); - } - if let Err(err) = Self::set_value(name, "compression", &compression) { - return Err(EnsureFilesystemError { + Self::set_values(name, props.as_slice()).map_err(|err| { + EnsureFilesystemError { name: name.to_string(), - mountpoint: mountpoint.clone(), - // Take the execution error from the SetValueError err: err.err.into(), - }); - } + } + })?; + Ok(()) } fn mount_encrypted_dataset( name: &str, - mountpoint: &Mountpoint, ) -> Result<(), EnsureFilesystemError> { let mut command = std::process::Command::new(PFEXEC); let cmd = command.args(&[ZFS, "mount", "-l", name]); execute(cmd).map_err(|err| EnsureFilesystemError { name: name.to_string(), - mountpoint: mountpoint.clone(), err: EnsureFilesystemErrorRaw::MountEncryptedFsFailed(err), })?; Ok(()) @@ -657,13 +648,11 @@ impl Zfs { pub fn mount_overlay_dataset( name: &str, - mountpoint: &Mountpoint, ) -> Result<(), EnsureFilesystemError> { let mut command = std::process::Command::new(PFEXEC); let cmd = command.args(&[ZFS, "mount", "-O", name]); execute(cmd).map_err(|err| EnsureFilesystemError { name: name.to_string(), - mountpoint: mountpoint.clone(), err: EnsureFilesystemErrorRaw::MountOverlayFsFailed(err), })?; Ok(()) @@ -689,7 +678,6 @@ impl Zfs { if &values[..3] != &[name, "filesystem", &mountpoint.to_string()] { return Err(EnsureFilesystemError { name: name.to_string(), - mountpoint: mountpoint.clone(), err: EnsureFilesystemErrorRaw::Output(stdout.to_string()), }); } @@ -714,13 +702,29 @@ impl Zfs { name: &str, value: &str, ) -> Result<(), SetValueError> { + Self::set_values(filesystem_name, &[(name, value)]) + } + + fn set_values<K: std::fmt::Display, V: std::fmt::Display>( + filesystem_name: &str, + name_values: &[(K, V)], + ) -> Result<(), SetValueError> { + if name_values.is_empty() { + return Ok(()); + } + let mut command = std::process::Command::new(PFEXEC); - let value_arg = format!("{}={}", name, value); - let cmd = command.args(&[ZFS, "set", &value_arg, filesystem_name]); + let cmd = command.args(&[ZFS, "set"]); + for (name, value) in name_values { + cmd.arg(format!("{name}={value}")); + } + cmd.arg(filesystem_name); execute(cmd).map_err(|err| SetValueError { filesystem: filesystem_name.to_string(), - name: name.to_string(), - value: value.to_string(), + values: name_values + .iter() + .map(|(k, v)| format!("{k}={v}")) + .join(","), err, })?; Ok(()) @@ -808,10 +812,7 @@ impl Zfs { err, }) } -} -// These methods don't work with mockall, so they exist in a separate impl block -impl Zfs { /// Calls "zfs get" to acquire multiple values /// /// - `names`: The properties being acquired diff --git a/sled-agent/src/backing_fs.rs b/sled-agent/src/backing_fs.rs
index 265c0152e2..d24f85ad80 100644 --- a/sled-agent/src/backing_fs.rs +++ b/sled-agent/src/backing_fs.rs @@ -150,6 +150,7 @@ pub(crate) fn ensure_backing_fs( true, // do_format None, // encryption_details, size_details, + None, Some(vec!["canmount=noauto".to_string()]), // options )?; @@ -180,7 +181,7 @@ pub(crate) fn ensure_backing_fs( info!(log, "Mounting {} on {}", dataset, mountpoint); - Zfs::mount_overlay_dataset(&dataset, &mountpoint)?; + Zfs::mount_overlay_dataset(&dataset)?; if let Some(subdirs) = bfs.subdirs { for dir in subdirs { diff --git a/sled-agent/src/bootstrap/pre_server.rs b/sled-agent/src/bootstrap/pre_server.rs index 5b89506242..19a63cb71d 100644 --- a/sled-agent/src/bootstrap/pre_server.rs +++ b/sled-agent/src/bootstrap/pre_server.rs @@ -293,6 +293,7 @@ fn ensure_zfs_ramdisk_dataset() -> Result<(), StartError> { encryption_details, quota, None, + None, ) .map_err(StartError::EnsureZfsRamdiskDataset) } diff --git a/sled-storage/Cargo.toml b/sled-storage/Cargo.toml index 2439c52aa7..d47a89e0ae 100644 --- a/sled-storage/Cargo.toml +++ b/sled-storage/Cargo.toml @@ -28,6 +28,7 @@ sled-hardware.workspace = true slog.workspace = true thiserror.workspace = true tokio.workspace = true +tokio-stream.workspace = true uuid.workspace = true omicron-workspace-hack.workspace = true diff --git a/sled-storage/src/dataset.rs b/sled-storage/src/dataset.rs index e90a4c0092..717595ad9e 100644 --- a/sled-storage/src/dataset.rs +++ b/sled-storage/src/dataset.rs @@ -269,6 +269,7 @@ pub(crate) async fn ensure_zpool_has_datasets( Some(encryption_details), None, None, + None, ); keyfile.zero_and_unlink().await.map_err(|error| { @@ -331,6 +332,7 @@ pub(crate) async fn ensure_zpool_has_datasets( encryption_details, size_details, None, + None, )?; if dataset.wipe { diff --git a/sled-storage/src/error.rs b/sled-storage/src/error.rs index 351dd7f353..f00e35e654 100644 --- a/sled-storage/src/error.rs +++ b/sled-storage/src/error.rs @@ -77,6 +77,9 @@ pub enum Error { #[error("Not ready to manage U.2s (key manager is not ready)")] KeyManagerNotReady, + #[error("Physical disk configuration changed for the same generation number: {generation}")] + PhysicalDiskConfigurationChanged { generation: Generation }, + #[error("Physical disk configuration out-of-date (asked for {requested}, but latest is {current})")] PhysicalDiskConfigurationOutdated { requested: Generation, diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index a760285d3f..b1832ca92b 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -14,7 +14,9 @@ use camino::Utf8Path; use camino::Utf8PathBuf; use debug_ignore::DebugIgnore; use futures::future::FutureExt; -use illumos_utils::zfs::{DatasetProperties, Mountpoint, Zfs}; +use futures::Stream; +use futures::StreamExt; +use illumos_utils::zfs::{DatasetProperties, Mountpoint, WhichDatasets, Zfs}; use illumos_utils::zpool::{ZpoolName, ZPOOL_MOUNTPOINT_ROOT}; use key_manager::StorageKeyRequester; use omicron_common::disk::{ @@ -26,6 +28,7 @@ use omicron_common::ledger::Ledger; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use slog::{error, info, o, warn, Logger}; +use std::collections::BTreeMap; use std::collections::HashSet; use std::future::Future; use tokio::sync::{mpsc, oneshot, watch}; @@ -929,21 +932,107 @@ impl StorageManager { // includes details about all possible errors that may occur on // a per-dataset granularity. 
async fn datasets_ensure_internal( - &mut self, + &self, log: &Logger, config: &DatasetsConfig, ) -> DatasetsManagementResult { - let mut status = vec![]; - for dataset in config.datasets.values() { - status.push(self.dataset_ensure_internal(log, dataset).await); - } + // Gather properties about these datasets, if they exist. + // + // This pre-fetching lets us avoid individually querying them later. + let old_datasets = Zfs::get_dataset_properties( + config + .datasets + .values() + .map(|config| config.name.full_name()) + .collect::<Vec<String>>() + .as_slice(), + WhichDatasets::SelfOnly, + ) + .unwrap_or_default() + .into_iter() + .map(|props| (props.name.clone(), props)) + .collect::<BTreeMap<String, DatasetProperties>>(); + + let futures = config.datasets.values().map(|dataset| async { + self.dataset_ensure_internal( + log, + dataset, + old_datasets.get(&dataset.name.full_name()), + ) + .await + }); + + // This "Box::pin" is a workaround for: https://github.com/rust-lang/rust/issues/64552 + // + // Ideally, we would just use: + // + // ``` + // let status: Vec<_> = futures::stream::iter(futures) + // .buffered(...) + // .collect() + // .await; + // ``` + const DATASET_ENSURE_CONCURRENCY_LIMIT: usize = 16; + let results: std::pin::Pin<Box<dyn Stream<Item = DatasetManagementStatus> + Send>> = Box::pin( + futures::stream::iter(futures) + .buffered(DATASET_ENSURE_CONCURRENCY_LIMIT), + ); + + let status: Vec<DatasetManagementStatus> = results.collect().await; + DatasetsManagementResult { status } } + fn should_skip_dataset_ensure( + log: &Logger, + config: &DatasetConfig, + old_dataset: Option<&DatasetProperties>, + ) -> Result<bool, Error> { + let Some(old_dataset) = old_dataset else { + info!(log, "This dataset did not exist"); + return Ok(false); + }; + + let Some(old_id) = old_dataset.id else { + info!(log, "Old properties missing UUID"); + return Ok(false); + }; + + if old_id != config.id { + return Err(Error::UuidMismatch { + name: config.name.full_name(), + old: old_id.into_untyped_uuid(), + new: config.id.into_untyped_uuid(), + }); + } + + let old_props = match SharedDatasetConfig::try_from(old_dataset) { + Ok(old_props) => old_props, + Err(err) => { + warn!(log, "Failed to parse old properties"; "err" => #%err); + return Ok(false); + } + }; + + info!(log, "Parsed old dataset properties"; "props" => ?old_props); + if old_props != config.inner { + info!( + log, + "Dataset properties changed"; + "old_props" => ?old_props, + "requested_props" => ?config.inner, + ); + return Ok(false); + } + info!(log, "No changes necessary, returning early"); + return Ok(true); + } + async fn dataset_ensure_internal( - &mut self, + &self, log: &Logger, config: &DatasetConfig, + old_dataset: Option<&DatasetProperties>, ) -> DatasetManagementStatus { let log = log.new(o!("name" => config.name.full_name())); info!(log, "Ensuring dataset"); @@ -952,6 +1041,15 @@ impl StorageManager { err: None, }; + match Self::should_skip_dataset_ensure(&log, config, old_dataset) { + Ok(true) => return status, + Ok(false) => (), + Err(err) => { + status.err = Some(err.to_string()); + return status; + } + } + let mountpoint_root = &self.resources.disks().mount_config().root; let mountpoint_path = config.name.mountpoint(mountpoint_root); let details = DatasetCreationDetails { @@ -961,9 +1059,9 @@ impl StorageManager { }; if let Err(err) = self - .ensure_dataset_with_id( + .ensure_dataset( config.name.pool(), - config.id, + Some(config.id), &config.inner, &details, ) @@ -1017,8 +1115,11 @@ impl StorageManager { ]; info!(log, "Listing datasets within zpool"; "zpool" => zpool.to_string()); - Zfs::get_dataset_properties(datasets_of_interest.as_slice()) -
.map_err(Error::Other) + Zfs::get_dataset_properties( + datasets_of_interest.as_slice(), + WhichDatasets::SelfAndChildren, + ) + .map_err(Error::Other) } // Ensures that a dataset exists, nested somewhere arbitrary within @@ -1039,8 +1140,13 @@ impl StorageManager { full_name: config.name.full_name(), }; - self.ensure_dataset(config.name.root.pool(), &config.inner, &details) - .await?; + self.ensure_dataset( + config.name.root.pool(), + None, + &config.inner, + &details, + ) + .await?; Ok(()) } @@ -1073,15 +1179,18 @@ impl StorageManager { info!(log, "Listing nested datasets"); let full_name = name.full_name(); - let properties = - Zfs::get_dataset_properties(&[full_name]).map_err(|e| { - warn!( - log, - "Failed to access nested dataset"; - "name" => ?name - ); - crate::dataset::DatasetError::Other(e) - })?; + let properties = Zfs::get_dataset_properties( + &[full_name], + WhichDatasets::SelfAndChildren, + ) + .map_err(|e| { + warn!( + log, + "Failed to access nested dataset"; + "name" => ?name + ); + crate::dataset::DatasetError::Other(e) + })?; let root_path = name.root.full_name(); Ok(properties @@ -1159,12 +1268,29 @@ impl StorageManager { requested: config.generation, current: ledger_data.generation, }); - } - - // TODO: If the generation is equal, check that the values are - // also equal. + } else if config.generation == ledger_data.generation { + info!( + log, + "Requested generation number matches prior request", + ); - info!(log, "Request looks newer than prior requests"); + if ledger_data != &config { + error!( + log, + "Requested configuration changed (with the same generation)"; + "generation" => ?config.generation + ); + return Err(Error::PhysicalDiskConfigurationChanged { + generation: config.generation, + }); + } + info!( + log, + "Request looks identical to last request, re-sending" + ); + } else { + info!(log, "Request looks newer than prior requests"); + } ledger } None => { @@ -1291,40 +1417,13 @@ impl StorageManager { } } - // Invokes [Self::ensure_dataset] and also ensures the dataset has an - // expected UUID as a ZFS property. - async fn ensure_dataset_with_id( - &mut self, - zpool: &ZpoolName, - id: DatasetUuid, - config: &SharedDatasetConfig, - details: &DatasetCreationDetails, - ) -> Result<(), Error> { - self.ensure_dataset(zpool, config, details).await?; - - // Ensure the dataset has a usable UUID. - if let Ok(id_str) = Zfs::get_oxide_value(&details.full_name, "uuid") { - if let Ok(found_id) = id_str.parse::<DatasetUuid>() { - if found_id != id { - return Err(Error::UuidMismatch { - name: details.full_name.clone(), - old: found_id.into_untyped_uuid(), - new: id.into_untyped_uuid(), - }); - } - return Ok(()); - } - } - Zfs::set_oxide_value(&details.full_name, "uuid", &id.to_string())?; - Ok(()) - } - // Ensures a dataset exists within a zpool. // // Confirms that the zpool exists and is managed by this sled. async fn ensure_dataset( - &mut self, + &self, zpool: &ZpoolName, + dataset_id: Option<DatasetUuid>, config: &SharedDatasetConfig, details: &DatasetCreationDetails, ) -> Result<(), Error> { @@ -1365,6 +1464,7 @@ impl StorageManager { do_format, encryption_details, size_details, + dataset_id, None, )?; @@ -1401,6 +1501,7 @@ impl StorageManager { do_format, encryption_details, size_details, + Some(DatasetUuid::from_untyped_uuid(request.dataset_id)), None, )?; // Ensure the dataset has a usable UUID.
@@ -1725,7 +1826,7 @@ mod tests { ); let mut harness = StorageManagerTestHarness::new(&logctx.log).await; - // Queue up a disks, as we haven't told the `StorageManager` that + // Queue up a disk, as we haven't told the `StorageManager` that // the `KeyManager` is ready yet. let raw_disks = harness.add_vdevs(&["u2_under_test.vdev", "m2_helping.vdev"]).await; @@ -1996,6 +2097,111 @@ mod tests { logctx.cleanup_successful(); } + #[tokio::test] + async fn ensure_many_datasets() { + illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); + let logctx = test_setup_log("ensure_many_datasets"); + let mut harness = StorageManagerTestHarness::new(&logctx.log).await; + + // Test setup: Add U.2s and an M.2, adopt them into the "control plane" + // for usage. + harness.handle().key_manager_ready().await; + let raw_disks = harness + .add_vdevs(&[ + "u2_0.vdev", + "u2_1.vdev", + "u2_2.vdev", + "u2_3.vdev", + "u2_4.vdev", + "u2_5.vdev", + "u2_6.vdev", + "u2_7.vdev", + "u2_8.vdev", + "u2_9.vdev", + "m2_helping.vdev", + ]) + .await; + let config = harness.make_config(1, &raw_disks); + + let result = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await + .expect("Ensuring disks should work after key manager is ready"); + assert!(!result.has_error(), "{:?}", result); + + let result = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await + .expect("Ensuring disks should work after key manager is ready"); + assert!(!result.has_error(), "{:?}", result); + + // Create datasets on the newly formatted U.2s + let mut datasets = BTreeMap::new(); + for i in 0..10 { + let zpool_name = ZpoolName::new_external(config.disks[i].pool_id); + + let id = DatasetUuid::new_v4(); + let name = + DatasetName::new(zpool_name.clone(), DatasetKind::Crucible); + datasets.insert( + id, + DatasetConfig { + id, + name, + inner: SharedDatasetConfig::default(), + }, + ); + + let id = DatasetUuid::new_v4(); + let name = DatasetName::new(zpool_name.clone(), DatasetKind::Debug); + datasets.insert( + id, + DatasetConfig { + id, + name, + inner: SharedDatasetConfig::default(), + }, + ); + + let id = DatasetUuid::new_v4(); + let name = DatasetName::new( + zpool_name.clone(), + DatasetKind::TransientZoneRoot, + ); + datasets.insert( + id, + DatasetConfig { + id, + name, + inner: SharedDatasetConfig::default(), + }, + ); + } + // "Generation = 1" is reserved as "no requests seen yet", so we jump + // past it. + let generation = Generation::new().next(); + let config = DatasetsConfig { generation, datasets }; + + let status = + harness.handle().datasets_ensure(config.clone()).await.unwrap(); + assert!(!status.has_error()); + + // List datasets, expect to see what we just created + let observed_config = + harness.handle().datasets_config_list().await.unwrap(); + assert_eq!(config, observed_config); + + // Calling "datasets_ensure" with the same input should succeed. + let status = + harness.handle().datasets_ensure(config.clone()).await.unwrap(); + assert!(!status.has_error()); + + harness.cleanup().await; + logctx.cleanup_successful(); + } + #[tokio::test] async fn nested_dataset() { illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst);