Skip to content

Commit

Permalink
address comments
Browse files Browse the repository at this point in the history
Created using spr 1.3.6-beta.1
  • Loading branch information
sunshowers committed Dec 19, 2024
2 parents 3ee6676 + 510ef5a commit 9661c95
Show file tree
Hide file tree
Showing 5 changed files with 128 additions and 54 deletions.
4 changes: 2 additions & 2 deletions package/src/bin/omicron-package.rs
Original file line number Diff line number Diff line change
Expand Up @@ -112,8 +112,8 @@ async fn do_show_cargo_commands(config: &Config) -> Result<()> {
let cargo_plan =
build_cargo_plan(&metadata, config.packages_to_build(), &features)?;

let release_command = cargo_plan.release.build_command("build", true);
let debug_command = cargo_plan.debug.build_command("build", true);
let release_command = cargo_plan.release.build_command("build");
let debug_command = cargo_plan.debug.build_command("build");

print!("release command: ");
if let Some(command) = release_command {
Expand Down
48 changes: 29 additions & 19 deletions package/src/cargo_plan.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,8 @@ pub fn build_cargo_plan<'a>(
})
.collect::<BTreeMap<_, _>>();

let mut release = CargoTargets::default();
let mut debug = CargoTargets::default();
let mut release = CargoTargets::new(BuildKind::Release);
let mut debug = CargoTargets::new(BuildKind::Debug);

for (name, pkg) in package_map.0 {
// If this is a Rust package, `name` (the map key) is the name of the
Expand Down Expand Up @@ -82,26 +82,32 @@ pub struct CargoPlan<'a> {

impl CargoPlan<'_> {
pub async fn run(&self, command: &str, log: &Logger) -> Result<()> {
self.release.run(command, true, log).await?;
self.debug.run(command, false, log).await?;
self.release.run(command, log).await?;
self.debug.run(command, log).await?;
Ok(())
}
}

/// A set of packages, binaries, and features to operate on.
#[derive(Debug, Default)]
#[derive(Debug)]
pub struct CargoTargets<'a> {
pub kind: BuildKind,
pub packages: BTreeSet<&'a String>,
pub bins: BTreeSet<&'a String>,
pub features: BTreeSet<&'a String>,
}

impl CargoTargets<'_> {
pub fn build_command(
&self,
command: &str,
release: bool,
) -> Option<Command> {
fn new(kind: BuildKind) -> Self {
Self {
kind,
packages: BTreeSet::new(),
bins: BTreeSet::new(),
features: BTreeSet::new(),
}
}

pub fn build_command(&self, command: &str) -> Option<Command> {
if self.bins.is_empty() {
return None;
}
Expand Down Expand Up @@ -131,20 +137,18 @@ impl CargoTargets<'_> {
},
));
}
if release {
cmd.arg("--release");
match self.kind {
BuildKind::Release => {
cmd.arg("--release");
}
BuildKind::Debug => {}
}

Some(cmd)
}

pub async fn run(
&self,
command: &str,
release: bool,
log: &Logger,
) -> Result<()> {
let Some(mut cmd) = self.build_command(command, release) else {
pub async fn run(&self, command: &str, log: &Logger) -> Result<()> {
let Some(mut cmd) = self.build_command(command) else {
return Ok(());
};

Expand All @@ -160,3 +164,9 @@ impl CargoTargets<'_> {
Ok(())
}
}

/// Whether a set of Cargo targets is built with the release or debug profile.
///
/// Carried by `CargoTargets` so `build_command` can decide on its own
/// whether to append `--release`, instead of callers threading a bool.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BuildKind {
    /// Build with `--release` (optimized profile).
    Release,
    /// Build with the default (debug) profile; no extra flag is passed.
    Debug,
}
1 change: 0 additions & 1 deletion package/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,6 @@ pub enum BuildCommand {
#[clap(long)]
intermediate: bool,
},
/// List the build commands that will be run
/// Builds the packages specified in a manifest, and places them into an
/// 'out' directory.
Package {
Expand Down
21 changes: 15 additions & 6 deletions sled-agent/src/sled_agent.rs
Original file line number Diff line number Diff line change
Expand Up @@ -962,12 +962,21 @@ impl SledAgent {
continue;
};

// First, ensure the dataset exists
let dataset_id = zone.id.into_untyped_uuid();
self.inner
.storage
.upsert_filesystem(dataset_id, dataset_name)
.await?;
// NOTE: This code will be deprecated by https://github.com/oxidecomputer/omicron/pull/7160
//
// However, we need to ensure that all blueprints have datasets
// within them before we can remove this back-fill.
//
// Therefore, we do something hairy here: We ensure the filesystem
// exists, but don't specify any dataset UUID value.
//
// This means that:
// - If the dataset exists and has a UUID, this will be a no-op
// - If the dataset doesn't exist, it'll be created without its
// oxide:uuid zfs property set
// - If a subsequent call to "datasets_ensure" tries to set a UUID,
// it should be able to get set (once).
self.inner.storage.upsert_filesystem(None, dataset_name).await?;
}

self.inner
Expand Down
108 changes: 82 additions & 26 deletions sled-storage/src/manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@ use std::collections::HashSet;
use std::future::Future;
use tokio::sync::{mpsc, oneshot, watch};
use tokio::time::{interval, Duration, MissedTickBehavior};
use uuid::Uuid;

// The size of the mpsc bounded channel used to communicate
// between the `StorageHandle` and `StorageManager`.
Expand Down Expand Up @@ -100,7 +99,7 @@ enum StorageManagerState {

#[derive(Debug)]
pub(crate) struct NewFilesystemRequest {
dataset_id: Uuid,
dataset_id: Option<DatasetUuid>,
dataset_name: DatasetName,
responder: DebugIgnore<oneshot::Sender<Result<(), Error>>>,
}
Expand Down Expand Up @@ -526,7 +525,7 @@ impl StorageHandle {
// and ask for the set of all datasets from Nexus.
pub async fn upsert_filesystem(
&self,
dataset_id: Uuid,
dataset_id: Option<DatasetUuid>,
dataset_name: DatasetName,
) -> Result<(), Error> {
let (tx, rx) = oneshot::channel();
Expand Down Expand Up @@ -1499,27 +1498,9 @@ impl StorageManager {
zoned,
encryption_details,
size_details,
id: Some(DatasetUuid::from_untyped_uuid(request.dataset_id)),
id: request.dataset_id,
additional_options: None,
})?;
// Ensure the dataset has a usable UUID.
if let Ok(id_str) = Zfs::get_oxide_value(&fs_name, "uuid") {
if let Ok(id) = id_str.parse::<Uuid>() {
if id != request.dataset_id {
return Err(Error::UuidMismatch {
name: request.dataset_name.full_name(),
old: id,
new: request.dataset_id,
});
}
return Ok(());
}
}
Zfs::set_oxide_value(
&fs_name,
"uuid",
&request.dataset_id.to_string(),
)?;

Ok(())
}
Expand All @@ -1544,7 +1525,6 @@ mod tests {
use std::collections::BTreeMap;
use std::str::FromStr;
use std::sync::atomic::Ordering;
use uuid::Uuid;

// A helper struct to advance time.
struct TimeTravel {}
Expand Down Expand Up @@ -2005,16 +1985,92 @@ mod tests {
.expect("Ensuring disks should work after key manager is ready");
assert!(!result.has_error(), "{:?}", result);

// Create a filesystem on the newly formatted U.2
let dataset_id = Uuid::new_v4();
// Create a filesystem on the newly formatted U.2.
//
// We can call "upsert_filesystem" both with and without a UUID.
let dataset_id = DatasetUuid::new_v4();
let zpool_name = ZpoolName::new_external(config.disks[0].pool_id);
let dataset_name =
DatasetName::new(zpool_name.clone(), DatasetKind::Crucible);
harness
.handle()
.upsert_filesystem(Some(dataset_id), dataset_name.clone())
.await
.unwrap();
// Observe the dataset exists, and the UUID is set.
let observed_dataset = &Zfs::get_dataset_properties(
&[dataset_name.full_name()],
WhichDatasets::SelfOnly,
)
.unwrap()[0];
assert_eq!(observed_dataset.id, Some(dataset_id));

harness
.handle()
.upsert_filesystem(None, dataset_name.clone())
.await
.unwrap();
// Observe the dataset still exists, and the UUID is still set,
// even though we did not ask for a new value explicitly.
let observed_dataset = &Zfs::get_dataset_properties(
&[dataset_name.full_name()],
WhichDatasets::SelfOnly,
)
.unwrap()[0];
assert_eq!(observed_dataset.id, Some(dataset_id));

harness.cleanup().await;
logctx.cleanup_successful();
}

#[tokio::test]
async fn upsert_filesystem_no_uuid() {
illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst);
let logctx = test_setup_log("upsert_filesystem");
let mut harness = StorageManagerTestHarness::new(&logctx.log).await;

// Test setup: Add a U.2 and M.2, adopt them into the "control plane"
// for usage.
harness.handle().key_manager_ready().await;
let raw_disks =
harness.add_vdevs(&["u2_under_test.vdev", "m2_helping.vdev"]).await;
let config = harness.make_config(1, &raw_disks);
let result = harness
.handle()
.omicron_physical_disks_ensure(config.clone())
.await
.expect("Ensuring disks should work after key manager is ready");
assert!(!result.has_error(), "{:?}", result);

// Create a filesystem on the newly formatted U.2, without a UUID
let zpool_name = ZpoolName::new_external(config.disks[0].pool_id);
let dataset_name =
DatasetName::new(zpool_name.clone(), DatasetKind::Crucible);
harness
.handle()
.upsert_filesystem(dataset_id, dataset_name)
.upsert_filesystem(None, dataset_name.clone())
.await
.unwrap();
let observed_dataset = &Zfs::get_dataset_properties(
&[dataset_name.full_name()],
WhichDatasets::SelfOnly,
)
.unwrap()[0];
assert_eq!(observed_dataset.id, None);

// Later, we can set the UUID to a specific value
let dataset_id = DatasetUuid::new_v4();
harness
.handle()
.upsert_filesystem(Some(dataset_id), dataset_name.clone())
.await
.unwrap();
let observed_dataset = &Zfs::get_dataset_properties(
&[dataset_name.full_name()],
WhichDatasets::SelfOnly,
)
.unwrap()[0];
assert_eq!(observed_dataset.id, Some(dataset_id));

harness.cleanup().await;
logctx.cleanup_successful();
Expand Down

0 comments on commit 9661c95

Please sign in to comment.