diff --git a/clients/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs index 777f351056..0eb7c6ed8f 100644 --- a/clients/nexus-client/src/lib.rs +++ b/clients/nexus-client/src/lib.rs @@ -60,6 +60,7 @@ progenitor::generate_api!( TypedUuidForUpstairsKind = omicron_uuid_kinds::TypedUuid, TypedUuidForUpstairsRepairKind = omicron_uuid_kinds::TypedUuid, TypedUuidForUpstairsSessionKind = omicron_uuid_kinds::TypedUuid, + TypedUuidForVolumeKind = omicron_uuid_kinds::TypedUuid, TypedUuidForZpoolKind = omicron_uuid_kinds::TypedUuid, }, patch = { diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index e501e650b1..6861134435 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -135,6 +135,7 @@ use omicron_uuid_kinds::InstanceUuid; use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::PropolisUuid; use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::VolumeConstructionRequest; use std::borrow::Cow; use std::cmp::Ordering; @@ -1536,7 +1537,7 @@ async fn cmd_db_disk_info( disk_name, instance_name, propolis_zone: format!("oxz_propolis-server_{}", propolis_id), - volume_id: disk.volume_id.to_string(), + volume_id: disk.volume_id().to_string(), disk_state: disk.runtime_state.disk_state.to_string(), } } else { @@ -1545,7 +1546,7 @@ async fn cmd_db_disk_info( disk_name, instance_name, propolis_zone: NO_ACTIVE_PROPOLIS_MSG.to_string(), - volume_id: disk.volume_id.to_string(), + volume_id: disk.volume_id().to_string(), disk_state: disk.runtime_state.disk_state.to_string(), } } @@ -1557,7 +1558,7 @@ async fn cmd_db_disk_info( disk_name: disk.name().to_string(), instance_name: "-".to_string(), propolis_zone: "-".to_string(), - volume_id: disk.volume_id.to_string(), + volume_id: disk.volume_id().to_string(), disk_state: disk.runtime_state.disk_state.to_string(), } }; @@ -1571,7 +1572,7 @@ async fn cmd_db_disk_info( println!("{}", table); // Get the dataset backing this volume. - let regions = datastore.get_allocated_regions(disk.volume_id).await?; + let regions = datastore.get_allocated_regions(disk.volume_id()).await?; let mut rows = Vec::with_capacity(3); for (dataset, region) in regions { @@ -1605,7 +1606,7 @@ async fn cmd_db_disk_info( println!("{}", table); - get_and_display_vcr(disk.volume_id, datastore).await?; + get_and_display_vcr(disk.volume_id(), datastore).await?; Ok(()) } @@ -1613,13 +1614,13 @@ async fn cmd_db_disk_info( // If found, attempt to parse the .data field into a VolumeConstructionRequest // and display it if successful. async fn get_and_display_vcr( - volume_id: Uuid, + volume_id: VolumeUuid, datastore: &DataStore, ) -> Result<(), anyhow::Error> { // Get the VCR from the volume and display selected parts. use db::schema::volume::dsl as volume_dsl; let volumes = volume_dsl::volume - .filter(volume_dsl::id.eq(volume_id)) + .filter(volume_dsl::id.eq(to_db_typed_uuid(volume_id))) .limit(1) .select(Volume::as_select()) .load_async(&*datastore.pool_connection_for_tests().await?) 
@@ -1729,7 +1730,7 @@ async fn cmd_db_disk_physical( .context("loading region")?; for rs in regions { - volume_ids.insert(rs.volume_id()); + volume_ids.insert(rs.volume_id().into_untyped_uuid()); } } @@ -1977,8 +1978,8 @@ impl From for SnapshotRow { state: format_snapshot(&s.state).to_string(), size: s.size.to_string(), source_disk_id: s.disk_id.to_string(), - source_volume_id: s.volume_id.to_string(), - destination_volume_id: s.destination_volume_id.to_string(), + source_volume_id: s.volume_id().to_string(), + destination_volume_id: s.destination_volume_id().to_string(), } } } @@ -2046,8 +2047,8 @@ async fn cmd_db_snapshot_info( let mut dest_volume_ids = Vec::new(); let mut source_volume_ids = Vec::new(); let rows = snapshots.into_iter().map(|snapshot| { - dest_volume_ids.push(snapshot.destination_volume_id); - source_volume_ids.push(snapshot.volume_id); + dest_volume_ids.push(snapshot.destination_volume_id()); + source_volume_ids.push(snapshot.volume_id()); SnapshotRow::from(snapshot) }); if rows.len() == 0 { @@ -2362,7 +2363,7 @@ async fn cmd_db_region_list( struct RegionRow { id: Uuid, dataset_id: DatasetUuid, - volume_id: Uuid, + volume_id: VolumeUuid, block_size: i64, blocks_per_extent: u64, extent_count: u64, @@ -2414,7 +2415,8 @@ async fn cmd_db_region_used_by( String::from("listing regions") }); - let volumes: Vec = regions.iter().map(|x| x.volume_id()).collect(); + let volumes: Vec = + regions.iter().map(|x| x.volume_id().into_untyped_uuid()).collect(); let disks_used: Vec = { let volumes = volumes.clone(); @@ -2504,7 +2506,7 @@ async fn cmd_db_region_used_by( #[derive(Tabled)] struct RegionRow { id: Uuid, - volume_id: Uuid, + volume_id: VolumeUuid, usage_type: String, usage_id: String, usage_name: String, @@ -2515,7 +2517,7 @@ async fn cmd_db_region_used_by( .into_iter() .map(|region: Region| { if let Some(image) = - images_used.iter().find(|x| x.volume_id == region.volume_id()) + images_used.iter().find(|x| x.volume_id() == region.volume_id()) { RegionRow { id: region.id(), @@ -2528,7 +2530,7 @@ async fn cmd_db_region_used_by( } } else if let Some(snapshot) = snapshots_used .iter() - .find(|x| x.volume_id == region.volume_id()) + .find(|x| x.volume_id() == region.volume_id()) { RegionRow { id: region.id(), @@ -2541,7 +2543,7 @@ async fn cmd_db_region_used_by( } } else if let Some(snapshot) = snapshots_used .iter() - .find(|x| x.destination_volume_id == region.volume_id()) + .find(|x| x.destination_volume_id() == region.volume_id()) { RegionRow { id: region.id(), @@ -2553,7 +2555,7 @@ async fn cmd_db_region_used_by( deleted: snapshot.time_deleted().is_some(), } } else if let Some(disk) = - disks_used.iter().find(|x| x.volume_id == region.volume_id()) + disks_used.iter().find(|x| x.volume_id() == region.volume_id()) { RegionRow { id: region.id(), @@ -2602,7 +2604,7 @@ async fn cmd_db_region_find_deleted( #[derive(Tabled)] struct VolumeRow { - volume_id: Uuid, + volume_id: VolumeUuid, } let region_rows: Vec = freed_crucible_resources @@ -4423,7 +4425,7 @@ async fn cmd_db_region_snapshot_replacement_request( .insert_region_snapshot_replacement_request_with_volume_id( opctx, request, - db_snapshots[0].volume_id, + db_snapshots[0].volume_id(), ) .await?; diff --git a/nexus/db-model/src/disk.rs b/nexus/db-model/src/disk.rs index b00c1a7ca4..41cc1556b6 100644 --- a/nexus/db-model/src/disk.rs +++ b/nexus/db-model/src/disk.rs @@ -3,6 +3,7 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
use super::{BlockSize, ByteCount, DiskState, Generation}; +use crate::typed_uuid::DbTypedUuid; use crate::{schema::disk, unsigned::SqlU8}; use chrono::{DateTime, Utc}; use db_macros::Resource; @@ -10,6 +11,8 @@ use nexus_types::external_api::params; use nexus_types::identity::Resource; use omicron_common::api::external; use omicron_common::api::internal; +use omicron_uuid_kinds::VolumeKind; +use omicron_uuid_kinds::VolumeUuid; use serde::{Deserialize, Serialize}; use std::convert::TryFrom; use std::net::SocketAddrV6; @@ -38,7 +41,7 @@ pub struct Disk { pub project_id: Uuid, /// Root volume of the disk - pub volume_id: Uuid, + volume_id: DbTypedUuid, /// runtime state of the Disk #[diesel(embed)] @@ -81,7 +84,7 @@ impl Disk { pub fn new( disk_id: Uuid, project_id: Uuid, - volume_id: Uuid, + volume_id: VolumeUuid, params: params::DiskCreate, block_size: BlockSize, runtime_initial: DiskRuntimeState, @@ -103,7 +106,7 @@ impl Disk { identity, rcgen: external::Generation::new().into(), project_id, - volume_id, + volume_id: volume_id.into(), runtime_state: runtime_initial, slot: None, size: params.size.into(), @@ -129,6 +132,10 @@ impl Disk { pub fn pantry_address(&self) -> Option { self.pantry_address.as_ref().map(|x| x.parse().unwrap()) } + + pub fn volume_id(&self) -> VolumeUuid { + self.volume_id.into() + } } /// Conversion to the external API type. diff --git a/nexus/db-model/src/image.rs b/nexus/db-model/src/image.rs index 6cdf3201be..4cebe8ee4b 100644 --- a/nexus/db-model/src/image.rs +++ b/nexus/db-model/src/image.rs @@ -10,10 +10,13 @@ use super::{BlockSize, ByteCount, Digest}; use crate::schema::{image, project_image, silo_image}; +use crate::typed_uuid::DbTypedUuid; use db_macros::Resource; use nexus_types::external_api::views; use nexus_types::identity::Resource; use omicron_common::api::external::Error; +use omicron_uuid_kinds::VolumeKind; +use omicron_uuid_kinds::VolumeUuid; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -36,7 +39,7 @@ pub struct Image { pub silo_id: Uuid, pub project_id: Option, - pub volume_id: Uuid, + pub volume_id: DbTypedUuid, pub url: Option, pub os: String, pub version: String, @@ -47,6 +50,12 @@ pub struct Image { pub size: ByteCount, } +impl Image { + pub fn volume_id(&self) -> VolumeUuid { + self.volume_id.into() + } +} + #[derive( Queryable, Selectable, Clone, Debug, Resource, Serialize, Deserialize, )] @@ -57,7 +66,7 @@ pub struct ProjectImage { pub silo_id: Uuid, pub project_id: Uuid, - pub volume_id: Uuid, + pub volume_id: DbTypedUuid, pub url: Option, pub os: String, pub version: String, @@ -68,6 +77,12 @@ pub struct ProjectImage { pub size: ByteCount, } +impl ProjectImage { + pub fn volume_id(&self) -> VolumeUuid { + self.volume_id.into() + } +} + #[derive( Queryable, Selectable, Clone, Debug, Resource, Serialize, Deserialize, )] @@ -77,7 +92,7 @@ pub struct SiloImage { pub identity: SiloImageIdentity, pub silo_id: Uuid, - pub volume_id: Uuid, + pub volume_id: DbTypedUuid, pub url: Option, pub os: String, pub version: String, @@ -88,6 +103,12 @@ pub struct SiloImage { pub size: ByteCount, } +impl SiloImage { + pub fn volume_id(&self) -> VolumeUuid { + self.volume_id.into() + } +} + impl TryFrom for ProjectImage { type Error = Error; diff --git a/nexus/db-model/src/region.rs b/nexus/db-model/src/region.rs index 417affea89..b6d51c9a37 100644 --- a/nexus/db-model/src/region.rs +++ b/nexus/db-model/src/region.rs @@ -10,6 +10,8 @@ use db_macros::Asset; use omicron_common::api::external; use omicron_uuid_kinds::DatasetKind; use 
omicron_uuid_kinds::DatasetUuid; +use omicron_uuid_kinds::VolumeKind; +use omicron_uuid_kinds::VolumeUuid; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -34,7 +36,7 @@ pub struct Region { identity: RegionIdentity, dataset_id: DbTypedUuid, - volume_id: Uuid, + volume_id: DbTypedUuid, block_size: ByteCount, @@ -58,7 +60,7 @@ pub struct Region { impl Region { pub fn new( dataset_id: DatasetUuid, - volume_id: Uuid, + volume_id: VolumeUuid, block_size: ByteCount, blocks_per_extent: u64, extent_count: u64, @@ -68,7 +70,7 @@ impl Region { Self { identity: RegionIdentity::new(Uuid::new_v4()), dataset_id: dataset_id.into(), - volume_id, + volume_id: volume_id.into(), block_size, blocks_per_extent: blocks_per_extent as i64, extent_count: extent_count as i64, @@ -81,8 +83,8 @@ impl Region { pub fn id(&self) -> Uuid { self.identity.id } - pub fn volume_id(&self) -> Uuid { - self.volume_id + pub fn volume_id(&self) -> VolumeUuid { + self.volume_id.into() } pub fn dataset_id(&self) -> DatasetUuid { self.dataset_id.into() diff --git a/nexus/db-model/src/region_replacement.rs b/nexus/db-model/src/region_replacement.rs index 57ead4b68e..817563fa93 100644 --- a/nexus/db-model/src/region_replacement.rs +++ b/nexus/db-model/src/region_replacement.rs @@ -4,9 +4,12 @@ use super::impl_enum_type; use crate::schema::region_replacement; +use crate::typed_uuid::DbTypedUuid; use crate::Region; use chrono::DateTime; use chrono::Utc; +use omicron_uuid_kinds::VolumeKind; +use omicron_uuid_kinds::VolumeUuid; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -144,10 +147,10 @@ pub struct RegionReplacement { pub old_region_id: Uuid, /// The volume whose region is being replaced - pub volume_id: Uuid, + pub volume_id: DbTypedUuid, /// A synthetic volume that only is used to later delete the old region - pub old_region_volume_id: Option, + pub old_region_volume_id: Option>, /// The new region that will be used to replace the old one pub new_region_id: Option, @@ -162,16 +165,24 @@ impl RegionReplacement { Self::new(region.id(), region.volume_id()) } - pub fn new(old_region_id: Uuid, volume_id: Uuid) -> Self { + pub fn new(old_region_id: Uuid, volume_id: VolumeUuid) -> Self { Self { id: Uuid::new_v4(), request_time: Utc::now(), old_region_id, - volume_id, + volume_id: volume_id.into(), old_region_volume_id: None, new_region_id: None, replacement_state: RegionReplacementState::Requested, operating_saga_id: None, } } + + pub fn volume_id(&self) -> VolumeUuid { + self.volume_id.into() + } + + pub fn old_region_volume_id(&self) -> Option { + self.old_region_volume_id.map(|u| u.into()) + } } diff --git a/nexus/db-model/src/region_snapshot_replacement.rs b/nexus/db-model/src/region_snapshot_replacement.rs index 28627d8379..36730a072a 100644 --- a/nexus/db-model/src/region_snapshot_replacement.rs +++ b/nexus/db-model/src/region_snapshot_replacement.rs @@ -10,6 +10,8 @@ use chrono::DateTime; use chrono::Utc; use omicron_uuid_kinds::DatasetKind; use omicron_uuid_kinds::DatasetUuid; +use omicron_uuid_kinds::VolumeKind; +use omicron_uuid_kinds::VolumeUuid; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -133,7 +135,7 @@ pub struct RegionSnapshotReplacement { pub old_snapshot_id: Uuid, /// A synthetic volume that only is used to later delete the old snapshot - pub old_snapshot_volume_id: Option, + pub old_snapshot_volume_id: Option>, pub new_region_id: Option, @@ -145,7 +147,7 @@ pub struct RegionSnapshotReplacement { /// an additional reference count bump is required. 
This volume should live /// as long as this request so that all necessary replacements can be /// completed. - pub new_region_volume_id: Option, + pub new_region_volume_id: Option>, } impl RegionSnapshotReplacement { @@ -175,4 +177,12 @@ impl RegionSnapshotReplacement { operating_saga_id: None, } } + + pub fn old_snapshot_volume_id(&self) -> Option { + self.old_snapshot_volume_id.map(|v| v.into()) + } + + pub fn new_region_volume_id(&self) -> Option { + self.new_region_volume_id.map(|v| v.into()) + } } diff --git a/nexus/db-model/src/region_snapshot_replacement_step.rs b/nexus/db-model/src/region_snapshot_replacement_step.rs index 3c9a60056e..8223628742 100644 --- a/nexus/db-model/src/region_snapshot_replacement_step.rs +++ b/nexus/db-model/src/region_snapshot_replacement_step.rs @@ -4,8 +4,11 @@ use super::impl_enum_type; use crate::schema::region_snapshot_replacement_step; +use crate::typed_uuid::DbTypedUuid; use chrono::DateTime; use chrono::Utc; +use omicron_uuid_kinds::VolumeKind; +use omicron_uuid_kinds::VolumeUuid; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -90,10 +93,10 @@ pub struct RegionSnapshotReplacementStep { pub request_time: DateTime, /// A volume that references the snapshot - pub volume_id: Uuid, + pub volume_id: DbTypedUuid, /// A synthetic volume that only is used to later delete the old snapshot - pub old_snapshot_volume_id: Option, + pub old_snapshot_volume_id: Option>, pub replacement_state: RegionSnapshotReplacementStepState, @@ -101,15 +104,23 @@ pub struct RegionSnapshotReplacementStep { } impl RegionSnapshotReplacementStep { - pub fn new(request_id: Uuid, volume_id: Uuid) -> Self { + pub fn new(request_id: Uuid, volume_id: VolumeUuid) -> Self { Self { id: Uuid::new_v4(), request_id, request_time: Utc::now(), - volume_id, + volume_id: volume_id.into(), old_snapshot_volume_id: None, replacement_state: RegionSnapshotReplacementStepState::Requested, operating_saga_id: None, } } + + pub fn volume_id(&self) -> VolumeUuid { + self.volume_id.into() + } + + pub fn old_snapshot_volume_id(&self) -> Option { + self.old_snapshot_volume_id.map(|v| v.into()) + } } diff --git a/nexus/db-model/src/snapshot.rs b/nexus/db-model/src/snapshot.rs index 6c160e5c6b..66fc2589cb 100644 --- a/nexus/db-model/src/snapshot.rs +++ b/nexus/db-model/src/snapshot.rs @@ -4,11 +4,14 @@ use super::{impl_enum_type, ByteCount}; use crate::schema::snapshot; +use crate::typed_uuid::DbTypedUuid; use crate::BlockSize; use crate::Generation; use db_macros::Resource; use nexus_types::external_api::views; use nexus_types::identity::Resource; +use omicron_uuid_kinds::VolumeKind; +use omicron_uuid_kinds::VolumeUuid; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -45,10 +48,10 @@ pub struct Snapshot { pub project_id: Uuid, // which disk is this a snapshot of pub disk_id: Uuid, - pub volume_id: Uuid, + pub volume_id: DbTypedUuid, // destination of all snapshot blocks - pub destination_volume_id: Uuid, + pub destination_volume_id: DbTypedUuid, pub gen: Generation, pub state: SnapshotState, @@ -80,3 +83,13 @@ impl From for views::SnapshotState { } } } + +impl Snapshot { + pub fn volume_id(&self) -> VolumeUuid { + self.volume_id.into() + } + + pub fn destination_volume_id(&self) -> VolumeUuid { + self.destination_volume_id.into() + } +} diff --git a/nexus/db-model/src/volume.rs b/nexus/db-model/src/volume.rs index bd6e349fbf..9f80ff71fa 100644 --- a/nexus/db-model/src/volume.rs +++ b/nexus/db-model/src/volume.rs @@ -7,10 +7,12 @@ use crate::collection::DatastoreCollectionConfig; use 
crate::schema::{region, volume}; use chrono::{DateTime, Utc}; use db_macros::Asset; +use omicron_uuid_kinds::VolumeUuid; use uuid::Uuid; #[derive(Asset, Queryable, Insertable, Debug, Selectable, Clone)] #[diesel(table_name = volume)] +#[asset(uuid_kind = VolumeKind)] pub struct Volume { #[diesel(embed)] identity: VolumeIdentity, @@ -24,7 +26,7 @@ pub struct Volume { } impl Volume { - pub fn new(id: Uuid, data: String) -> Self { + pub fn new(id: VolumeUuid, data: String) -> Self { Self { identity: VolumeIdentity::new(id), time_deleted: None, diff --git a/nexus/db-model/src/volume_repair.rs b/nexus/db-model/src/volume_repair.rs index a92fcd3425..645660c496 100644 --- a/nexus/db-model/src/volume_repair.rs +++ b/nexus/db-model/src/volume_repair.rs @@ -3,6 +3,8 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. use crate::schema::volume_repair; +use crate::typed_uuid::DbTypedUuid; +use omicron_uuid_kinds::VolumeKind; use uuid::Uuid; /// When modifying a Volume by replacing its parts, Nexus should take care to @@ -15,6 +17,6 @@ use uuid::Uuid; #[derive(Queryable, Insertable, Debug, Selectable, Clone)] #[diesel(table_name = volume_repair)] pub struct VolumeRepair { - pub volume_id: Uuid, + pub volume_id: DbTypedUuid, pub repair_id: Uuid, } diff --git a/nexus/db-model/src/volume_resource_usage.rs b/nexus/db-model/src/volume_resource_usage.rs index a5203bae46..977a46b0fa 100644 --- a/nexus/db-model/src/volume_resource_usage.rs +++ b/nexus/db-model/src/volume_resource_usage.rs @@ -7,6 +7,8 @@ use crate::schema::volume_resource_usage; use crate::typed_uuid::DbTypedUuid; use omicron_uuid_kinds::DatasetKind; use omicron_uuid_kinds::DatasetUuid; +use omicron_uuid_kinds::VolumeKind; +use omicron_uuid_kinds::VolumeUuid; use uuid::Uuid; impl_enum_type!( @@ -51,7 +53,7 @@ impl_enum_type!( pub struct VolumeResourceUsageRecord { pub usage_id: Uuid, - pub volume_id: Uuid, + pub volume_id: DbTypedUuid, pub usage_type: VolumeResourceUsageType, @@ -76,12 +78,12 @@ pub enum VolumeResourceUsage { } impl VolumeResourceUsageRecord { - pub fn new(volume_id: Uuid, usage: VolumeResourceUsage) -> Self { + pub fn new(volume_id: VolumeUuid, usage: VolumeResourceUsage) -> Self { match usage { VolumeResourceUsage::ReadOnlyRegion { region_id } => { VolumeResourceUsageRecord { usage_id: Uuid::new_v4(), - volume_id, + volume_id: volume_id.into(), usage_type: VolumeResourceUsageType::ReadOnlyRegion, region_id: Some(region_id), @@ -98,7 +100,7 @@ impl VolumeResourceUsageRecord { snapshot_id, } => VolumeResourceUsageRecord { usage_id: Uuid::new_v4(), - volume_id, + volume_id: volume_id.into(), usage_type: VolumeResourceUsageType::RegionSnapshot, region_id: None, @@ -109,6 +111,10 @@ impl VolumeResourceUsageRecord { }, } } + + pub fn volume_id(&self) -> VolumeUuid { + self.volume_id.into() + } } impl TryFrom for VolumeResourceUsage { diff --git a/nexus/db-queries/src/db/datastore/disk.rs b/nexus/db-queries/src/db/datastore/disk.rs index 572224161d..d3678daa53 100644 --- a/nexus/db-queries/src/db/datastore/disk.rs +++ b/nexus/db-queries/src/db/datastore/disk.rs @@ -19,6 +19,7 @@ use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Resource; use crate::db::lookup::LookupPath; +use crate::db::model::to_db_typed_uuid; use crate::db::model::Disk; use crate::db::model::DiskRuntimeState; use crate::db::model::DiskUpdate; @@ -45,6 +46,7 @@ use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use 
omicron_common::api::external::UpdateResult; use omicron_common::bail_unless; +use omicron_uuid_kinds::VolumeUuid; use ref_cast::RefCast; use std::net::SocketAddrV6; use uuid::Uuid; @@ -824,13 +826,13 @@ impl DataStore { pub async fn disk_for_volume_id( &self, - volume_id: Uuid, + volume_id: VolumeUuid, ) -> LookupResult> { let conn = self.pool_connection_unauthorized().await?; use db::schema::disk::dsl; dsl::disk - .filter(dsl::volume_id.eq(volume_id)) + .filter(dsl::volume_id.eq(to_db_typed_uuid(volume_id))) .select(Disk::as_select()) .first_async(&*conn) .await @@ -881,7 +883,7 @@ mod tests { Disk::new( Uuid::new_v4(), authz_project.id(), - Uuid::new_v4(), + VolumeUuid::new_v4(), params::DiskCreate { identity: external::IdentityMetadataCreateParams { name: "first-post".parse().unwrap(), diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index d20f24e773..9bff31b291 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -119,6 +119,8 @@ pub use rack::RackInit; pub use rack::SledUnderlayAllocationResult; pub use region::RegionAllocationFor; pub use region::RegionAllocationParameters; +pub use region_snapshot_replacement::NewRegionVolumeId; +pub use region_snapshot_replacement::OldSnapshotVolumeId; pub use silo::Discoverability; pub use sled::SledTransition; pub use sled::TransitionError; @@ -476,6 +478,7 @@ mod test { use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; + use omicron_uuid_kinds::VolumeUuid; use std::collections::HashMap; use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV6}; @@ -1020,7 +1023,7 @@ mod test { &format!("disk{}", alloc_seed), ByteCount::from_mebibytes_u32(1), ); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let expected_region_count = REGION_REDUNDANCY_THRESHOLD; let dataset_and_regions = datastore @@ -1114,7 +1117,7 @@ mod test { &format!("disk{}", alloc_seed), ByteCount::from_mebibytes_u32(1), ); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let expected_region_count = REGION_REDUNDANCY_THRESHOLD; let dataset_and_regions = datastore @@ -1202,7 +1205,7 @@ mod test { &format!("disk{}", alloc_seed), ByteCount::from_mebibytes_u32(1), ); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let err = datastore .disk_region_allocate( @@ -1248,7 +1251,7 @@ mod test { "disk", ByteCount::from_mebibytes_u32(500), ); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let mut dataset_and_regions1 = datastore .disk_region_allocate( &opctx, @@ -1357,7 +1360,7 @@ mod test { "disk1", ByteCount::from_mebibytes_u32(500), ); - let volume1_id = Uuid::new_v4(); + let volume1_id = VolumeUuid::new_v4(); let err = datastore .disk_region_allocate( &opctx, @@ -1457,7 +1460,7 @@ mod test { "disk1", ByteCount::from_mebibytes_u32(500), ); - let volume1_id = Uuid::new_v4(); + let volume1_id = VolumeUuid::new_v4(); let err = datastore .disk_region_allocate( &opctx, @@ -1546,7 +1549,7 @@ mod test { (Policy::InService, State::Active, AllocationShould::Succeed), ]; - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let params = create_test_disk_create_params( "disk", ByteCount::from_mebibytes_u32(500), @@ -1616,7 +1619,7 @@ mod test { let disk_size = test_zpool_size(); let alloc_size = ByteCount::try_from(disk_size.to_bytes() * 2).unwrap(); let params = 
create_test_disk_create_params("disk1", alloc_size); - let volume1_id = Uuid::new_v4(); + let volume1_id = VolumeUuid::new_v4(); assert!(datastore .disk_region_allocate( @@ -1643,10 +1646,11 @@ mod test { let db = TestDatabase::new_with_datastore(&logctx.log).await; let datastore = db.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); - let explanation = DataStore::get_allocated_regions_query(Uuid::nil()) - .explain_async(&conn) - .await - .unwrap(); + let explanation = + DataStore::get_allocated_regions_query(VolumeUuid::nil()) + .explain_async(&conn) + .await + .unwrap(); assert!( !explanation.contains("FULL SCAN"), "Found an unexpected FULL SCAN: {}", diff --git a/nexus/db-queries/src/db/datastore/region.rs b/nexus/db-queries/src/db/datastore/region.rs index 67bd37cf69..d0767c7677 100644 --- a/nexus/db-queries/src/db/datastore/region.rs +++ b/nexus/db-queries/src/db/datastore/region.rs @@ -35,17 +35,18 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::UpdateResult; use omicron_uuid_kinds::DatasetUuid; +use omicron_uuid_kinds::VolumeUuid; use slog::Logger; use std::net::SocketAddrV6; use uuid::Uuid; pub enum RegionAllocationFor { /// Allocate region(s) for a disk volume - DiskVolume { volume_id: Uuid }, + DiskVolume { volume_id: VolumeUuid }, /// Allocate region(s) for a snapshot volume, which may have read-only /// targets. - SnapshotVolume { volume_id: Uuid, snapshot_id: Uuid }, + SnapshotVolume { volume_id: VolumeUuid, snapshot_id: Uuid }, } /// Describe the region(s) to be allocated @@ -64,12 +65,12 @@ pub enum RegionAllocationParameters<'a> { impl DataStore { pub(super) fn get_allocated_regions_query( - volume_id: Uuid, + volume_id: VolumeUuid, ) -> impl RunnableQuery<(Dataset, Region)> { use db::schema::dataset::dsl as dataset_dsl; use db::schema::region::dsl as region_dsl; region_dsl::region - .filter(region_dsl::volume_id.eq(volume_id)) + .filter(region_dsl::volume_id.eq(to_db_typed_uuid(volume_id))) .inner_join( dataset_dsl::dataset .on(region_dsl::dataset_id.eq(dataset_dsl::id)), @@ -84,7 +85,7 @@ impl DataStore { /// may be used in a context where the disk is being deleted. 
pub async fn get_allocated_regions( &self, - volume_id: Uuid, + volume_id: VolumeUuid, ) -> Result, Error> { Self::get_allocated_regions_query(volume_id) .get_results_async::<(Dataset, Region)>( @@ -184,7 +185,7 @@ impl DataStore { pub async fn disk_region_allocate( &self, opctx: &OpContext, - volume_id: Uuid, + volume_id: VolumeUuid, disk_source: ¶ms::DiskSource, size: external::ByteCount, allocation_strategy: &RegionAllocationStrategy, @@ -553,7 +554,7 @@ impl DataStore { pub async fn find_non_expunged_regions( &self, opctx: &OpContext, - volume_id: Uuid, + volume_id: VolumeUuid, ) -> LookupResult> { let conn = self.pool_connection_authorized(opctx).await?; @@ -578,7 +579,7 @@ impl DataStore { )) .select(dataset_dsl::id) )) - .filter(region_dsl::volume_id.eq(volume_id)) + .filter(region_dsl::volume_id.eq(to_db_typed_uuid(volume_id))) .select(Region::as_select()) .load_async(&*conn) .await diff --git a/nexus/db-queries/src/db/datastore/region_replacement.rs b/nexus/db-queries/src/db/datastore/region_replacement.rs index 8aad7f2cfd..dbbb017d1f 100644 --- a/nexus/db-queries/src/db/datastore/region_replacement.rs +++ b/nexus/db-queries/src/db/datastore/region_replacement.rs @@ -10,6 +10,7 @@ use crate::db; use crate::db::datastore::SQL_BATCH_SIZE; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; +use crate::db::model::to_db_typed_uuid; use crate::db::model::Region; use crate::db::model::RegionReplacement; use crate::db::model::RegionReplacementState; @@ -27,6 +28,7 @@ use diesel::prelude::*; use omicron_common::api::external::Error; use omicron_uuid_kinds::DownstairsRegionKind; use omicron_uuid_kinds::TypedUuid; +use omicron_uuid_kinds::VolumeUuid; use uuid::Uuid; impl DataStore { @@ -72,7 +74,7 @@ impl DataStore { Self::volume_repair_insert_in_txn( &conn, err, - request.volume_id, + request.volume_id(), request.id, ) .await?; @@ -287,7 +289,7 @@ impl DataStore { region_replacement_id: Uuid, operating_saga_id: Uuid, new_region_id: Uuid, - old_region_volume_id: Uuid, + old_region_volume_id: VolumeUuid, ) -> Result<(), Error> { use db::schema::region_replacement::dsl; let updated = diesel::update(dsl::region_replacement) @@ -298,7 +300,8 @@ impl DataStore { ) .set(( dsl::replacement_state.eq(RegionReplacementState::Running), - dsl::old_region_volume_id.eq(Some(old_region_volume_id)), + dsl::old_region_volume_id + .eq(Some(to_db_typed_uuid(old_region_volume_id))), dsl::new_region_id.eq(Some(new_region_id)), dsl::operating_saga_id.eq(Option::::None), )) @@ -316,7 +319,7 @@ impl DataStore { && record.replacement_state == RegionReplacementState::Running && record.new_region_id == Some(new_region_id) - && record.old_region_volume_id + && record.old_region_volume_id() == Some(old_region_volume_id) { Ok(()) @@ -695,9 +698,10 @@ impl DataStore { self.transaction_retry_wrapper("set_region_replacement_complete") .transaction(&conn, |conn| { let err = err.clone(); + let request_volume_id = request.volume_id(); async move { Self::volume_repair_delete_query( - request.volume_id, + request_volume_id, request.id, ) .execute_async(&conn) @@ -769,9 +773,10 @@ impl DataStore { self.transaction_retry_wrapper("set_region_replacement_complete_from_requested") .transaction(&conn, |conn| { let err = err.clone(); + let request_volume_id = request.volume_id(); async move { Self::volume_repair_delete_query( - request.volume_id, + request_volume_id, request.id, ) .execute_async(&conn) @@ -924,6 +929,7 @@ mod test { use crate::db::pub_test_utils::TestDatabase; use 
omicron_test_utils::dev; + use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::VolumeConstructionRequest; #[tokio::test] @@ -934,13 +940,13 @@ mod test { let region_1_id = Uuid::new_v4(); let region_2_id = Uuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: Uuid::new_v4(), block_size: 512, sub_volumes: vec![], read_only_parent: None, @@ -979,13 +985,13 @@ mod test { let (opctx, datastore) = (db.opctx(), db.datastore()); let region_id = Uuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: Uuid::new_v4(), block_size: 512, sub_volumes: vec![], read_only_parent: None, @@ -1085,13 +1091,13 @@ mod test { let (opctx, datastore) = (db.opctx(), db.datastore()); let region_id = Uuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: Uuid::new_v4(), block_size: 512, sub_volumes: vec![], read_only_parent: None, diff --git a/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs b/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs index 90b014c582..a9dcd97b77 100644 --- a/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs +++ b/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs @@ -11,6 +11,7 @@ use crate::db; use crate::db::datastore::SQL_BATCH_SIZE; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; +use crate::db::model::to_db_typed_uuid; use crate::db::model::RegionSnapshot; use crate::db::model::RegionSnapshotReplacement; use crate::db::model::RegionSnapshotReplacementState; @@ -25,6 +26,7 @@ use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::prelude::*; use omicron_common::api::external::Error; +use omicron_uuid_kinds::VolumeUuid; use uuid::Uuid; #[must_use] @@ -38,6 +40,9 @@ pub enum InsertStepResult { AlreadyHandled { existing_step_id: Uuid }, } +pub struct NewRegionVolumeId(pub VolumeUuid); +pub struct OldSnapshotVolumeId(pub VolumeUuid); + impl DataStore { /// Create and insert a region snapshot replacement request for a /// RegionSnapshot, returning the ID of the request. 
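A note on the NewRegionVolumeId and OldSnapshotVolumeId wrappers added just above: once both arguments to the replacement-done update are VolumeUuid, a transposed call site would still compile, so each argument is wrapped in a dedicated one-field struct and unwrapped with .0 only where the raw ID is needed. The sketch below illustrates the idea with simplified stand-in types and a hypothetical function name; only the two wrapper structs mirror the patch directly.

use uuid::Uuid;

// Simplified stand-in for omicron_uuid_kinds::VolumeUuid.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct VolumeUuid(Uuid);

// One-field wrappers, mirroring the pattern in the patch: both arguments are
// volume IDs, but the wrapper names keep call sites self-documenting and turn
// a swapped pair into a type error instead of a silent bug.
struct NewRegionVolumeId(pub VolumeUuid);
struct OldSnapshotVolumeId(pub VolumeUuid);

// Hypothetical function name, standing in for the datastore update method.
fn mark_replacement_done(
    new_region_volume_id: NewRegionVolumeId,
    old_snapshot_volume_id: OldSnapshotVolumeId,
) {
    // Unwrap with .0 where the plain VolumeUuid is needed, as the datastore
    // code above does with to_db_typed_uuid(new_region_volume_id.0).
    let _ = (new_region_volume_id.0, old_snapshot_volume_id.0);
}

fn main() {
    let new_region = NewRegionVolumeId(VolumeUuid(Uuid::new_v4()));
    let old_snapshot = OldSnapshotVolumeId(VolumeUuid(Uuid::new_v4()));
    mark_replacement_done(new_region, old_snapshot);
    // mark_replacement_done(old_snapshot, new_region); // would not compile: arguments swapped
}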
@@ -79,7 +84,7 @@ impl DataStore { self.insert_region_snapshot_replacement_request_with_volume_id( opctx, request, - db_snapshot.volume_id, + db_snapshot.volume_id(), ) .await } @@ -90,7 +95,7 @@ impl DataStore { &self, opctx: &OpContext, request: RegionSnapshotReplacement, - volume_id: Uuid, + volume_id: VolumeUuid, ) -> Result<(), Error> { let err = OptionalError::new(); let conn = self.pool_connection_authorized(opctx).await?; @@ -352,8 +357,8 @@ impl DataStore { region_snapshot_replacement_id: Uuid, operating_saga_id: Uuid, new_region_id: Uuid, - new_region_volume_id: Uuid, - old_snapshot_volume_id: Uuid, + new_region_volume_id: NewRegionVolumeId, + old_snapshot_volume_id: OldSnapshotVolumeId, ) -> Result<(), Error> { use db::schema::region_snapshot_replacement::dsl; let updated = diesel::update(dsl::region_snapshot_replacement) @@ -366,9 +371,11 @@ impl DataStore { .set(( dsl::replacement_state .eq(RegionSnapshotReplacementState::ReplacementDone), - dsl::old_snapshot_volume_id.eq(Some(old_snapshot_volume_id)), + dsl::old_snapshot_volume_id + .eq(Some(to_db_typed_uuid(old_snapshot_volume_id.0))), dsl::new_region_id.eq(Some(new_region_id)), - dsl::new_region_volume_id.eq(Some(new_region_volume_id)), + dsl::new_region_volume_id + .eq(Some(to_db_typed_uuid(new_region_volume_id.0))), dsl::operating_saga_id.eq(Option::::None), )) .check_if_exists::( @@ -387,10 +394,10 @@ impl DataStore { && record.replacement_state == RegionSnapshotReplacementState::ReplacementDone && record.new_region_id == Some(new_region_id) - && record.new_region_volume_id - == Some(new_region_volume_id) - && record.old_snapshot_volume_id - == Some(old_snapshot_volume_id) + && record.new_region_volume_id() + == Some(new_region_volume_id.0) + && record.old_snapshot_volume_id() + == Some(old_snapshot_volume_id.0) { Ok(()) } else { @@ -839,7 +846,7 @@ impl DataStore { &self, opctx: &OpContext, request_id: Uuid, - volume_id: Uuid, + volume_id: VolumeUuid, ) -> Result { let request = RegionSnapshotReplacementStep::new(request_id, volume_id); @@ -872,7 +879,10 @@ impl DataStore { // the same step being inserted with the same volume id. let maybe_record = dsl::region_snapshot_replacement_step - .filter(dsl::old_snapshot_volume_id.eq(request.volume_id)) + .filter( + dsl::old_snapshot_volume_id + .eq(to_db_typed_uuid(request.volume_id())), + ) .get_result_async::(&conn) .await .optional()?; @@ -887,7 +897,10 @@ impl DataStore { // snapshot replacement step for it in a non-complete state. 
let maybe_record = dsl::region_snapshot_replacement_step - .filter(dsl::volume_id.eq(request.volume_id)) + .filter( + dsl::volume_id + .eq(to_db_typed_uuid(request.volume_id())), + ) .get_result_async::(&conn) .await .optional()?; @@ -918,7 +931,7 @@ impl DataStore { Self::volume_repair_insert_in_txn( &conn, err, - request.volume_id, + request.volume_id(), request.id, ) .await?; @@ -1104,7 +1117,7 @@ impl DataStore { opctx: &OpContext, region_snapshot_replacement_step_id: Uuid, operating_saga_id: Uuid, - old_snapshot_volume_id: Uuid, + old_snapshot_volume_id: VolumeUuid, ) -> Result<(), Error> { type TxnError = TransactionError; @@ -1145,7 +1158,7 @@ impl DataStore { ), dsl::operating_saga_id.eq(Option::::None), dsl::old_snapshot_volume_id - .eq(old_snapshot_volume_id), + .eq(to_db_typed_uuid(old_snapshot_volume_id)), )) .check_if_exists::( region_snapshot_replacement_step_id, @@ -1371,6 +1384,7 @@ mod test { use crate::db::pub_test_utils::TestDatabase; use omicron_test_utils::dev; use omicron_uuid_kinds::DatasetUuid; + use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::VolumeConstructionRequest; #[tokio::test] @@ -1387,7 +1401,7 @@ mod test { let region_2_id = Uuid::new_v4(); let snapshot_2_id = Uuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -1447,7 +1461,7 @@ mod test { let region_2_id = Uuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -1497,7 +1511,7 @@ mod test { let region_id = Uuid::new_v4(); let snapshot_id = Uuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -1545,7 +1559,7 @@ mod test { // Insert some replacement steps, and make sure counting works - let step_volume_id = Uuid::new_v4(); + let step_volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -1592,7 +1606,7 @@ mod test { 1, ); - let step_volume_id = Uuid::new_v4(); + let step_volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -1642,7 +1656,7 @@ mod test { 1, ); - let step_volume_id = Uuid::new_v4(); + let step_volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -1708,7 +1722,7 @@ mod test { // Ensure that only one non-complete replacement step can be inserted // per volume. - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -1782,7 +1796,7 @@ mod test { &opctx, first_request_id, saga_id, - Uuid::new_v4(), // old_snapshot_volume_id + VolumeUuid::new_v4(), // old_snapshot_volume_id ) .await .unwrap(); @@ -1818,7 +1832,7 @@ mod test { let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -1858,7 +1872,7 @@ mod test { .unwrap() .is_empty()); - let step_volume_id = Uuid::new_v4(); + let step_volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -1885,7 +1899,7 @@ mod test { assert!(matches!(result, InsertStepResult::Inserted { .. 
})); - let step_volume_id = Uuid::new_v4(); + let step_volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -1939,8 +1953,8 @@ mod test { // replacement step. let request_id = Uuid::new_v4(); - let volume_id = Uuid::new_v4(); - let old_snapshot_volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); + let old_snapshot_volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -1973,7 +1987,7 @@ mod test { let mut step = RegionSnapshotReplacementStep::new(request_id, volume_id); step.replacement_state = RegionSnapshotReplacementStepState::Complete; - step.old_snapshot_volume_id = Some(old_snapshot_volume_id); + step.old_snapshot_volume_id = Some(old_snapshot_volume_id.into()); let first_step_id = step.id; @@ -2017,13 +2031,13 @@ mod test { // Assert that a region snapshot replacement step cannot be performed on // a volume if region replacement is occurring for that volume. - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: Uuid::new_v4(), block_size: 512, sub_volumes: vec![], read_only_parent: None, diff --git a/nexus/db-queries/src/db/datastore/snapshot.rs b/nexus/db-queries/src/db/datastore/snapshot.rs index b9f6b589d7..607a2cf16e 100644 --- a/nexus/db-queries/src/db/datastore/snapshot.rs +++ b/nexus/db-queries/src/db/datastore/snapshot.rs @@ -12,6 +12,7 @@ use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; +use crate::db::model::to_db_typed_uuid; use crate::db::model::Generation; use crate::db::model::Name; use crate::db::model::Project; @@ -36,6 +37,7 @@ use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use omicron_common::bail_unless; +use omicron_uuid_kinds::VolumeUuid; use ref_cast::RefCast; use uuid::Uuid; @@ -309,13 +311,13 @@ impl DataStore { pub async fn find_snapshot_by_destination_volume_id( &self, opctx: &OpContext, - volume_id: Uuid, + volume_id: VolumeUuid, ) -> LookupResult> { let conn = self.pool_connection_authorized(opctx).await?; use db::schema::snapshot::dsl; dsl::snapshot - .filter(dsl::destination_volume_id.eq(volume_id)) + .filter(dsl::destination_volume_id.eq(to_db_typed_uuid(volume_id))) .select(Snapshot::as_select()) .first_async(&*conn) .await diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs index 505533da50..ad92900a63 100644 --- a/nexus/db-queries/src/db/datastore/volume.rs +++ b/nexus/db-queries/src/db/datastore/volume.rs @@ -55,6 +55,7 @@ use omicron_uuid_kinds::PropolisUuid; use omicron_uuid_kinds::TypedUuid; use omicron_uuid_kinds::UpstairsKind; use omicron_uuid_kinds::UpstairsRepairKind; +use omicron_uuid_kinds::VolumeUuid; use serde::Deserialize; use serde::Deserializer; use serde::Serialize; @@ -188,7 +189,7 @@ pub struct FreedCrucibleResources { pub datasets_and_regions: Vec<(Dataset, Region)>, /// Previously soft-deleted volumes that can now be hard-deleted - pub volumes: Vec, + pub volumes: Vec, } impl FreedCrucibleResources { @@ -207,7 +208,7 @@ impl DataStore { use db::schema::volume::dsl; let maybe_volume: Option = dsl::volume - .filter(dsl::id.eq(volume.id())) + .filter(dsl::id.eq(to_db_typed_uuid(volume.id()))) 
.select(Volume::as_select()) .first_async(conn) .await @@ -432,11 +433,11 @@ impl DataStore { pub(super) async fn volume_get_impl( conn: &async_bb8_diesel::Connection, - volume_id: Uuid, + volume_id: VolumeUuid, ) -> Result, diesel::result::Error> { use db::schema::volume::dsl; dsl::volume - .filter(dsl::id.eq(volume_id)) + .filter(dsl::id.eq(to_db_typed_uuid(volume_id))) .select(Volume::as_select()) .first_async::(conn) .await @@ -446,7 +447,7 @@ impl DataStore { /// Return a `Option` based on id, even if it's soft deleted. pub async fn volume_get( &self, - volume_id: Uuid, + volume_id: VolumeUuid, ) -> LookupResult> { let conn = self.pool_connection_unauthorized().await?; Self::volume_get_impl(&conn, volume_id) @@ -456,11 +457,14 @@ impl DataStore { /// Delete the volume if it exists. If it was already deleted, this is a /// no-op. - pub async fn volume_hard_delete(&self, volume_id: Uuid) -> DeleteResult { + pub async fn volume_hard_delete( + &self, + volume_id: VolumeUuid, + ) -> DeleteResult { use db::schema::volume::dsl; diesel::delete(dsl::volume) - .filter(dsl::id.eq(volume_id)) + .filter(dsl::id.eq(to_db_typed_uuid(volume_id))) .execute_async(&*self.pool_connection_unauthorized().await?) .await .map(|_| ()) @@ -519,8 +523,8 @@ impl DataStore { pub async fn swap_volume_usage_records_for_resources( conn: &async_bb8_diesel::Connection, resource: VolumeResourceUsage, - from_volume_id: Uuid, - to_volume_id: Uuid, + from_volume_id: VolumeUuid, + to_volume_id: VolumeUuid, ) -> Result<(), diesel::result::Error> { use db::schema::volume_resource_usage::dsl; @@ -532,8 +536,8 @@ impl DataStore { .eq(VolumeResourceUsageType::ReadOnlyRegion), ) .filter(dsl::region_id.eq(region_id)) - .filter(dsl::volume_id.eq(from_volume_id)) - .set(dsl::volume_id.eq(to_volume_id)) + .filter(dsl::volume_id.eq(to_db_typed_uuid(from_volume_id))) + .set(dsl::volume_id.eq(to_db_typed_uuid(to_volume_id))) .execute_async(conn) .await?; @@ -558,8 +562,8 @@ impl DataStore { ) .filter(dsl::region_snapshot_region_id.eq(region_id)) .filter(dsl::region_snapshot_snapshot_id.eq(snapshot_id)) - .filter(dsl::volume_id.eq(from_volume_id)) - .set(dsl::volume_id.eq(to_volume_id)) + .filter(dsl::volume_id.eq(to_db_typed_uuid(from_volume_id))) + .set(dsl::volume_id.eq(to_db_typed_uuid(to_volume_id))) .execute_async(conn) .await?; @@ -826,14 +830,14 @@ impl DataStore { async fn volume_checkout_in_txn( conn: &async_bb8_diesel::Connection, err: OptionalError, - volume_id: Uuid, + volume_id: VolumeUuid, reason: VolumeCheckoutReason, ) -> Result { use db::schema::volume::dsl; // Grab the volume in question. let volume = dsl::volume - .filter(dsl::id.eq(volume_id)) + .filter(dsl::id.eq(to_db_typed_uuid(volume_id))) .select(Volume::as_select()) .get_result_async(conn) .await?; @@ -857,7 +861,7 @@ impl DataStore { let maybe_disk: Option = disk_dsl::disk .filter(disk_dsl::time_deleted.is_null()) - .filter(disk_dsl::volume_id.eq(volume_id)) + .filter(disk_dsl::volume_id.eq(to_db_typed_uuid(volume_id))) .select(Disk::as_select()) .get_result_async(conn) .await @@ -955,7 +959,7 @@ impl DataStore { // Update the original volume_id with the new volume.data. use db::schema::volume::dsl as volume_dsl; let num_updated = diesel::update(volume_dsl::volume) - .filter(volume_dsl::id.eq(volume_id)) + .filter(volume_dsl::id.eq(to_db_typed_uuid(volume_id))) .set(volume_dsl::data.eq(new_volume_data)) .execute_async(conn) .await?; @@ -1006,7 +1010,7 @@ impl DataStore { /// crash consistency. 
pub async fn volume_checkout( &self, - volume_id: Uuid, + volume_id: VolumeUuid, reason: VolumeCheckoutReason, ) -> LookupResult { // We perform a transaction here, to be sure that on completion @@ -1113,7 +1117,7 @@ impl DataStore { /// returned by `read_only_resources_associated_with_volume`. pub async fn volume_checkout_randomize_ids( &self, - volume_id: Uuid, + volume_id: VolumeUuid, reason: VolumeCheckoutReason, ) -> CreateResult { let volume = self.volume_checkout(volume_id, reason).await?; @@ -1127,7 +1131,7 @@ impl DataStore { )?; self.volume_create(db::model::Volume::new( - Uuid::new_v4(), + VolumeUuid::new_v4(), randomized_vcr, )) .await @@ -1196,7 +1200,7 @@ impl DataStore { let mut deleted_regions = Vec::with_capacity(unfiltered_deleted_regions.len()); - let mut volume_set: HashSet = + let mut volume_set: HashSet = HashSet::with_capacity(unfiltered_deleted_regions.len()); for (dataset, region, region_snapshot, volume) in @@ -1291,7 +1295,7 @@ impl DataStore { pub async fn read_only_resources_associated_with_volume( &self, - volume_id: Uuid, + volume_id: VolumeUuid, ) -> LookupResult { let volume = if let Some(volume) = self.volume_get(volume_id).await? { volume @@ -1335,7 +1339,7 @@ impl DataStore { // See comment for `soft_delete_volume` async fn soft_delete_volume_in_txn( conn: &async_bb8_diesel::Connection, - volume_id: Uuid, + volume_id: VolumeUuid, err: OptionalError, ) -> Result { // Grab the volume, and check if the volume was already soft-deleted. @@ -1347,7 +1351,7 @@ impl DataStore { use db::schema::volume::dsl; let volume = dsl::volume - .filter(dsl::id.eq(volume_id)) + .filter(dsl::id.eq(to_db_typed_uuid(volume_id))) .select(Volume::as_select()) .get_result_async(conn) .await @@ -1492,7 +1496,10 @@ impl DataStore { VolumeResourceUsage::ReadOnlyRegion { region_id } => { let updated_rows = diesel::delete(ru_dsl::volume_resource_usage) - .filter(ru_dsl::volume_id.eq(volume_id)) + .filter( + ru_dsl::volume_id + .eq(to_db_typed_uuid(volume_id)), + ) .filter( ru_dsl::usage_type.eq( VolumeResourceUsageType::ReadOnlyRegion, @@ -1579,7 +1586,10 @@ impl DataStore { } => { let updated_rows = diesel::delete(ru_dsl::volume_resource_usage) - .filter(ru_dsl::volume_id.eq(volume_id)) + .filter( + ru_dsl::volume_id + .eq(to_db_typed_uuid(volume_id)), + ) .filter( ru_dsl::usage_type.eq( VolumeResourceUsageType::RegionSnapshot, @@ -1684,7 +1694,7 @@ impl DataStore { { use db::schema::volume::dsl; let updated_rows = diesel::update(dsl::volume) - .filter(dsl::id.eq(volume_id)) + .filter(dsl::id.eq(to_db_typed_uuid(volume_id))) .set(( dsl::time_deleted.eq(Utc::now()), dsl::resources_to_clean_up.eq(Some(serialized_resources)), @@ -1715,7 +1725,7 @@ impl DataStore { /// it is called from a saga node. pub async fn soft_delete_volume( &self, - volume_id: Uuid, + volume_id: VolumeUuid, ) -> Result { let err = OptionalError::new(); let conn = self.pool_connection_unauthorized().await?; @@ -1739,8 +1749,8 @@ impl DataStore { async fn volume_remove_rop_in_txn( conn: &async_bb8_diesel::Connection, err: OptionalError, - volume_id: Uuid, - temp_volume_id: Uuid, + volume_id: VolumeUuid, + temp_volume_id: VolumeUuid, ) -> Result { // Grab the volume in question. If the volume record was already deleted // then we can just return. 
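Most of the churn in this datastore file follows a single rule: in-memory code passes VolumeUuid, and the conversion to the database wrapper type happens exactly once, at the point a Diesel expression is built, via to_db_typed_uuid(..). A rough standalone sketch of that boundary follows; TypedUuid, DbTypedUuid, and to_db_typed_uuid here are simplified stand-ins for the real omicron_uuid_kinds and nexus-db-model definitions, which additionally carry the Diesel trait impls that make the wrapper usable in queries.

use std::marker::PhantomData;
use uuid::Uuid;

// Marker type standing in for omicron_uuid_kinds::VolumeKind.
struct VolumeKind;

// Stand-in for TypedUuid<K>: the strongly typed ID used throughout Rust code.
struct TypedUuid<K> {
    id: Uuid,
    _kind: PhantomData<K>,
}

type VolumeUuid = TypedUuid<VolumeKind>;

// Stand-in for DbTypedUuid<K>: same bits, but this is the type the volume.id
// column maps to, so it is what ends up inside Diesel expressions.
struct DbTypedUuid<K>(TypedUuid<K>);

impl<K> From<TypedUuid<K>> for DbTypedUuid<K> {
    fn from(id: TypedUuid<K>) -> Self {
        DbTypedUuid(id)
    }
}

// Analogue of db::model::to_db_typed_uuid: the single, explicit crossing point
// from the in-memory typed ID to the database wrapper.
fn to_db_typed_uuid<K>(id: TypedUuid<K>) -> DbTypedUuid<K> {
    id.into()
}

fn main() {
    let volume_id: VolumeUuid =
        TypedUuid { id: Uuid::new_v4(), _kind: PhantomData };
    // In the patch this value feeds .filter(dsl::id.eq(..)); here we only
    // demonstrate the conversion itself.
    let _db_id: DbTypedUuid<VolumeKind> = to_db_typed_uuid(volume_id);
}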
@@ -1748,7 +1758,7 @@ impl DataStore { use db::schema::volume::dsl; let volume = dsl::volume - .filter(dsl::id.eq(volume_id)) + .filter(dsl::id.eq(to_db_typed_uuid(volume_id))) .select(Volume::as_select()) .get_result_async(conn) .await @@ -1804,7 +1814,7 @@ impl DataStore { // Update the original volume_id with the new volume.data. use db::schema::volume::dsl as volume_dsl; let num_updated = diesel::update(volume_dsl::volume) - .filter(volume_dsl::id.eq(volume_id)) + .filter(volume_dsl::id.eq(to_db_typed_uuid(volume_id))) .set(volume_dsl::data.eq(new_volume_data)) .execute_async(conn) .await?; @@ -1824,7 +1834,7 @@ impl DataStore { // temp_volume_id, but the read_only_parent from the // original volume. let rop_vcr = VolumeConstructionRequest::Volume { - id: temp_volume_id, + id: *temp_volume_id.as_untyped_uuid(), block_size, sub_volumes: vec![], read_only_parent, @@ -1836,7 +1846,9 @@ impl DataStore { // Update the temp_volume_id with the volume data that // contains the read_only_parent. let num_updated = diesel::update(volume_dsl::volume) - .filter(volume_dsl::id.eq(temp_volume_id)) + .filter( + volume_dsl::id.eq(to_db_typed_uuid(temp_volume_id)), + ) .filter(volume_dsl::time_deleted.is_null()) .set(volume_dsl::data.eq(rop_volume_data)) .execute_async(conn) @@ -1929,8 +1941,8 @@ impl DataStore { // not happen again, or be undone. pub async fn volume_remove_rop( &self, - volume_id: Uuid, - temp_volume_id: Uuid, + volume_id: VolumeUuid, + temp_volume_id: VolumeUuid, ) -> Result { // In this single transaction: // - Get the given volume from the volume_id from the database @@ -1979,7 +1991,7 @@ impl DataStore { &self, opctx: &OpContext, dataset_id: DatasetUuid, - volume_id: Uuid, + volume_id: VolumeUuid, ) -> LookupResult> { let conn = self.pool_connection_authorized(opctx).await?; @@ -2347,7 +2359,10 @@ impl DataStore { } /// Return true if a volume was soft-deleted or hard-deleted - pub async fn volume_deleted(&self, volume_id: Uuid) -> Result { + pub async fn volume_deleted( + &self, + volume_id: VolumeUuid, + ) -> Result { match self.volume_get(volume_id).await? 
{ Some(v) => Ok(v.time_deleted.is_some()), None => Ok(true), @@ -2661,7 +2676,7 @@ fn read_only_target_in_vcr( #[derive(Clone)] pub struct VolumeReplacementParams { - pub volume_id: Uuid, + pub volume_id: VolumeUuid, pub region_id: Uuid, pub region_addr: SocketAddrV6, } @@ -2670,7 +2685,7 @@ pub struct VolumeReplacementParams { // parameters #[derive(Debug, Clone, Copy)] -pub struct VolumeWithTarget(pub Uuid); +pub struct VolumeWithTarget(pub VolumeUuid); #[derive(Debug, Clone, Copy)] pub struct ExistingTarget(pub SocketAddrV6); @@ -2679,7 +2694,7 @@ pub struct ExistingTarget(pub SocketAddrV6); pub struct ReplacementTarget(pub SocketAddrV6); #[derive(Debug, Clone, Copy)] -pub struct VolumeToDelete(pub Uuid); +pub struct VolumeToDelete(pub VolumeUuid); // The result type returned from both `volume_replace_region` and // `volume_replace_snapshot` @@ -2799,7 +2814,7 @@ impl DataStore { // Grab the old volume first let maybe_old_volume = { volume_dsl::volume - .filter(volume_dsl::id.eq(existing.volume_id)) + .filter(volume_dsl::id.eq(to_db_typed_uuid(existing.volume_id))) .select(Volume::as_select()) .first_async::(conn) .await @@ -2926,7 +2941,10 @@ impl DataStore { // Set the existing region's volume id to the replacement's volume id diesel::update(region_dsl::region) .filter(region_dsl::id.eq(existing.region_id)) - .set(region_dsl::volume_id.eq(replacement.volume_id)) + .set( + region_dsl::volume_id + .eq(to_db_typed_uuid(replacement.volume_id)), + ) .execute_async(conn) .await .map_err(|e| { @@ -2941,7 +2959,7 @@ impl DataStore { // Set the replacement region's volume id to the existing's volume id diesel::update(region_dsl::region) .filter(region_dsl::id.eq(replacement.region_id)) - .set(region_dsl::volume_id.eq(existing.volume_id)) + .set(region_dsl::volume_id.eq(to_db_typed_uuid(existing.volume_id))) .execute_async(conn) .await .map_err(|e| { @@ -2975,7 +2993,7 @@ impl DataStore { // Update the existing volume's data diesel::update(volume_dsl::volume) - .filter(volume_dsl::id.eq(existing.volume_id)) + .filter(volume_dsl::id.eq(to_db_typed_uuid(existing.volume_id))) .set(volume_dsl::data.eq(new_volume_data)) .execute_async(conn) .await @@ -3053,7 +3071,7 @@ impl DataStore { // Grab the old volume first let maybe_old_volume = { volume_dsl::volume - .filter(volume_dsl::id.eq(volume_id.0)) + .filter(volume_dsl::id.eq(to_db_typed_uuid(volume_id.0))) .select(Volume::as_select()) .first_async::(conn) .await @@ -3153,7 +3171,7 @@ impl DataStore { // Update the existing volume's data diesel::update(volume_dsl::volume) - .filter(volume_dsl::id.eq(volume_id.0)) + .filter(volume_dsl::id.eq(to_db_typed_uuid(volume_id.0))) .set(volume_dsl::data.eq(new_volume_data)) .execute_async(conn) .await @@ -3170,7 +3188,7 @@ impl DataStore { // don't matter, just that it gets fed into the volume_delete machinery // later. 
let vcr = VolumeConstructionRequest::Volume { - id: volume_to_delete_id.0, + id: *volume_to_delete_id.0.as_untyped_uuid(), block_size: 512, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 512, @@ -3178,7 +3196,7 @@ impl DataStore { extent_count: 1, gen: 1, opts: sled_agent_client::types::CrucibleOpts { - id: volume_to_delete_id.0, + id: *volume_to_delete_id.0.as_untyped_uuid(), target: vec![existing.0.to_string()], lossy: false, flush_timeout: None, @@ -3198,7 +3216,7 @@ impl DataStore { // Update the volume to delete data let num_updated = diesel::update(volume_dsl::volume) - .filter(volume_dsl::id.eq(volume_to_delete_id.0)) + .filter(volume_dsl::id.eq(to_db_typed_uuid(volume_to_delete_id.0))) .filter(volume_dsl::time_deleted.is_null()) .set(volume_dsl::data.eq(volume_data)) .execute_async(conn) @@ -3282,7 +3300,7 @@ impl DataStore { })? // TODO be smart enough to .filter the above query .into_iter() - .filter(|record| record.volume_id == volume_to_delete_id.0) + .filter(|record| record.volume_id == volume_to_delete_id.0.into()) .count(); // The "replacement" target moved into the volume @@ -3778,7 +3796,8 @@ impl DataStore { public_error_from_diesel(e, ErrorHandler::Server) })?; - paginator = p.found_batch(&haystack, &|r| r.id()); + paginator = + p.found_batch(&haystack, &|r| *r.id().as_untyped_uuid()); for volume in haystack { let vcr: VolumeConstructionRequest = @@ -3834,7 +3853,8 @@ impl DataStore { .get_results_async::(conn) .await?; - paginator = p.found_batch(&haystack, &|v| v.id()); + paginator = + p.found_batch(&haystack, &|v| *v.id().as_untyped_uuid()); for volume in haystack { Self::validate_volume_has_all_resources(&conn, volume).await?; @@ -4006,6 +4026,7 @@ mod tests { use nexus_types::external_api::params::DiskSource; use omicron_common::api::external::ByteCount; use omicron_test_utils::dev; + use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::CrucibleOpts; // Assert that Nexus will not fail to deserialize an old version of @@ -4020,12 +4041,12 @@ mod tests { // Start with a fake volume, doesn't matter if it's empty - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let _volume = datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size: 512, sub_volumes: vec![], read_only_parent: None, @@ -4077,7 +4098,7 @@ mod tests { "#; diesel::update(dsl::volume) - .filter(dsl::id.eq(volume_id)) + .filter(dsl::id.eq(to_db_typed_uuid(volume_id))) .set(( dsl::resources_to_clean_up.eq(resources_to_clean_up), dsl::time_deleted.eq(Utc::now()), @@ -4129,8 +4150,8 @@ mod tests { ) .await; - let volume_id = Uuid::new_v4(); - let volume_to_delete_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); + let volume_to_delete_id = VolumeUuid::new_v4(); let datasets_and_regions = datastore .disk_region_allocate( @@ -4198,7 +4219,7 @@ mod tests { .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size: 512, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 512, @@ -4206,7 +4227,7 @@ mod tests { extent_count: 10, gen: 1, opts: CrucibleOpts { - id: volume_id, + id: *volume_id.as_untyped_uuid(), target: vec![ // target to replace region_addresses[0].clone(), @@ -4261,7 +4282,7 @@ mod tests { assert_eq!( &vcr, &VolumeConstructionRequest::Volume { - id: volume_id, + id: 
*volume_id.as_untyped_uuid(), block_size: 512, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 512, @@ -4269,7 +4290,7 @@ mod tests { extent_count: 10, gen: 2, // generation number bumped opts: CrucibleOpts { - id: volume_id, + id: *volume_id.as_untyped_uuid(), target: vec![ replacement_region_addr.to_string(), // replaced region_addresses[1].clone(), @@ -4319,7 +4340,7 @@ mod tests { assert_eq!( &vcr, &VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size: 512, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 512, @@ -4327,7 +4348,7 @@ mod tests { extent_count: 10, gen: 3, // generation number bumped opts: CrucibleOpts { - id: volume_id, + id: *volume_id.as_untyped_uuid(), target: vec![ region_addresses[0].clone(), // back to what it was region_addresses[1].clone(), @@ -4367,8 +4388,8 @@ mod tests { ) .await; - let volume_id = Uuid::new_v4(); - let volume_to_delete_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); + let volume_to_delete_id = VolumeUuid::new_v4(); let datasets_and_regions = datastore .disk_region_allocate( @@ -4483,7 +4504,7 @@ mod tests { .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size: 512, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 512, @@ -4491,7 +4512,7 @@ mod tests { extent_count: 10, gen: 1, opts: CrucibleOpts { - id: volume_id, + id: *volume_id.as_untyped_uuid(), target: vec![ region_addresses[0].clone(), region_addresses[1].clone(), @@ -4551,14 +4572,14 @@ mod tests { .unwrap(); assert_eq!(usage.len(), 1); - assert_eq!(usage[0].volume_id, volume_id); + assert_eq!(usage[0].volume_id(), volume_id); } datastore .volume_create(nexus_db_model::Volume::new( volume_to_delete_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_to_delete_id, + id: *volume_to_delete_id.as_untyped_uuid(), block_size: 512, sub_volumes: vec![], read_only_parent: None, @@ -4606,7 +4627,7 @@ mod tests { assert_eq!( &vcr, &VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size: 512, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 512, @@ -4614,7 +4635,7 @@ mod tests { extent_count: 10, gen: 1, opts: CrucibleOpts { - id: volume_id, + id: *volume_id.as_untyped_uuid(), target: vec![ region_addresses[0].clone(), region_addresses[1].clone(), @@ -4671,7 +4692,7 @@ mod tests { assert_eq!( &vcr, &VolumeConstructionRequest::Volume { - id: volume_to_delete_id, + id: *volume_to_delete_id.as_untyped_uuid(), block_size: 512, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 512, @@ -4679,7 +4700,7 @@ mod tests { extent_count: 1, gen: 1, opts: CrucibleOpts { - id: volume_to_delete_id, + id: *volume_to_delete_id.as_untyped_uuid(), target: vec![ // replaced target stashed here address_1.clone(), @@ -4716,11 +4737,11 @@ mod tests { match i { 0 => { - assert_eq!(usage[0].volume_id, volume_to_delete_id); + assert_eq!(usage[0].volume_id(), volume_to_delete_id); } 1 | 2 => { - assert_eq!(usage[0].volume_id, volume_id); + assert_eq!(usage[0].volume_id(), volume_id); } _ => panic!("out of range"), @@ -4737,7 +4758,7 @@ mod tests { .unwrap(); assert_eq!(usage.len(), 1); - assert_eq!(usage[0].volume_id, volume_id); + assert_eq!(usage[0].volume_id(), volume_id); // Now undo the replacement. Note volume ID is not swapped. 
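The tests above and below repeat one fixture pattern: create a volume record keyed by a typed VolumeUuid whose serialized VCR still carries the untyped Uuid. A minimal sketch of that helper (illustrative only; it assumes the same nexus_db_model and sled-agent types used throughout this patch):

use omicron_uuid_kinds::{GenericUuid, VolumeUuid};
use sled_agent_client::types::VolumeConstructionRequest;

async fn create_empty_test_volume(
    datastore: &nexus_db_queries::db::DataStore,
    volume_id: VolumeUuid,
) {
    // The VCR `id` field is still a plain Uuid, so deref at the boundary.
    let vcr = VolumeConstructionRequest::Volume {
        id: *volume_id.as_untyped_uuid(),
        block_size: 512,
        sub_volumes: vec![],
        read_only_parent: None,
    };

    datastore
        .volume_create(nexus_db_model::Volume::new(
            volume_id,
            serde_json::to_string(&vcr).unwrap(),
        ))
        .await
        .unwrap();
}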
@@ -4762,7 +4783,7 @@ mod tests { assert_eq!( &vcr, &VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size: 512, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 512, @@ -4770,7 +4791,7 @@ mod tests { extent_count: 10, gen: 1, opts: CrucibleOpts { - id: volume_id, + id: *volume_id.as_untyped_uuid(), target: vec![ region_addresses[0].clone(), region_addresses[1].clone(), @@ -4793,7 +4814,7 @@ mod tests { extent_count: 10, gen: 1, opts: CrucibleOpts { - id: rop_id, + id: *rop_id.as_untyped_uuid(), target: vec![ // back to what it was address_1, address_2, address_3, @@ -4825,7 +4846,7 @@ mod tests { assert_eq!( &vcr, &VolumeConstructionRequest::Volume { - id: volume_to_delete_id, + id: *volume_to_delete_id.as_untyped_uuid(), block_size: 512, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 512, @@ -4833,7 +4854,7 @@ mod tests { extent_count: 1, gen: 1, opts: CrucibleOpts { - id: volume_to_delete_id, + id: *volume_to_delete_id.as_untyped_uuid(), target: vec![ // replacement stashed here replacement_region_addr.to_string(), @@ -4867,7 +4888,7 @@ mod tests { .unwrap(); assert_eq!(usage.len(), 1); - assert_eq!(usage[0].volume_id, volume_id); + assert_eq!(usage[0].volume_id(), volume_id); } let usage = datastore @@ -4880,7 +4901,7 @@ mod tests { .unwrap(); assert_eq!(usage.len(), 1); - assert_eq!(usage[0].volume_id, volume_to_delete_id); + assert_eq!(usage[0].volume_id(), volume_to_delete_id); db.terminate().await; logctx.cleanup_successful(); @@ -4894,7 +4915,7 @@ mod tests { let db = TestDatabase::new_with_datastore(&log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); // need to add region snapshot objects to satisfy volume create // transaction's search for resources @@ -4937,7 +4958,7 @@ mod tests { .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size: 512, sub_volumes: vec![], read_only_parent: Some(Box::new( @@ -5519,7 +5540,7 @@ mod tests { ) .await; - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); // Assert that allocating regions without creating the volume does not // cause them to be returned as "deleted" regions, as this can cause diff --git a/nexus/db-queries/src/db/datastore/volume_repair.rs b/nexus/db-queries/src/db/datastore/volume_repair.rs index 598d9d77a2..f9cc2de390 100644 --- a/nexus/db-queries/src/db/datastore/volume_repair.rs +++ b/nexus/db-queries/src/db/datastore/volume_repair.rs @@ -10,6 +10,7 @@ use crate::db::datastore::OpContext; use crate::db::datastore::RunnableQuery; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; +use crate::db::model::to_db_typed_uuid; use crate::db::model::VolumeRepair; use crate::db::DbConnection; use crate::transaction_retry::OptionalError; @@ -18,6 +19,7 @@ use diesel::prelude::*; use diesel::result::DatabaseErrorKind; use diesel::result::Error as DieselError; use omicron_common::api::external::Error; +use omicron_uuid_kinds::VolumeUuid; use uuid::Uuid; impl DataStore { @@ -36,7 +38,7 @@ impl DataStore { pub(super) async fn volume_repair_insert_in_txn( conn: &async_bb8_diesel::Connection, err: OptionalError, - volume_id: Uuid, + volume_id: VolumeUuid, repair_id: Uuid, ) -> Result<(), diesel::result::Error> { use db::schema::volume_repair::dsl; @@ -48,7 +50,7 @@ impl DataStore { // first time, 
but have been deleted now. let maybe_lock = dsl::volume_repair .filter(dsl::repair_id.eq(repair_id)) - .filter(dsl::volume_id.eq(volume_id)) + .filter(dsl::volume_id.eq(to_db_typed_uuid(volume_id))) .first_async::(conn) .await .optional()?; @@ -71,7 +73,7 @@ impl DataStore { // soft-deleted volumes. match diesel::insert_into(dsl::volume_repair) - .values(VolumeRepair { volume_id, repair_id }) + .values(VolumeRepair { volume_id: volume_id.into(), repair_id }) .execute_async(conn) .await { @@ -95,7 +97,7 @@ impl DataStore { pub async fn volume_repair_lock( &self, opctx: &OpContext, - volume_id: Uuid, + volume_id: VolumeUuid, repair_id: Uuid, ) -> Result<(), Error> { let conn = self.pool_connection_authorized(opctx).await?; @@ -122,14 +124,14 @@ impl DataStore { } pub(super) fn volume_repair_delete_query( - volume_id: Uuid, + volume_id: VolumeUuid, repair_id: Uuid, ) -> impl RunnableQuery { use db::schema::volume_repair::dsl; diesel::delete( dsl::volume_repair - .filter(dsl::volume_id.eq(volume_id)) + .filter(dsl::volume_id.eq(to_db_typed_uuid(volume_id))) .filter(dsl::repair_id.eq(repair_id)), ) } @@ -137,7 +139,7 @@ impl DataStore { pub async fn volume_repair_unlock( &self, opctx: &OpContext, - volume_id: Uuid, + volume_id: VolumeUuid, repair_id: Uuid, ) -> Result<(), Error> { let conn = self.pool_connection_authorized(opctx).await?; @@ -150,14 +152,14 @@ impl DataStore { pub async fn volume_repair_get( conn: &async_bb8_diesel::Connection, - volume_id: Uuid, + volume_id: VolumeUuid, repair_id: Uuid, ) -> Result { use db::schema::volume_repair::dsl; dsl::volume_repair .filter(dsl::repair_id.eq(repair_id)) - .filter(dsl::volume_id.eq(volume_id)) + .filter(dsl::volume_id.eq(to_db_typed_uuid(volume_id))) .first_async::(conn) .await } @@ -169,6 +171,7 @@ mod test { use crate::db::pub_test_utils::TestDatabase; use omicron_test_utils::dev; + use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::VolumeConstructionRequest; #[tokio::test] @@ -179,13 +182,13 @@ mod test { let lock_1 = Uuid::new_v4(); let lock_2 = Uuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: Uuid::new_v4(), block_size: 512, sub_volumes: vec![], read_only_parent: None, @@ -218,7 +221,7 @@ mod test { let (opctx, datastore) = (db.opctx(), db.datastore()); let lock_1 = Uuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_repair_lock(&opctx, volume_id, lock_1) @@ -236,13 +239,13 @@ mod test { let (opctx, datastore) = (db.opctx(), db.datastore()); let lock_id = Uuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: Uuid::new_v4(), block_size: 512, sub_volumes: vec![], read_only_parent: None, diff --git a/nexus/db-queries/src/db/queries/region_allocation.rs b/nexus/db-queries/src/db/queries/region_allocation.rs index a9130d87f7..ae595cbc9b 100644 --- a/nexus/db-queries/src/db/queries/region_allocation.rs +++ b/nexus/db-queries/src/db/queries/region_allocation.rs @@ -15,6 +15,8 @@ use diesel::result::Error as DieselError; use diesel::sql_types; use nexus_config::RegionAllocationStrategy; use omicron_common::api::external; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::VolumeUuid; type 
AllColumnsOfRegion = AllColumnsOf; type AllColumnsOfDataset = AllColumnsOf; @@ -85,7 +87,7 @@ pub struct RegionParameters { /// layers of the hierarchy). If that volume has region snapshots in the region /// set, a `snapshot_id` should be supplied matching those entries. pub fn allocation_query( - volume_id: uuid::Uuid, + volume_id: VolumeUuid, snapshot_id: Option, params: RegionParameters, allocation_strategy: &RegionAllocationStrategy, @@ -124,7 +126,7 @@ pub fn allocation_query( old_regions AS ( SELECT ").sql(AllColumnsOfRegion::with_prefix("region")).sql(" FROM region WHERE (region.volume_id = ").param().sql(")),") - .bind::(volume_id) + .bind::(*volume_id.as_untyped_uuid()) // Calculates the old size being used by zpools under consideration as targets for region // allocation. @@ -265,7 +267,7 @@ pub fn allocation_query( SELECT COUNT(*) FROM old_regions )) ),") - .bind::(volume_id) + .bind::(*volume_id.as_untyped_uuid()) .bind::(params.block_size as i64) .bind::(params.blocks_per_extent as i64) .bind::(params.extent_count as i64) @@ -418,7 +420,7 @@ mod test { // how the output SQL has been altered. #[tokio::test] async fn expectorate_query() { - let volume_id = Uuid::nil(); + let volume_id = VolumeUuid::nil(); let params = RegionParameters { block_size: 512, blocks_per_extent: 4, @@ -509,7 +511,7 @@ mod test { let pool = db.pool(); let conn = pool.claim().await.unwrap(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let params = RegionParameters { block_size: 512, blocks_per_extent: 4, diff --git a/nexus/internal-api/src/lib.rs b/nexus/internal-api/src/lib.rs index 2507b6a688..9149469bb4 100644 --- a/nexus/internal-api/src/lib.rs +++ b/nexus/internal-api/src/lib.rs @@ -41,7 +41,7 @@ use omicron_common::{ }; use omicron_uuid_kinds::{ DemoSagaUuid, DownstairsKind, PropolisUuid, SledUuid, TypedUuid, - UpstairsKind, UpstairsRepairKind, + UpstairsKind, UpstairsRepairKind, VolumeUuid, }; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -567,7 +567,7 @@ pub struct DiskPathParam { /// Path parameters for Volume requests (internal API) #[derive(Deserialize, JsonSchema)] pub struct VolumePathParam { - pub volume_id: Uuid, + pub volume_id: VolumeUuid, } /// Path parameters for Rack requests. diff --git a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs index 137e378410..d2f8592e30 100644 --- a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs +++ b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs @@ -166,6 +166,7 @@ mod test { use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; + use omicron_uuid_kinds::VolumeUuid; use omicron_uuid_kinds::ZpoolUuid; use std::collections::BTreeMap; use std::net::SocketAddr; @@ -453,7 +454,7 @@ mod test { // There isn't a great API to insert regions (we normally allocate!) // so insert the record manually here. 
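// Sketch (assumptions flagged, not part of the patch): a consumer of the
// typed VolumePathParam introduced in the internal API above. The handler
// name and body are placeholders; the point is that dropshot deserializes
// the path segment straight into a VolumeUuid, so no bare Uuid crosses the
// API boundary.

use dropshot::{HttpError, HttpResponseUpdatedNoContent, Path, RequestContext};
use omicron_uuid_kinds::VolumeUuid;
use schemars::JsonSchema;
use serde::Deserialize;

#[derive(Deserialize, JsonSchema)]
struct VolumePathParam {
    volume_id: VolumeUuid,
}

async fn example_volume_endpoint(
    _rqctx: RequestContext<()>,
    path_params: Path<VolumePathParam>,
) -> Result<HttpResponseUpdatedNoContent, HttpError> {
    // The typed id is handed directly to the datastore or a saga from here.
    let volume_id: VolumeUuid = path_params.into_inner().volume_id;
    drop(volume_id);
    Ok(HttpResponseUpdatedNoContent())
}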
let region = { - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); Region::new( dataset.id(), volume_id, diff --git a/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs b/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs index 551372e67e..a82a7ea57b 100644 --- a/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs +++ b/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs @@ -187,7 +187,7 @@ mod tests { use nexus_test_utils_macros::nexus_test; use omicron_common::api::internal::shared::DatasetKind; use omicron_uuid_kinds::{ - DatasetUuid, PhysicalDiskUuid, RegionUuid, SledUuid, + DatasetUuid, PhysicalDiskUuid, RegionUuid, SledUuid, VolumeUuid, }; use std::str::FromStr; use uuid::Uuid; @@ -249,7 +249,7 @@ mod tests { // There isn't a great API to insert regions (we normally allocate!) // so insert the record manually here. let region = { - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); Region::new( dataset.id(), volume_id, diff --git a/nexus/src/app/background/tasks/region_replacement.rs b/nexus/src/app/background/tasks/region_replacement.rs index caa262bc8c..08fbb0ede7 100644 --- a/nexus/src/app/background/tasks/region_replacement.rs +++ b/nexus/src/app/background/tasks/region_replacement.rs @@ -197,7 +197,7 @@ impl BackgroundTask for RegionReplacementDetector { let volume_deleted = match self .datastore - .volume_deleted(request.volume_id) + .volume_deleted(request.volume_id()) .await { Ok(volume_deleted) => volume_deleted, @@ -205,7 +205,7 @@ impl BackgroundTask for RegionReplacementDetector { Err(e) => { let s = format!( "error checking if volume id {} was deleted: {e}", - request.volume_id, + request.volume_id(), ); error!(&log, "{s}"); @@ -226,7 +226,7 @@ impl BackgroundTask for RegionReplacementDetector { &log, "request {} volume {} was soft or hard deleted!", request_id, - request.volume_id, + request.volume_id(), ); let result = self @@ -306,6 +306,7 @@ mod test { use nexus_db_model::RegionReplacement; use nexus_db_model::Volume; use nexus_test_utils_macros::nexus_test; + use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::CrucibleOpts; use sled_agent_client::types::VolumeConstructionRequest; use uuid::Uuid; @@ -333,13 +334,13 @@ mod test { assert_eq!(result, json!(RegionReplacementStatus::default())); // Add a region replacement request for a fake region - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: Uuid::new_v4(), block_size: 512, sub_volumes: vec![], read_only_parent: None, @@ -358,7 +359,7 @@ mod test { .unwrap(); let volume_construction_request = VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size: 0, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 0, @@ -366,7 +367,7 @@ mod test { extent_count: 0, gen: 0, opts: CrucibleOpts { - id: volume_id, + id: Uuid::new_v4(), target: vec![ // if you put something here, you'll need a synthetic // dataset record diff --git a/nexus/src/app/background/tasks/region_replacement_driver.rs b/nexus/src/app/background/tasks/region_replacement_driver.rs index 6cc28f9dfd..80f33942bc 100644 --- a/nexus/src/app/background/tasks/region_replacement_driver.rs +++ b/nexus/src/app/background/tasks/region_replacement_driver.rs @@ -174,7 +174,7 @@ impl RegionReplacementDriver { }; for request in done_replacements { - let 
Some(old_region_volume_id) = request.old_region_volume_id + let Some(old_region_volume_id) = request.old_region_volume_id() else { // This state is illegal! let s = format!( @@ -252,12 +252,12 @@ mod test { use nexus_db_model::UpstairsRepairType; use nexus_test_utils_macros::nexus_test; use omicron_uuid_kinds::DatasetUuid; - use omicron_uuid_kinds::DownstairsRegionKind; + use omicron_uuid_kinds::DownstairsRegionUuid; use omicron_uuid_kinds::GenericUuid; - use omicron_uuid_kinds::TypedUuid; - use omicron_uuid_kinds::UpstairsKind; - use omicron_uuid_kinds::UpstairsRepairKind; - use omicron_uuid_kinds::UpstairsSessionKind; + use omicron_uuid_kinds::UpstairsRepairUuid; + use omicron_uuid_kinds::UpstairsSessionUuid; + use omicron_uuid_kinds::UpstairsUuid; + use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::VolumeConstructionRequest; use uuid::Uuid; @@ -287,13 +287,13 @@ mod test { // state Running. let region_id = Uuid::new_v4(); let new_region_id = Uuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: Uuid::new_v4(), block_size: 512, sub_volumes: vec![], read_only_parent: None, @@ -354,7 +354,7 @@ mod test { // Insert some region records let old_region = { let dataset_id = DatasetUuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); Region::new( dataset_id, volume_id, @@ -368,7 +368,7 @@ mod test { let new_region = { let dataset_id = DatasetUuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); Region::new( dataset_id, volume_id, @@ -401,7 +401,7 @@ mod test { .volume_create(nexus_db_model::Volume::new( old_region.volume_id(), serde_json::to_string(&VolumeConstructionRequest::Volume { - id: old_region.volume_id(), + id: Uuid::new_v4(), block_size: 512, sub_volumes: vec![], read_only_parent: None, @@ -419,7 +419,7 @@ mod test { RegionReplacement::new(old_region.id(), old_region.volume_id()); request.replacement_state = RegionReplacementState::ReplacementDone; request.new_region_id = Some(new_region.id()); - request.old_region_volume_id = Some(Uuid::new_v4()); + request.old_region_volume_id = Some(VolumeUuid::new_v4().into()); request }; @@ -467,7 +467,7 @@ mod test { // Insert some region records let old_region = { let dataset_id = DatasetUuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); Region::new( dataset_id, volume_id, @@ -481,7 +481,7 @@ mod test { let new_region = { let dataset_id = DatasetUuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); Region::new( dataset_id, volume_id, @@ -514,7 +514,7 @@ mod test { .volume_create(nexus_db_model::Volume::new( old_region.volume_id(), serde_json::to_string(&VolumeConstructionRequest::Volume { - id: old_region.volume_id(), + id: Uuid::new_v4(), block_size: 512, sub_volumes: vec![], read_only_parent: None, @@ -531,7 +531,7 @@ mod test { RegionReplacement::new(old_region.id(), old_region.volume_id()); request.replacement_state = RegionReplacementState::Running; request.new_region_id = Some(new_region.id()); - request.old_region_volume_id = Some(Uuid::new_v4()); + request.old_region_volume_id = Some(VolumeUuid::new_v4().into()); request }; @@ -565,17 +565,11 @@ mod test { &opctx, UpstairsRepairNotification::new( Utc::now(), // client time - TypedUuid::::from_untyped_uuid( - Uuid::new_v4(), - ), + 
UpstairsRepairUuid::new_v4(), UpstairsRepairType::Live, - TypedUuid::::from_untyped_uuid( - Uuid::new_v4(), - ), - TypedUuid::::from_untyped_uuid( - Uuid::new_v4(), - ), - TypedUuid::::from_untyped_uuid( + UpstairsUuid::new_v4(), + UpstairsSessionUuid::new_v4(), + DownstairsRegionUuid::from_untyped_uuid( new_region.id(), ), // downstairs that was repaired "[fd00:1122:3344:101::2]:12345".parse().unwrap(), @@ -630,7 +624,7 @@ mod test { // Insert some region records let old_region = { let dataset_id = DatasetUuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); Region::new( dataset_id, volume_id, @@ -644,7 +638,7 @@ mod test { let new_region = { let dataset_id = DatasetUuid::new_v4(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); Region::new( dataset_id, volume_id, @@ -677,7 +671,7 @@ mod test { .volume_create(nexus_db_model::Volume::new( old_region.volume_id(), serde_json::to_string(&VolumeConstructionRequest::Volume { - id: old_region.volume_id(), + id: Uuid::new_v4(), block_size: 512, sub_volumes: vec![], read_only_parent: None, @@ -727,17 +721,11 @@ mod test { &opctx, UpstairsRepairNotification::new( Utc::now(), // client time - TypedUuid::::from_untyped_uuid( - Uuid::new_v4(), - ), + UpstairsRepairUuid::new_v4(), UpstairsRepairType::Live, - TypedUuid::::from_untyped_uuid( - Uuid::new_v4(), - ), - TypedUuid::::from_untyped_uuid( - Uuid::new_v4(), - ), - TypedUuid::::from_untyped_uuid( + UpstairsUuid::new_v4(), + UpstairsSessionUuid::new_v4(), + DownstairsRegionUuid::from_untyped_uuid( new_region.id(), ), // downstairs that was repaired "[fd00:1122:3344:101::2]:12345".parse().unwrap(), diff --git a/nexus/src/app/background/tasks/region_snapshot_replacement_finish.rs b/nexus/src/app/background/tasks/region_snapshot_replacement_finish.rs index 61a84c579d..20804c944f 100644 --- a/nexus/src/app/background/tasks/region_snapshot_replacement_finish.rs +++ b/nexus/src/app/background/tasks/region_snapshot_replacement_finish.rs @@ -196,8 +196,11 @@ mod test { use nexus_db_model::RegionSnapshotReplacementStep; use nexus_db_model::RegionSnapshotReplacementStepState; use nexus_db_queries::db::datastore::region_snapshot_replacement; + use nexus_db_queries::db::datastore::NewRegionVolumeId; + use nexus_db_queries::db::datastore::OldSnapshotVolumeId; use nexus_test_utils_macros::nexus_test; use omicron_uuid_kinds::DatasetUuid; + use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::VolumeConstructionRequest; use uuid::Uuid; @@ -239,7 +242,7 @@ mod test { let request_id = request.id; - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -277,8 +280,8 @@ mod test { .unwrap(); let new_region_id = Uuid::new_v4(); - let new_region_volume_id = Uuid::new_v4(); - let old_snapshot_volume_id = Uuid::new_v4(); + let new_region_volume_id = VolumeUuid::new_v4(); + let old_snapshot_volume_id = VolumeUuid::new_v4(); datastore .set_region_snapshot_replacement_replacement_done( @@ -286,8 +289,8 @@ mod test { request_id, operating_saga_id, new_region_id, - new_region_volume_id, - old_snapshot_volume_id, + NewRegionVolumeId(new_region_volume_id), + OldSnapshotVolumeId(old_snapshot_volume_id), ) .await .unwrap(); @@ -314,7 +317,7 @@ mod test { let operating_saga_id = Uuid::new_v4(); - let step_volume_id = Uuid::new_v4(); + let step_volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( step_volume_id, @@ -335,7 +338,7 @@ mod test { 
step_1.operating_saga_id = Some(operating_saga_id); let step_1_id = step_1.id; - let step_volume_id = Uuid::new_v4(); + let step_volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( step_volume_id, diff --git a/nexus/src/app/background/tasks/region_snapshot_replacement_garbage_collect.rs b/nexus/src/app/background/tasks/region_snapshot_replacement_garbage_collect.rs index 57bbf3741c..5109ef6b8f 100644 --- a/nexus/src/app/background/tasks/region_snapshot_replacement_garbage_collect.rs +++ b/nexus/src/app/background/tasks/region_snapshot_replacement_garbage_collect.rs @@ -15,7 +15,7 @@ use futures::FutureExt; use nexus_db_model::RegionSnapshotReplacement; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; -use nexus_types::internal_api::background::RegionSnapshotReplacementGarbageCollectStatus; +use nexus_types::internal_api::background::*; use serde_json::json; use std::sync::Arc; @@ -34,7 +34,7 @@ impl RegionSnapshotReplacementGarbageCollect { opctx: &OpContext, request: RegionSnapshotReplacement, ) -> Result<(), omicron_common::api::external::Error> { - let Some(old_snapshot_volume_id) = request.old_snapshot_volume_id + let Some(old_snapshot_volume_id) = request.old_snapshot_volume_id() else { // This state is illegal! let s = format!( @@ -155,6 +155,7 @@ mod test { use nexus_db_model::RegionSnapshotReplacementState; use nexus_test_utils_macros::nexus_test; use omicron_uuid_kinds::DatasetUuid; + use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::VolumeConstructionRequest; use uuid::Uuid; @@ -196,11 +197,11 @@ mod test { ); request.replacement_state = RegionSnapshotReplacementState::ReplacementDone; - request.old_snapshot_volume_id = Some(Uuid::new_v4()); + request.old_snapshot_volume_id = Some(VolumeUuid::new_v4().into()); let request_1_id = request.id; - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -230,11 +231,11 @@ mod test { ); request.replacement_state = RegionSnapshotReplacementState::ReplacementDone; - request.old_snapshot_volume_id = Some(Uuid::new_v4()); + request.old_snapshot_volume_id = Some(VolumeUuid::new_v4().into()); let request_2_id = request.id; - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( diff --git a/nexus/src/app/background/tasks/region_snapshot_replacement_start.rs b/nexus/src/app/background/tasks/region_snapshot_replacement_start.rs index f2b82a3943..f0fabe1a53 100644 --- a/nexus/src/app/background/tasks/region_snapshot_replacement_start.rs +++ b/nexus/src/app/background/tasks/region_snapshot_replacement_start.rs @@ -355,6 +355,7 @@ mod test { use omicron_common::api::external; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::CrucibleOpts; use sled_agent_client::types::VolumeConstructionRequest; use std::collections::BTreeMap; @@ -408,7 +409,7 @@ mod test { let request_id = request.id; - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -513,13 +514,13 @@ mod test { .await .unwrap(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: Uuid::new_v4(), block_size: 512, sub_volumes: vec![], 
read_only_parent: None, @@ -548,8 +549,9 @@ mod test { project_id, disk_id: Uuid::new_v4(), - volume_id, - destination_volume_id: Uuid::new_v4(), + + volume_id: volume_id.into(), + destination_volume_id: VolumeUuid::new_v4().into(), gen: Generation::new(), state: SnapshotState::Creating, @@ -684,7 +686,7 @@ mod test { let request_id = request.id; - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -736,7 +738,7 @@ mod test { .unwrap(); assert!(!records.is_empty()); - assert_eq!(records[0].volume_id, volume_id); + assert_eq!(records[0].volume_id, volume_id.into()); datastore .insert_region_snapshot_replacement_request_with_volume_id( diff --git a/nexus/src/app/background/tasks/region_snapshot_replacement_step.rs b/nexus/src/app/background/tasks/region_snapshot_replacement_step.rs index f481126312..b538dda182 100644 --- a/nexus/src/app/background/tasks/region_snapshot_replacement_step.rs +++ b/nexus/src/app/background/tasks/region_snapshot_replacement_step.rs @@ -72,7 +72,7 @@ impl RegionSnapshotReplacementFindAffected { opctx: &OpContext, request: RegionSnapshotReplacementStep, ) -> Result<(), Error> { - let Some(old_snapshot_volume_id) = request.old_snapshot_volume_id + let Some(old_snapshot_volume_id) = request.old_snapshot_volume_id() else { // This state is illegal! let s = format!( @@ -140,8 +140,8 @@ impl RegionSnapshotReplacementFindAffected { info!( &log, "{s}"; - "request.volume_id" => %request.volume_id, - "request.old_snapshot_volume_id" => ?request.old_snapshot_volume_id, + "request.volume_id" => %request.volume_id(), + "request.old_snapshot_volume_id" => ?request.old_snapshot_volume_id(), ); status.step_garbage_collect_invoked_ok.push(s); } @@ -154,8 +154,8 @@ impl RegionSnapshotReplacementFindAffected { error!( &log, "{s}"; - "request.volume_id" => %request.volume_id, - "request.old_snapshot_volume_id" => ?request.old_snapshot_volume_id, + "request.volume_id" => %request.volume_id(), + "request.old_snapshot_volume_id" => ?request.old_snapshot_volume_id(), ); status.errors.push(s); } @@ -428,22 +428,25 @@ impl RegionSnapshotReplacementFindAffected { // saga if it was deleted: the saga will do the right thing if it is // deleted, but this avoids the overhead of starting it. 
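// Sketch of the guard described above (illustrative; the error type is
// assumed, and only datastore calls that appear elsewhere in this patch are
// used): replacement work is skipped when the request's volume has already
// been soft- or hard-deleted.

use nexus_db_queries::db::DataStore;
use omicron_common::api::external::Error;
use omicron_uuid_kinds::VolumeUuid;

async fn volume_still_live(
    datastore: &DataStore,
    volume_id: VolumeUuid,
) -> Result<bool, Error> {
    // `volume_deleted` covers both soft and hard deletion; callers proceed
    // with a replacement step only while this returns true.
    let deleted = datastore.volume_deleted(volume_id).await?;
    Ok(!deleted)
}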
- let volume_deleted = - match self.datastore.volume_deleted(request.volume_id).await { - Ok(volume_deleted) => volume_deleted, + let volume_deleted = match self + .datastore + .volume_deleted(request.volume_id()) + .await + { + Ok(volume_deleted) => volume_deleted, - Err(e) => { - let s = format!( - "error checking if volume id {} was \ + Err(e) => { + let s = format!( + "error checking if volume id {} was \ deleted: {e}", - request.volume_id, - ); - error!(&log, "{s}"); + request.volume_id, + ); + error!(&log, "{s}"); - status.errors.push(s); - continue; - } - }; + status.errors.push(s); + continue; + } + }; if volume_deleted { // Volume was soft or hard deleted, so proceed with clean up, @@ -502,7 +505,7 @@ impl RegionSnapshotReplacementFindAffected { &log, "{s}"; "request.request_id" => %request.request_id, - "request.volume_id" => %request.volume_id, + "request.volume_id" => %request.volume_id(), ); status.step_invoked_ok.push(s); } @@ -517,7 +520,7 @@ impl RegionSnapshotReplacementFindAffected { &log, "{s}"; "request.request_id" => %request.request_id, - "request.volume_id" => %request.volume_id, + "request.volume_id" => %request.volume_id(), ); status.errors.push(s); } @@ -566,6 +569,8 @@ mod test { use nexus_db_model::Volume; use nexus_test_utils_macros::nexus_test; use omicron_uuid_kinds::DatasetUuid; + use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::CrucibleOpts; use sled_agent_client::types::VolumeConstructionRequest; use uuid::Uuid; @@ -576,8 +581,8 @@ mod test { async fn add_fake_volume_for_snapshot_addr( datastore: &DataStore, snapshot_addr: String, - ) -> Uuid { - let new_volume_id = Uuid::new_v4(); + ) -> VolumeUuid { + let new_volume_id = VolumeUuid::new_v4(); // need to add region snapshot objects to satisfy volume create // transaction's search for resources @@ -593,7 +598,7 @@ mod test { .unwrap(); let volume_construction_request = VolumeConstructionRequest::Volume { - id: new_volume_id, + id: *new_volume_id.as_untyped_uuid(), block_size: 0, sub_volumes: vec![], read_only_parent: Some(Box::new( @@ -672,7 +677,7 @@ mod test { let request_id = request.id; - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -710,8 +715,8 @@ mod test { .unwrap(); let new_region_id = Uuid::new_v4(); - let new_region_volume_id = Uuid::new_v4(); - let old_snapshot_volume_id = Uuid::new_v4(); + let new_region_volume_id = VolumeUuid::new_v4(); + let old_snapshot_volume_id = VolumeUuid::new_v4(); datastore .set_region_snapshot_replacement_replacement_done( @@ -719,8 +724,8 @@ mod test { request_id, operating_saga_id, new_region_id, - new_region_volume_id, - old_snapshot_volume_id, + NewRegionVolumeId(new_region_volume_id), + OldSnapshotVolumeId(old_snapshot_volume_id), ) .await .unwrap(); @@ -794,12 +799,12 @@ mod test { ); assert!(result.step_invoked_ok.contains(&s)); - if step.volume_id == new_volume_1_id - || step.volume_id == new_volume_2_id + if step.volume_id() == new_volume_1_id + || step.volume_id() == new_volume_2_id { // ok! - } else if step.volume_id == other_volume_1_id - || step.volume_id == other_volume_2_id + } else if step.volume_id() == other_volume_1_id + || step.volume_id() == other_volume_2_id { // error! assert!(false); @@ -844,7 +849,7 @@ mod test { // Now, add some Complete records and make sure the garbage collection // saga is invoked. 
- let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -869,7 +874,8 @@ mod test { record.replacement_state = RegionSnapshotReplacementStepState::Complete; - record.old_snapshot_volume_id = Some(Uuid::new_v4()); + record.old_snapshot_volume_id = + Some(VolumeUuid::new_v4().into()); record }) @@ -878,7 +884,7 @@ mod test { assert!(matches!(result, InsertStepResult::Inserted { .. })); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -903,7 +909,8 @@ mod test { record.replacement_state = RegionSnapshotReplacementStepState::Complete; - record.old_snapshot_volume_id = Some(Uuid::new_v4()); + record.old_snapshot_volume_id = + Some(VolumeUuid::new_v4().into()); record }) diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index adb0010e9a..556ed5f104 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -341,7 +341,7 @@ impl super::Nexus { serialized_authn: authn::saga::Serialized::for_opctx(opctx), project_id: project.id(), disk_id: authz_disk.id(), - volume_id: db_disk.volume_id, + volume_id: db_disk.volume_id(), }; self.sagas .saga_execute::(saga_params) @@ -366,7 +366,8 @@ impl super::Nexus { .fetch() .await?; - self.volume_remove_read_only_parent(&opctx, db_disk.volume_id).await?; + self.volume_remove_read_only_parent(&opctx, db_disk.volume_id()) + .await?; Ok(()) } diff --git a/nexus/src/app/image.rs b/nexus/src/app/image.rs index a3fa722d36..347ff949af 100644 --- a/nexus/src/app/image.rs +++ b/nexus/src/app/image.rs @@ -23,6 +23,7 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::NameOrId; use omicron_common::api::external::UpdateResult; +use omicron_uuid_kinds::VolumeUuid; use std::sync::Arc; use uuid::Uuid; @@ -122,7 +123,7 @@ impl super::Nexus { let image_volume = self .db_datastore .volume_checkout_randomize_ids( - db_snapshot.volume_id, + db_snapshot.volume_id(), db::datastore::VolumeCheckoutReason::ReadOnlyCopy, ) .await?; @@ -134,7 +135,7 @@ impl super::Nexus { ), silo_id: authz_silo.id(), project_id: maybe_authz_project.clone().map(|p| p.id()), - volume_id: image_volume.id(), + volume_id: image_volume.id().into(), url: None, os: params.os.clone(), version: params.version.clone(), @@ -179,7 +180,7 @@ impl super::Nexus { })?; let new_image_volume = - db::model::Volume::new(Uuid::new_v4(), volume_data); + db::model::Volume::new(VolumeUuid::new_v4(), volume_data); let volume = self.db_datastore.volume_create(new_image_volume).await?; @@ -190,7 +191,7 @@ impl super::Nexus { ), silo_id: authz_silo.id(), project_id: maybe_authz_project.clone().map(|p| p.id()), - volume_id: volume.id(), + volume_id: volume.id().into(), url: None, os: "alpine".into(), version: "propolis-blob".into(), diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index e916974332..7ba41777b3 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -1079,7 +1079,7 @@ impl super::Nexus { let volume = self .db_datastore .volume_checkout( - disk.volume_id, + disk.volume_id(), match operation { InstanceRegisterReason::Start { vmm_id } => db::datastore::VolumeCheckoutReason::InstanceStart { vmm_id }, diff --git a/nexus/src/app/sagas/common_storage.rs b/nexus/src/app/sagas/common_storage.rs index 59c76e30e7..3100d9e8f8 100644 --- a/nexus/src/app/sagas/common_storage.rs +++ b/nexus/src/app/sagas/common_storage.rs @@ -90,7 +90,7 @@ 
pub(crate) async fn call_pantry_attach_for_disk( let disk_volume = nexus .datastore() .volume_checkout( - disk.volume_id, + disk.volume_id(), db::datastore::VolumeCheckoutReason::Pantry, ) .await @@ -99,7 +99,7 @@ pub(crate) async fn call_pantry_attach_for_disk( info!( log, "sending attach for disk {disk_id} volume {} to endpoint {endpoint}", - disk.volume_id, + disk.volume_id(), ); let volume_construction_request: VolumeConstructionRequest = diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 51f6d29de1..8fdff04e1c 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -17,6 +17,7 @@ use nexus_db_queries::db::identity::{Asset, Resource}; use nexus_db_queries::db::lookup::LookupPath; use omicron_common::api::external::DiskState; use omicron_common::api::external::Error; +use omicron_uuid_kinds::VolumeUuid; use rand::{rngs::StdRng, RngCore, SeedableRng}; use serde::Deserialize; use serde::Serialize; @@ -137,7 +138,7 @@ async fn sdc_create_disk_record( // We admittedly reference the volume before it has been allocated, // but this should be acceptable because the disk remains in a "Creating" // state until the saga has completed. - let volume_id = sagactx.lookup::("volume_id")?; + let volume_id = sagactx.lookup::("volume_id")?; let opctx = crate::context::op_context_for_saga_action( &sagactx, ¶ms.serialized_authn, @@ -237,7 +238,7 @@ async fn sdc_alloc_regions( ) -> Result, ActionError> { let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; - let volume_id = sagactx.lookup::("volume_id")?; + let volume_id = sagactx.lookup::("volume_id")?; // Ensure the disk is backed by appropriate regions. // @@ -389,13 +390,13 @@ async fn sdc_regions_ensure( log, "grabbing snapshot {} volume {}", db_snapshot.id(), - db_snapshot.volume_id, + db_snapshot.volume_id(), ); let volume = osagactx .datastore() .volume_checkout( - db_snapshot.volume_id, + db_snapshot.volume_id(), db::datastore::VolumeCheckoutReason::ReadOnlyCopy, ) .await @@ -435,13 +436,13 @@ async fn sdc_regions_ensure( log, "grabbing image {} volume {}", image.id(), - image.volume_id + image.volume_id(), ); let volume = osagactx .datastore() .volume_checkout( - image.volume_id, + image.volume_id(), db::datastore::VolumeCheckoutReason::ReadOnlyCopy, ) .await @@ -612,7 +613,7 @@ async fn sdc_create_volume_record( ) -> Result<(), ActionError> { let osagactx = sagactx.user_data(); - let volume_id = sagactx.lookup::("volume_id")?; + let volume_id = sagactx.lookup::("volume_id")?; let volume_data = sagactx.lookup::("regions_ensure")?; let volume = db::model::Volume::new(volume_id, volume_data); @@ -631,7 +632,7 @@ async fn sdc_create_volume_record_undo( ) -> Result<(), anyhow::Error> { let osagactx = sagactx.user_data(); - let volume_id = sagactx.lookup::("volume_id")?; + let volume_id = sagactx.lookup::("volume_id")?; // Depending on the read only parent, there will some read only resources // used, however this saga tracks them all. 
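Across these saga changes one pattern repeats: the typed id rides through saga parameters and node outputs exactly as the bare Uuid did, because VolumeUuid carries the same serde derives. A minimal sketch (the struct and node name here are placeholders, not taken from the patch):

use omicron_uuid_kinds::VolumeUuid;
use serde::{Deserialize, Serialize};
use uuid::Uuid;

// Saga parameters can hold the typed id directly.
#[derive(Debug, Deserialize, Serialize)]
struct ExampleDiskSagaParams {
    disk_id: Uuid,
    volume_id: VolumeUuid,
}

// Inside an action, an earlier node's output is recovered by name, with only
// the turbofish changing from Uuid to VolumeUuid:
//
//     let volume_id = sagactx.lookup::<VolumeUuid>("volume_id")?;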
diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index aab361eced..296216f089 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -11,6 +11,7 @@ use crate::app::sagas::SagaInitError; use nexus_db_queries::authn; use nexus_db_queries::db; use omicron_common::api::external::DiskState; +use omicron_uuid_kinds::VolumeUuid; use serde::Deserialize; use serde::Serialize; use steno::ActionError; @@ -24,7 +25,7 @@ pub(crate) struct Params { pub serialized_authn: authn::saga::Serialized, pub project_id: Uuid, pub disk_id: Uuid, - pub volume_id: Uuid, + pub volume_id: VolumeUuid, } // disk delete saga: actions @@ -242,7 +243,7 @@ pub(crate) mod test { serialized_authn: Serialized::for_opctx(&opctx), project_id, disk_id: disk.id(), - volume_id: disk.volume_id, + volume_id: disk.volume_id(), }; nexus.sagas.saga_execute::(params).await.unwrap(); } @@ -264,7 +265,7 @@ pub(crate) mod test { serialized_authn: Serialized::for_opctx(&opctx), project_id, disk_id: disk.id(), - volume_id: disk.volume_id, + volume_id: disk.volume_id(), }; let dag = create_saga_dag::(params).unwrap(); crate::app::sagas::test_helpers::actions_succeed_idempotently( diff --git a/nexus/src/app/sagas/image_delete.rs b/nexus/src/app/sagas/image_delete.rs index 1d88ff17af..32b6655aba 100644 --- a/nexus/src/app/sagas/image_delete.rs +++ b/nexus/src/app/sagas/image_delete.rs @@ -6,11 +6,11 @@ use super::{ActionRegistry, NexusActionContext, NexusSaga}; use crate::app::sagas; use crate::app::sagas::declare_saga_actions; use nexus_db_queries::{authn, authz, db}; +use omicron_uuid_kinds::VolumeUuid; use serde::Deserialize; use serde::Serialize; use steno::ActionError; use steno::Node; -use uuid::Uuid; #[derive(Debug, Deserialize, Serialize)] pub(crate) enum ImageParam { @@ -20,11 +20,11 @@ pub(crate) enum ImageParam { } impl ImageParam { - fn volume_id(&self) -> Uuid { + fn volume_id(&self) -> VolumeUuid { match self { - ImageParam::Project { image, .. } => image.volume_id, + ImageParam::Project { image, .. } => image.volume_id(), - ImageParam::Silo { image, .. } => image.volume_id, + ImageParam::Silo { image, .. } => image.volume_id(), } } } diff --git a/nexus/src/app/sagas/region_replacement_drive.rs b/nexus/src/app/sagas/region_replacement_drive.rs index 81f9a9a7eb..ed0fdacd70 100644 --- a/nexus/src/app/sagas/region_replacement_drive.rs +++ b/nexus/src/app/sagas/region_replacement_drive.rs @@ -150,6 +150,7 @@ use chrono::Utc; use nexus_db_model::VmmState; use nexus_types::identity::Resource; use omicron_common::api::external::Error; +use omicron_uuid_kinds::VolumeUuid; use propolis_client::types::ReplaceResult; use serde::Deserialize; use serde::Serialize; @@ -320,7 +321,7 @@ async fn srrd_drive_region_replacement_check( let volume_deleted = osagactx .datastore() - .volume_deleted(params.request.volume_id) + .volume_deleted(params.request.volume_id()) .await .map_err(ActionError::action_failed)?; @@ -329,7 +330,7 @@ async fn srrd_drive_region_replacement_check( log, "volume was soft or hard deleted!"; "region replacement id" => %params.request.id, - "volume id" => %params.request.volume_id, + "volume id" => %params.request.volume_id(), ); return Ok(DriveCheck::Done); @@ -814,7 +815,7 @@ enum DriveAction { /// If there is no active Propolis that is running the Volume, attach the /// associated Volume to a Pantry. 
- Pantry { step: db::model::RegionReplacementStep, volume_id: Uuid }, + Pantry { step: db::model::RegionReplacementStep, volume_id: VolumeUuid }, /// If the Volume is currently running in a Propolis server, then send the /// volume replacement request there. @@ -1230,7 +1231,7 @@ async fn srrd_drive_region_replacement_execute( let disk_new_volume_vcr = match osagactx .datastore() - .volume_get(disk.volume_id) + .volume_get(disk.volume_id()) .await .map_err(ActionError::action_failed)? { @@ -1281,7 +1282,7 @@ async fn execute_pantry_drive_action( datastore: &db::DataStore, request_id: Uuid, pantry_address: SocketAddrV6, - volume_id: Uuid, + volume_id: VolumeUuid, job_id: Uuid, ) -> Result<(), ActionError> { // Importantly, _do not use `call_pantry_attach_for_disk`_! That call uses @@ -1494,7 +1495,7 @@ async fn execute_propolis_drive_action( "sending replacement request for disk volume to propolis {step_vmm_id}"; "region replacement id" => %request_id, "disk id" => ?disk.id(), - "volume id" => ?disk.volume_id, + "volume id" => ?disk.volume_id(), ); // Start (or poll) the replacement @@ -1525,7 +1526,7 @@ async fn execute_propolis_drive_action( "saw replace result {replace_result:?}"; "region replacement id" => %request_id, "disk id" => ?disk.id(), - "volume id" => ?disk.volume_id, + "volume id" => ?disk.volume_id(), ); let replacement_done = match &replace_result { diff --git a/nexus/src/app/sagas/region_replacement_finish.rs b/nexus/src/app/sagas/region_replacement_finish.rs index 2212e6fdf3..9470d00322 100644 --- a/nexus/src/app/sagas/region_replacement_finish.rs +++ b/nexus/src/app/sagas/region_replacement_finish.rs @@ -37,6 +37,7 @@ use super::{ use crate::app::sagas::declare_saga_actions; use crate::app::sagas::volume_delete; use crate::app::{authn, db}; +use omicron_uuid_kinds::VolumeUuid; use serde::Deserialize; use serde::Serialize; use steno::ActionError; @@ -51,7 +52,7 @@ pub(crate) struct Params { /// The fake volume created for the region that was replaced // Note: this is only required in the params to build the volume-delete sub // saga - pub region_volume_id: Uuid, + pub region_volume_id: VolumeUuid, pub request: db::model::RegionReplacement, } @@ -219,6 +220,8 @@ pub(crate) mod test { use nexus_db_queries::context::OpContext; use nexus_test_utils_macros::nexus_test; use omicron_uuid_kinds::DatasetUuid; + use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::CrucibleOpts; use sled_agent_client::types::VolumeConstructionRequest; use uuid::Uuid; @@ -238,8 +241,8 @@ pub(crate) mod test { ); // Manually insert required records - let old_region_volume_id = Uuid::new_v4(); - let new_volume_id = Uuid::new_v4(); + let old_region_volume_id = VolumeUuid::new_v4(); + let new_volume_id = VolumeUuid::new_v4(); let replaced_region = { let dataset_id = DatasetUuid::new_v4(); @@ -266,7 +269,7 @@ pub(crate) mod test { } let volume_construction_request = VolumeConstructionRequest::Volume { - id: old_region_volume_id, + id: *old_region_volume_id.as_untyped_uuid(), block_size: 0, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 0, @@ -274,7 +277,7 @@ pub(crate) mod test { extent_count: 0, gen: 0, opts: CrucibleOpts { - id: old_region_volume_id, + id: Uuid::new_v4(), target: vec![ // if you put something here, you'll need a synthetic // dataset record @@ -304,8 +307,8 @@ pub(crate) mod test { id: Uuid::new_v4(), request_time: Utc::now(), old_region_id: replaced_region.id(), - volume_id: new_volume_id, - old_region_volume_id: 
Some(old_region_volume_id), + volume_id: new_volume_id.into(), + old_region_volume_id: Some(old_region_volume_id.into()), new_region_id: None, // no value needed here replacement_state: RegionReplacementState::ReplacementDone, operating_saga_id: None, @@ -315,7 +318,7 @@ pub(crate) mod test { .volume_create(nexus_db_model::Volume::new( new_volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: new_volume_id, + id: Uuid::new_v4(), block_size: 512, sub_volumes: vec![], // nothing needed here read_only_parent: None, diff --git a/nexus/src/app/sagas/region_replacement_start.rs b/nexus/src/app/sagas/region_replacement_start.rs index aa9e83c037..c190c5fcad 100644 --- a/nexus/src/app/sagas/region_replacement_start.rs +++ b/nexus/src/app/sagas/region_replacement_start.rs @@ -56,6 +56,8 @@ use crate::app::RegionAllocationStrategy; use crate::app::{authn, db}; use nexus_db_queries::db::datastore::REGION_REDUNDANCY_THRESHOLD; use omicron_common::api::external::Error; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::VolumeUuid; use serde::Deserialize; use serde::Serialize; use sled_agent_client::types::CrucibleOpts; @@ -405,7 +407,7 @@ async fn srrs_new_region_ensure_undo( async fn srrs_get_old_region_volume_id( sagactx: NexusActionContext, -) -> Result { +) -> Result { // Save the region's original volume ID, because we'll be altering it and // need the original @@ -502,7 +504,7 @@ async fn srrs_replace_region_in_volume( .await .map_err(ActionError::action_failed)?; - let new_volume_id = sagactx.lookup::("new_volume_id")?; + let new_volume_id = sagactx.lookup::("new_volume_id")?; let old_region_address = sagactx.lookup::("old_region_address")?; @@ -523,7 +525,7 @@ async fn srrs_replace_region_in_volume( // If this node is rerun, the forward action will have overwritten // db_region's volume id, so get the cached copy. - let old_volume_id = sagactx.lookup::("old_region_volume_id")?; + let old_volume_id = sagactx.lookup::("old_region_volume_id")?; info!( log, @@ -602,7 +604,7 @@ async fn srrs_replace_region_in_volume_undo( .await .map_err(ActionError::action_failed)?; - let new_volume_id = sagactx.lookup::("new_volume_id")?; + let new_volume_id = sagactx.lookup::("new_volume_id")?; let old_region_address = sagactx.lookup::("old_region_address")?; @@ -621,7 +623,7 @@ async fn srrs_replace_region_in_volume_undo( // The forward action will have overwritten db_region's volume id, so get // the cached copy. - let old_volume_id = sagactx.lookup::("old_region_volume_id")?; + let old_volume_id = sagactx.lookup::("old_region_volume_id")?; info!( log, @@ -670,7 +672,7 @@ async fn srrs_create_fake_volume( ) -> Result<(), ActionError> { let osagactx = sagactx.user_data(); - let new_volume_id = sagactx.lookup::("new_volume_id")?; + let new_volume_id = sagactx.lookup::("new_volume_id")?; let old_region_address = sagactx.lookup::("old_region_address")?; @@ -679,7 +681,7 @@ async fn srrs_create_fake_volume( // finished. let volume_construction_request = VolumeConstructionRequest::Volume { - id: new_volume_id, + id: *new_volume_id.as_untyped_uuid(), block_size: 0, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 0, @@ -687,7 +689,7 @@ async fn srrs_create_fake_volume( extent_count: 0, gen: 0, opts: CrucibleOpts { - id: new_volume_id, + id: *new_volume_id.as_untyped_uuid(), target: vec![old_region_address.to_string()], lossy: false, flush_timeout: None, @@ -725,7 +727,7 @@ async fn srrs_create_fake_volume_undo( // Delete the fake volume. 
- let new_volume_id = sagactx.lookup::("new_volume_id")?; + let new_volume_id = sagactx.lookup::("new_volume_id")?; osagactx.datastore().volume_hard_delete(new_volume_id).await?; Ok(()) @@ -749,7 +751,7 @@ async fn srrs_update_request_record( )?; let new_region_id = new_dataset_and_region.1.id(); - let old_region_volume_id = sagactx.lookup::("new_volume_id")?; + let old_region_volume_id = sagactx.lookup::("new_volume_id")?; // Now that the region has been ensured and the construction request has // been updated, update the replacement request record to 'Running' and @@ -793,6 +795,7 @@ pub(crate) mod test { use nexus_types::identity::Asset; use omicron_common::api::internal::shared::DatasetKind; use omicron_uuid_kinds::DatasetUuid; + use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::VolumeConstructionRequest; use uuid::Uuid; @@ -832,7 +835,7 @@ pub(crate) mod test { .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); let allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), 3); // Replace one of the disk's regions @@ -844,7 +847,7 @@ pub(crate) mod test { id: Uuid::new_v4(), request_time: Utc::now(), old_region_id: region_to_replace.id(), - volume_id: region_to_replace.volume_id(), + volume_id: region_to_replace.volume_id().into(), old_region_volume_id: None, new_region_id: None, replacement_state: RegionReplacementState::Requested, @@ -881,7 +884,7 @@ pub(crate) mod test { // Validate number of regions for disk didn't change let allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), 3); // Validate that one of the regions for the disk is the new one @@ -895,7 +898,7 @@ pub(crate) mod test { let old_region = datastore.get_region(region_to_replace.id()).await.unwrap(); let new_volume_id = - output.lookup_node_output::("new_volume_id").unwrap(); + output.lookup_node_output::("new_volume_id").unwrap(); assert_eq!(old_region.volume_id(), new_volume_id); } @@ -933,7 +936,7 @@ pub(crate) mod test { let regions = vec![ Region::new( datasets[0].id(), - Uuid::new_v4(), + VolumeUuid::new_v4(), 512_i64.try_into().unwrap(), 10, 10, @@ -942,7 +945,7 @@ pub(crate) mod test { ), Region::new( datasets[1].id(), - Uuid::new_v4(), + VolumeUuid::new_v4(), 512_i64.try_into().unwrap(), 10, 10, @@ -951,7 +954,7 @@ pub(crate) mod test { ), Region::new( datasets[2].id(), - Uuid::new_v4(), + VolumeUuid::new_v4(), 512_i64.try_into().unwrap(), 10, 10, @@ -960,7 +963,7 @@ pub(crate) mod test { ), Region::new( datasets[3].id(), - Uuid::new_v4(), + VolumeUuid::new_v4(), 512_i64.try_into().unwrap(), 10, 10, @@ -1157,7 +1160,7 @@ pub(crate) mod test { .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); let allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), 3); let region_to_replace: &Region = &allocated_regions[0].1; @@ -1166,7 +1169,7 @@ pub(crate) mod test { id: Uuid::new_v4(), request_time: Utc::now(), old_region_id: region_to_replace.id(), - volume_id: region_to_replace.volume_id(), + volume_id: region_to_replace.volume_id().into(), old_region_volume_id: None, new_region_id: None, replacement_state: RegionReplacementState::Requested, @@ -1232,7 
+1235,7 @@ pub(crate) mod test { .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); let allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), 3); let region_to_replace: &Region = &allocated_regions[0].1; @@ -1241,7 +1244,7 @@ pub(crate) mod test { id: Uuid::new_v4(), request_time: Utc::now(), old_region_id: region_to_replace.id(), - volume_id: region_to_replace.volume_id(), + volume_id: region_to_replace.volume_id().into(), old_region_volume_id: None, new_region_id: None, replacement_state: RegionReplacementState::Requested, diff --git a/nexus/src/app/sagas/region_snapshot_replacement_finish.rs b/nexus/src/app/sagas/region_snapshot_replacement_finish.rs index d992f753d6..f99d523392 100644 --- a/nexus/src/app/sagas/region_snapshot_replacement_finish.rs +++ b/nexus/src/app/sagas/region_snapshot_replacement_finish.rs @@ -88,7 +88,8 @@ impl NexusSaga for SagaRegionSnapshotReplacementFinish { builder.append(set_saga_id_action()); - if let Some(new_region_volume_id) = params.request.new_region_volume_id + if let Some(new_region_volume_id) = + params.request.new_region_volume_id() { let subsaga_params = volume_delete::Params { serialized_authn: params.serialized_authn.clone(), diff --git a/nexus/src/app/sagas/region_snapshot_replacement_garbage_collect.rs b/nexus/src/app/sagas/region_snapshot_replacement_garbage_collect.rs index 675b2b0cb3..8e4254e07d 100644 --- a/nexus/src/app/sagas/region_snapshot_replacement_garbage_collect.rs +++ b/nexus/src/app/sagas/region_snapshot_replacement_garbage_collect.rs @@ -31,6 +31,7 @@ use super::{ use crate::app::sagas::declare_saga_actions; use crate::app::sagas::volume_delete; use crate::app::{authn, db}; +use omicron_uuid_kinds::VolumeUuid; use serde::Deserialize; use serde::Serialize; use steno::ActionError; @@ -45,7 +46,7 @@ pub(crate) struct Params { /// The fake volume created for the snapshot that was replaced // Note: this is only required in the params to build the volume-delete sub // saga - pub old_snapshot_volume_id: Uuid, + pub old_snapshot_volume_id: VolumeUuid, pub request: db::model::RegionSnapshotReplacement, } @@ -219,6 +220,8 @@ pub(crate) mod test { use nexus_db_queries::context::OpContext; use nexus_test_utils_macros::nexus_test; use omicron_uuid_kinds::DatasetUuid; + use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::CrucibleOpts; use sled_agent_client::types::VolumeConstructionRequest; use uuid::Uuid; @@ -238,10 +241,10 @@ pub(crate) mod test { ); // Manually insert required records - let old_snapshot_volume_id = Uuid::new_v4(); + let old_snapshot_volume_id = VolumeUuid::new_v4(); let volume_construction_request = VolumeConstructionRequest::Volume { - id: old_snapshot_volume_id, + id: *old_snapshot_volume_id.as_untyped_uuid(), block_size: 0, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 0, @@ -249,7 +252,7 @@ pub(crate) mod test { extent_count: 0, gen: 0, opts: CrucibleOpts { - id: old_snapshot_volume_id, + id: *old_snapshot_volume_id.as_untyped_uuid(), target: vec![ // if you put something here, you'll need a synthetic // dataset record @@ -282,9 +285,9 @@ pub(crate) mod test { ); request.replacement_state = RegionSnapshotReplacementState::ReplacementDone; - request.old_snapshot_volume_id = Some(old_snapshot_volume_id); + request.old_snapshot_volume_id = Some(old_snapshot_volume_id.into()); - let volume_id = 
Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( diff --git a/nexus/src/app/sagas/region_snapshot_replacement_start.rs b/nexus/src/app/sagas/region_snapshot_replacement_start.rs index b9ed75c288..3c6cc0e22b 100644 --- a/nexus/src/app/sagas/region_snapshot_replacement_start.rs +++ b/nexus/src/app/sagas/region_snapshot_replacement_start.rs @@ -62,10 +62,14 @@ use crate::app::sagas::common_storage::find_only_new_region; use crate::app::sagas::declare_saga_actions; use crate::app::RegionAllocationStrategy; use crate::app::{authn, db}; +use nexus_db_queries::db::datastore::NewRegionVolumeId; +use nexus_db_queries::db::datastore::OldSnapshotVolumeId; use nexus_types::identity::Asset; use nexus_types::identity::Resource; use omicron_common::api::external::Error; use omicron_uuid_kinds::DatasetUuid; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::VolumeUuid; use serde::Deserialize; use serde::Serialize; use sled_agent_client::types::CrucibleOpts; @@ -440,7 +444,7 @@ struct AllocRegionParams { extent_count: u64, current_allocated_regions: Vec<(db::model::Dataset, db::model::Region)>, snapshot_id: Uuid, - snapshot_volume_id: Uuid, + snapshot_volume_id: VolumeUuid, } async fn rsrss_get_alloc_region_params( @@ -479,7 +483,7 @@ async fn rsrss_get_alloc_region_params( let current_allocated_regions = osagactx .datastore() - .get_allocated_regions(db_snapshot.volume_id) + .get_allocated_regions(db_snapshot.volume_id()) .await .map_err(ActionError::action_failed)?; @@ -489,7 +493,7 @@ async fn rsrss_get_alloc_region_params( extent_count: db_region.extent_count(), current_allocated_regions, snapshot_id: db_snapshot.id(), - snapshot_volume_id: db_snapshot.volume_id, + snapshot_volume_id: db_snapshot.volume_id(), }) } @@ -715,7 +719,7 @@ async fn rsrss_new_region_volume_create( let osagactx = sagactx.user_data(); let new_region_volume_id = - sagactx.lookup::("new_region_volume_id")?; + sagactx.lookup::("new_region_volume_id")?; let (new_dataset, ensured_region) = sagactx.lookup::<( db::model::Dataset, @@ -744,7 +748,7 @@ async fn rsrss_new_region_volume_create( // in, removing the last reference to it and causing garbage collection. let volume_construction_request = VolumeConstructionRequest::Volume { - id: new_region_volume_id, + id: new_region_volume_id.into_untyped_uuid(), block_size: 0, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 0, @@ -752,7 +756,7 @@ async fn rsrss_new_region_volume_create( extent_count: 0, gen: 0, opts: CrucibleOpts { - id: new_region_volume_id, + id: new_region_volume_id.into_untyped_uuid(), target: vec![new_region_address.to_string()], lossy: false, flush_timeout: None, @@ -791,7 +795,7 @@ async fn rsrss_new_region_volume_create_undo( // Delete the volume. 
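Both replacement sagas above now read volume IDs through accessor methods (`params.request.new_region_volume_id()`, `db_snapshot.volume_id()`) rather than touching the raw column. A minimal, self-contained sketch of that accessor shape for a nullable column, using stand-in types rather than the real nexus-db-model definitions:

use uuid::Uuid;

// Stand-ins for the typed ID handed to callers and the wrapper the row stores.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct VolumeUuid(Uuid);

#[derive(Clone, Copy)]
struct DbVolumeUuid(Uuid);

struct RegionSnapshotReplacement {
    // Nullable column: only populated once a replacement volume exists.
    new_region_volume_id: Option<DbVolumeUuid>,
}

impl RegionSnapshotReplacement {
    // Callers get Option<VolumeUuid>; the DB wrapper never escapes the model.
    fn new_region_volume_id(&self) -> Option<VolumeUuid> {
        self.new_region_volume_id.map(|id| VolumeUuid(id.0))
    }
}

fn main() {
    let request = RegionSnapshotReplacement { new_region_volume_id: None };
    // Mirrors the finish saga's `if let Some(..) = params.request.new_region_volume_id()`.
    if let Some(_id) = request.new_region_volume_id() {
        // build the volume-delete subsaga here
    }
}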
let new_region_volume_id = - sagactx.lookup::<Uuid>("new_region_volume_id")?; + sagactx.lookup::<VolumeUuid>("new_region_volume_id")?; osagactx.datastore().volume_hard_delete(new_region_volume_id).await?; Ok(()) @@ -799,7 +803,7 @@ async fn rsrss_new_region_volume_create_undo( async fn rsrss_get_old_snapshot_volume_id( sagactx: NexusActionContext, -) -> Result<Uuid, ActionError> { +) -> Result<VolumeUuid, ActionError> { // Save the snapshot's original volume ID, because we'll be altering it and // need the original @@ -827,7 +831,7 @@ async fn rsrss_get_old_snapshot_volume_id( ))); }; - Ok(db_snapshot.volume_id) + Ok(db_snapshot.volume_id()) } async fn rsrss_create_fake_volume( @@ -835,14 +839,14 @@ ) -> Result<(), ActionError> { let osagactx = sagactx.user_data(); - let new_volume_id = sagactx.lookup::<Uuid>("new_volume_id")?; + let new_volume_id = sagactx.lookup::<VolumeUuid>("new_volume_id")?; // Create a fake volume record for the old snapshot target. This will be // deleted after snapshot replacement has finished. It can be completely // blank here, it will be replaced by `volume_replace_snapshot`. let volume_construction_request = VolumeConstructionRequest::Volume { - id: new_volume_id, + id: *new_volume_id.as_untyped_uuid(), block_size: 0, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 0, @@ -850,7 +854,7 @@ async fn rsrss_create_fake_volume( extent_count: 0, gen: 0, opts: CrucibleOpts { - id: new_volume_id, + id: *new_volume_id.as_untyped_uuid(), // Do not put the new region ID here: it will be deleted during // the associated garbage collection saga if // `volume_replace_snapshot` does not perform the swap (which @@ -895,7 +899,7 @@ async fn rsrss_create_fake_volume_undo( // Delete the fake volume. - let new_volume_id = sagactx.lookup::<Uuid>("new_volume_id")?; + let new_volume_id = sagactx.lookup::<VolumeUuid>("new_volume_id")?; osagactx.datastore().volume_hard_delete(new_volume_id).await?; Ok(()) @@ -903,10 +907,10 @@ async fn rsrss_create_fake_volume_undo( #[derive(Debug)] struct ReplaceParams { - old_volume_id: Uuid, + old_volume_id: VolumeUuid, old_snapshot_address: SocketAddrV6, new_region_address: SocketAddrV6, - new_volume_id: Uuid, + new_volume_id: VolumeUuid, } async fn get_replace_params( @@ -915,7 +919,7 @@ let osagactx = sagactx.user_data(); let params = sagactx.saga_params::<Params>()?; - let new_volume_id = sagactx.lookup::<Uuid>("new_volume_id")?; + let new_volume_id = sagactx.lookup::<VolumeUuid>("new_volume_id")?; let region_snapshot = osagactx .datastore() @@ -969,7 +973,8 @@ async fn get_replace_params( 0, ); - let old_volume_id = sagactx.lookup::<Uuid>("old_snapshot_volume_id")?; + let old_volume_id = + sagactx.lookup::<VolumeUuid>("old_snapshot_volume_id")?; // Return the replacement parameters for the forward action case - the undo // will swap the existing and replacement target @@ -1102,10 +1107,10 @@ async fn rsrss_update_request_record( let new_region_id = new_dataset_and_region.1.id(); - let old_region_volume_id = sagactx.lookup::<Uuid>("new_volume_id")?; + let old_region_volume_id = sagactx.lookup::<VolumeUuid>("new_volume_id")?; let new_region_volume_id = - sagactx.lookup::<Uuid>("new_region_volume_id")?; + sagactx.lookup::<VolumeUuid>("new_region_volume_id")?; // Now that the region has been ensured and the construction request has // been updated, update the replacement request record to 'ReplacementDone' @@ -1117,8 +1122,8 @@ async fn rsrss_update_request_record( params.request.id, saga_id, new_region_id, - new_region_volume_id, - old_region_volume_id, + NewRegionVolumeId(new_region_volume_id), + OldSnapshotVolumeId(old_region_volume_id), )
.await .map_err(ActionError::action_failed)?; @@ -1230,14 +1235,14 @@ pub(crate) mod test { // Assert disk has three allocated regions let disk_allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert_eq!(disk_allocated_regions.len(), 3); // Assert the snapshot has zero allocated regions let snapshot_id = snapshot.identity.id; let snapshot_allocated_regions = datastore - .get_allocated_regions(db_snapshot.volume_id) + .get_allocated_regions(db_snapshot.volume_id()) .await .unwrap(); assert_eq!(snapshot_allocated_regions.len(), 0); @@ -1291,12 +1296,12 @@ pub(crate) mod test { // Validate number of regions for disk didn't change let disk_allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert_eq!(disk_allocated_regions.len(), 3); // Validate that the snapshot now has one allocated region let snapshot_allocated_datasets_and_regions = datastore - .get_allocated_regions(db_snapshot.volume_id) + .get_allocated_regions(db_snapshot.volume_id()) .await .unwrap(); @@ -1325,7 +1330,7 @@ pub(crate) mod test { assert!(volumes .iter() .map(|v| v.id()) - .any(|vid| vid == db_snapshot.volume_id)); + .any(|vid| vid == db_snapshot.volume_id())); } fn new_test_params( @@ -1489,7 +1494,7 @@ pub(crate) mod test { let opctx = test_opctx(cptestctx); let disk_allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert_eq!(disk_allocated_regions.len(), 3); let region: &nexus_db_model::Region = &disk_allocated_regions[0].1; @@ -1509,8 +1514,11 @@ pub(crate) mod test { .await .unwrap(); - let affected_volume_original = - datastore.volume_get(db_snapshot.volume_id).await.unwrap().unwrap(); + let affected_volume_original = datastore + .volume_get(db_snapshot.volume_id()) + .await + .unwrap() + .unwrap(); verify_clean_slate( &cptestctx, @@ -1618,7 +1626,7 @@ pub(crate) mod test { let opctx = test_opctx(cptestctx); let disk_allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert_eq!(disk_allocated_regions.len(), 3); let region: &nexus_db_model::Region = &disk_allocated_regions[0].1; diff --git a/nexus/src/app/sagas/region_snapshot_replacement_step.rs b/nexus/src/app/sagas/region_snapshot_replacement_step.rs index a236fcf62c..8163809687 100644 --- a/nexus/src/app/sagas/region_snapshot_replacement_step.rs +++ b/nexus/src/app/sagas/region_snapshot_replacement_step.rs @@ -58,6 +58,8 @@ use crate::app::{authn, authz, db}; use nexus_db_model::VmmState; use nexus_types::identity::Resource; use omicron_common::api::external::Error; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::VolumeUuid; use propolis_client::types::ReplaceResult; use serde::Deserialize; use serde::Serialize; @@ -279,7 +281,7 @@ async fn rssrs_create_fake_volume( ) -> Result<(), ActionError> { let osagactx = sagactx.user_data(); - let new_volume_id = sagactx.lookup::("new_volume_id")?; + let new_volume_id = sagactx.lookup::("new_volume_id")?; // Create a fake volume record for the old snapshot target. This will be // deleted after region snapshot replacement step saga has finished, and the @@ -287,7 +289,7 @@ async fn rssrs_create_fake_volume( // here, it will be replaced by `volume_replace_snapshot`. 
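The call that finishes with the `.await` above now takes `NewRegionVolumeId(..)` and `OldSnapshotVolumeId(..)` instead of two bare volume IDs, so a swapped argument becomes a compile error rather than a silently wrong request record. A rough sketch of the idea with a hypothetical signature (the real datastore method and its other parameters are elided):

use uuid::Uuid;

#[derive(Clone, Copy)]
struct VolumeUuid(Uuid);

// Single-field wrappers whose only job is to name each argument's role.
struct NewRegionVolumeId(VolumeUuid);
struct OldSnapshotVolumeId(VolumeUuid);

// Hypothetical stand-in for the datastore update; not the real method name.
fn mark_replacement_done(
    _request_id: Uuid,
    _new_region_volume_id: NewRegionVolumeId,
    _old_snapshot_volume_id: OldSnapshotVolumeId,
) {
    // With two bare VolumeUuid parameters a swapped call still type-checks;
    // with the wrappers it fails to compile.
}

fn main() {
    let new_region_volume_id = VolumeUuid(Uuid::new_v4());
    let old_region_volume_id = VolumeUuid(Uuid::new_v4());
    mark_replacement_done(
        Uuid::new_v4(),
        NewRegionVolumeId(new_region_volume_id),
        OldSnapshotVolumeId(old_region_volume_id),
    );
}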
let volume_construction_request = VolumeConstructionRequest::Volume { - id: new_volume_id, + id: *new_volume_id.as_untyped_uuid(), block_size: 0, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 0, @@ -295,7 +297,7 @@ async fn rssrs_create_fake_volume( extent_count: 0, gen: 0, opts: CrucibleOpts { - id: new_volume_id, + id: *new_volume_id.as_untyped_uuid(), target: vec![], lossy: false, flush_timeout: None, @@ -333,7 +335,7 @@ async fn rssrs_create_fake_volume_undo( // Delete the fake volume. - let new_volume_id = sagactx.lookup::("new_volume_id")?; + let new_volume_id = sagactx.lookup::("new_volume_id")?; osagactx.datastore().volume_hard_delete(new_volume_id).await?; Ok(()) @@ -348,7 +350,7 @@ async fn rsrss_replace_snapshot_in_volume( let replace_params = sagactx.lookup::("replace_params")?; - let new_volume_id = sagactx.lookup::("new_volume_id")?; + let new_volume_id = sagactx.lookup::("new_volume_id")?; // `volume_replace_snapshot` will swap the old snapshot for the new region. // No repair or reconcilation needs to occur after this. @@ -356,7 +358,7 @@ async fn rsrss_replace_snapshot_in_volume( let volume_replace_snapshot_result = osagactx .datastore() .volume_replace_snapshot( - VolumeWithTarget(params.request.volume_id), + VolumeWithTarget(params.request.volume_id()), ExistingTarget(replace_params.old_snapshot_address), ReplacementTarget(replace_params.new_region_address), VolumeToDelete(new_volume_id), @@ -393,7 +395,7 @@ async fn rsrss_replace_snapshot_in_volume_undo( let replace_params = sagactx.lookup::("replace_params")?; - let new_volume_id = sagactx.lookup::("new_volume_id")?; + let new_volume_id = sagactx.lookup::("new_volume_id")?; // It's ok if this function returned ExistingVolumeDeleted, don't cause the // saga to get stuck unwinding! @@ -401,7 +403,7 @@ async fn rsrss_replace_snapshot_in_volume_undo( let volume_replace_snapshot_result = osagactx .datastore() .volume_replace_snapshot( - VolumeWithTarget(params.request.volume_id), + VolumeWithTarget(params.request.volume_id()), ExistingTarget(replace_params.new_region_address), ReplacementTarget(replace_params.old_snapshot_address), VolumeToDelete(new_volume_id), @@ -450,7 +452,7 @@ async fn rsrss_notify_upstairs( let Some(disk) = osagactx .datastore() - .disk_for_volume_id(params.request.volume_id) + .disk_for_volume_id(params.request.volume_id()) .await .map_err(ActionError::action_failed)? else { @@ -489,7 +491,7 @@ async fn rsrss_notify_upstairs( "volume associated with disk attached to instance with vmm in \ state {state}"; "request id" => %params.request.id, - "volume id" => %params.request.volume_id, + "volume id" => %params.request.volume_id(), "disk id" => ?disk.id(), "instance id" => ?instance_id, "vmm id" => ?vmm.id, @@ -519,7 +521,7 @@ async fn rsrss_notify_upstairs( let new_volume_vcr = match osagactx .datastore() - .volume_get(params.request.volume_id) + .volume_get(params.request.volume_id()) .await .map_err(ActionError::action_failed)? 
{ @@ -549,7 +551,7 @@ async fn rsrss_notify_upstairs( log, "sending replacement request for disk volume to propolis"; "request id" => %params.request.id, - "volume id" => %params.request.volume_id, + "volume id" => %params.request.volume_id(), "disk id" => ?disk.id(), "instance id" => ?instance_id, "vmm id" => ?vmm.id, @@ -581,7 +583,7 @@ async fn rsrss_notify_upstairs( log, "saw replace result {replace_result:?}"; "request id" => %params.request.id, - "volume id" => %params.request.volume_id, + "volume id" => %params.request.volume_id(), "disk id" => ?disk.id(), "instance id" => ?instance_id, "vmm id" => ?vmm.id, @@ -630,7 +632,7 @@ async fn rsrss_update_request_record( ); let saga_id = sagactx.lookup::("saga_id")?; - let new_volume_id = sagactx.lookup::("new_volume_id")?; + let new_volume_id = sagactx.lookup::("new_volume_id")?; // Update the request record to 'Completed' and clear the operating saga id. // There is no undo step for this, it should succeed idempotently. diff --git a/nexus/src/app/sagas/region_snapshot_replacement_step_garbage_collect.rs b/nexus/src/app/sagas/region_snapshot_replacement_step_garbage_collect.rs index 15c6a39651..57907cf8ea 100644 --- a/nexus/src/app/sagas/region_snapshot_replacement_step_garbage_collect.rs +++ b/nexus/src/app/sagas/region_snapshot_replacement_step_garbage_collect.rs @@ -10,11 +10,11 @@ use super::{ActionRegistry, NexusActionContext, NexusSaga, SagaInitError}; use crate::app::sagas::declare_saga_actions; use crate::app::sagas::volume_delete; use crate::app::{authn, db}; +use omicron_uuid_kinds::VolumeUuid; use serde::Deserialize; use serde::Serialize; use steno::ActionError; use steno::Node; -use uuid::Uuid; // region snapshot replacement step garbage collect saga: input parameters @@ -24,7 +24,7 @@ pub(crate) struct Params { /// The fake volume created for the snapshot that was replaced // Note: this is only required in the params to build the volume-delete sub // saga - pub old_snapshot_volume_id: Uuid, + pub old_snapshot_volume_id: VolumeUuid, pub request: db::model::RegionSnapshotReplacementStep, } @@ -131,6 +131,8 @@ pub(crate) mod test { use nexus_db_queries::context::OpContext; use nexus_db_queries::db::datastore::region_snapshot_replacement; use nexus_test_utils_macros::nexus_test; + use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::VolumeUuid; use sled_agent_client::types::CrucibleOpts; use sled_agent_client::types::VolumeConstructionRequest; use uuid::Uuid; @@ -150,10 +152,10 @@ pub(crate) mod test { ); // Manually insert required records - let old_snapshot_volume_id = Uuid::new_v4(); + let old_snapshot_volume_id = VolumeUuid::new_v4(); let volume_construction_request = VolumeConstructionRequest::Volume { - id: old_snapshot_volume_id, + id: *old_snapshot_volume_id.as_untyped_uuid(), block_size: 0, sub_volumes: vec![VolumeConstructionRequest::Region { block_size: 0, @@ -161,7 +163,7 @@ pub(crate) mod test { extent_count: 0, gen: 0, opts: CrucibleOpts { - id: old_snapshot_volume_id, + id: *old_snapshot_volume_id.as_untyped_uuid(), target: vec![ // if you put something here, you'll need a synthetic // dataset record @@ -187,7 +189,7 @@ pub(crate) mod test { .await .unwrap(); - let step_volume_id = Uuid::new_v4(); + let step_volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( @@ -207,7 +209,7 @@ pub(crate) mod test { RegionSnapshotReplacementStep::new(Uuid::new_v4(), step_volume_id); request.replacement_state = RegionSnapshotReplacementStepState::Complete; - 
request.old_snapshot_volume_id = Some(old_snapshot_volume_id); + request.old_snapshot_volume_id = Some(old_snapshot_volume_id.into()); let result = datastore .insert_region_snapshot_replacement_step(&opctx, request.clone()) diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 4dd958c50b..c6bb3e7711 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -108,7 +108,7 @@ use omicron_common::progenitor_operation_retry::ProgenitorOperationRetryError; use omicron_common::{ api::external, progenitor_operation_retry::ProgenitorOperationRetry, }; -use omicron_uuid_kinds::{GenericUuid, PropolisUuid, SledUuid}; +use omicron_uuid_kinds::{GenericUuid, PropolisUuid, SledUuid, VolumeUuid}; use rand::{rngs::StdRng, RngCore, SeedableRng}; use serde::Deserialize; use serde::Serialize; @@ -389,7 +389,7 @@ async fn ssc_take_volume_lock( osagactx .datastore() - .volume_repair_lock(&opctx, disk.volume_id, lock_id) + .volume_repair_lock(&opctx, disk.volume_id(), lock_id) .await .map_err(ActionError::action_failed)?; @@ -415,7 +415,7 @@ async fn ssc_take_volume_lock_undo( osagactx .datastore() - .volume_repair_unlock(&opctx, disk.volume_id, lock_id) + .volume_repair_unlock(&opctx, disk.volume_id(), lock_id) .await?; Ok(()) @@ -427,7 +427,7 @@ async fn ssc_alloc_regions( let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; let destination_volume_id = - sagactx.lookup::("destination_volume_id")?; + sagactx.lookup::("destination_volume_id")?; // Ensure the destination volume is backed by appropriate regions. // @@ -497,7 +497,7 @@ async fn ssc_regions_ensure( let osagactx = sagactx.user_data(); let log = osagactx.log(); let destination_volume_id = - sagactx.lookup::("destination_volume_id")?; + sagactx.lookup::("destination_volume_id")?; let datasets_and_regions = osagactx .nexus() @@ -517,7 +517,7 @@ async fn ssc_regions_ensure( // Create volume construction request let mut rng = StdRng::from_entropy(); let volume_construction_request = VolumeConstructionRequest::Volume { - id: destination_volume_id, + id: *destination_volume_id.as_untyped_uuid(), block_size, sub_volumes: vec![VolumeConstructionRequest::Region { block_size, @@ -525,7 +525,7 @@ async fn ssc_regions_ensure( extent_count, gen: 1, opts: CrucibleOpts { - id: destination_volume_id, + id: *destination_volume_id.as_untyped_uuid(), target: datasets_and_regions .iter() .map(|(dataset, region)| { @@ -610,7 +610,7 @@ async fn ssc_create_destination_volume_record( let osagactx = sagactx.user_data(); let destination_volume_id = - sagactx.lookup::("destination_volume_id")?; + sagactx.lookup::("destination_volume_id")?; let destination_volume_data = sagactx.lookup::("regions_ensure")?; @@ -633,7 +633,7 @@ async fn ssc_create_destination_volume_record_undo( let osagactx = sagactx.user_data(); let destination_volume_id = - sagactx.lookup::("destination_volume_id")?; + sagactx.lookup::("destination_volume_id")?; // This saga contains what is necessary to clean up the destination volume // resources. It's safe here to perform a volume hard delete without @@ -641,7 +641,7 @@ async fn ssc_create_destination_volume_record_undo( // guaranteed to never have read only resources that require that // accounting. 
- info!(log, "hard deleting volume {}", destination_volume_id,); + info!(log, "hard deleting volume {}", destination_volume_id); osagactx.datastore().volume_hard_delete(destination_volume_id).await?; @@ -664,9 +664,9 @@ async fn ssc_create_snapshot_record( // We admittedly reference the volume(s) before they have been allocated, // but this should be acceptable because the snapshot remains in a // "Creating" state until the saga has completed. - let volume_id = sagactx.lookup::("volume_id")?; + let volume_id = sagactx.lookup::("volume_id")?; let destination_volume_id = - sagactx.lookup::("destination_volume_id")?; + sagactx.lookup::("destination_volume_id")?; info!(log, "grabbing disk by name {}", params.create_params.disk); @@ -686,8 +686,8 @@ async fn ssc_create_snapshot_record( project_id: params.project_id, disk_id: disk.id(), - volume_id, - destination_volume_id, + volume_id: volume_id.into(), + destination_volume_id: destination_volume_id.into(), gen: db::model::Generation::new(), state: db::model::SnapshotState::Creating, @@ -911,7 +911,7 @@ async fn ssc_send_snapshot_request_to_sled_agent_undo( .fetch() .await?; let datasets_and_regions = - osagactx.datastore().get_allocated_regions(disk.volume_id).await?; + osagactx.datastore().get_allocated_regions(disk.volume_id()).await?; // ... and instruct each of those regions to delete the snapshot. for (dataset, region) in datasets_and_regions { @@ -1259,7 +1259,7 @@ async fn ssc_call_pantry_snapshot_for_disk_undo( .fetch() .await?; let datasets_and_regions = - osagactx.datastore().get_allocated_regions(disk.volume_id).await?; + osagactx.datastore().get_allocated_regions(disk.volume_id()).await?; // ... and instruct each of those regions to delete the snapshot. for (dataset, region) in datasets_and_regions { @@ -1405,7 +1405,7 @@ async fn ssc_start_running_snapshot( // region information to the new running snapshot information. let datasets_and_regions = osagactx .datastore() - .get_allocated_regions(disk.volume_id) + .get_allocated_regions(disk.volume_id()) .await .map_err(ActionError::action_failed)?; @@ -1502,7 +1502,7 @@ async fn ssc_start_running_snapshot_undo( .await?; let datasets_and_regions = - osagactx.datastore().get_allocated_regions(disk.volume_id).await?; + osagactx.datastore().get_allocated_regions(disk.volume_id()).await?; // ... and instruct each of those regions to delete the running snapshot. for (dataset, region) in datasets_and_regions { @@ -1531,7 +1531,7 @@ async fn ssc_create_volume_record( let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; - let volume_id = sagactx.lookup::("volume_id")?; + let volume_id = sagactx.lookup::("volume_id")?; // For a snapshot, copy the volume construction request at the time the // snapshot was taken. @@ -1549,7 +1549,7 @@ async fn ssc_create_volume_record( let disk_volume = osagactx .datastore() .volume_checkout( - disk.volume_id, + disk.volume_id(), db::datastore::VolumeCheckoutReason::CopyAndModify, ) .await @@ -1607,7 +1607,7 @@ async fn ssc_create_volume_record_undo( ) -> Result<(), anyhow::Error> { let log = sagactx.user_data().log(); let osagactx = sagactx.user_data(); - let volume_id = sagactx.lookup::("volume_id")?; + let volume_id = sagactx.lookup::("volume_id")?; // `volume_create` will increase the resource count for read only resources // in a volume, which there are guaranteed to be for snapshot volumes. 
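One conversion recurs throughout the snapshot-create hunks above: `VolumeConstructionRequest` and `CrucibleOpts` still carry plain `Uuid`s, so the typed ID is lowered only at that boundary with `*destination_volume_id.as_untyped_uuid()`. A self-contained sketch of the pattern (stand-in newtype; in Omicron the method comes from the `GenericUuid` trait):

use uuid::Uuid;

#[derive(Clone, Copy)]
struct VolumeUuid(Uuid);

impl VolumeUuid {
    fn new_v4() -> Self {
        VolumeUuid(Uuid::new_v4())
    }
    // Borrow the untyped Uuid, mirroring GenericUuid::as_untyped_uuid().
    fn as_untyped_uuid(&self) -> &Uuid {
        &self.0
    }
}

// Stand-in for the sled-agent request type, which is keyed by plain Uuid.
struct VolumeConstructionRequest {
    id: Uuid,
}

fn main() {
    let destination_volume_id = VolumeUuid::new_v4();
    // The typed ID is erased only at the Crucible-facing edge.
    let vcr = VolumeConstructionRequest {
        id: *destination_volume_id.as_untyped_uuid(),
    };
    assert_eq!(vcr.id, *destination_volume_id.as_untyped_uuid());
}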
@@ -1681,7 +1681,7 @@ async fn ssc_release_volume_lock( osagactx .datastore() - .volume_repair_unlock(&opctx, disk.volume_id, lock_id) + .volume_repair_unlock(&opctx, disk.volume_id(), lock_id) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/sagas/snapshot_delete.rs b/nexus/src/app/sagas/snapshot_delete.rs index 75fc16754d..584d4ae67b 100644 --- a/nexus/src/app/sagas/snapshot_delete.rs +++ b/nexus/src/app/sagas/snapshot_delete.rs @@ -54,7 +54,7 @@ impl NexusSaga for SagaSnapshotDelete { let volume_delete_params = sagas::volume_delete::Params { serialized_authn: params.serialized_authn.clone(), - volume_id: params.snapshot.volume_id, + volume_id: params.snapshot.volume_id(), }; builder.append(Node::constant( DELETE_VOLUME_PARAMS, @@ -68,7 +68,7 @@ impl NexusSaga for SagaSnapshotDelete { let volume_delete_params = sagas::volume_delete::Params { serialized_authn: params.serialized_authn.clone(), - volume_id: params.snapshot.destination_volume_id, + volume_id: params.snapshot.destination_volume_id(), }; builder.append(Node::constant( DELETE_VOLUME_DESTINATION_PARAMS, diff --git a/nexus/src/app/sagas/volume_delete.rs b/nexus/src/app/sagas/volume_delete.rs index a8ded4e33c..29acbe074f 100644 --- a/nexus/src/app/sagas/volume_delete.rs +++ b/nexus/src/app/sagas/volume_delete.rs @@ -30,17 +30,17 @@ use crate::app::sagas::declare_saga_actions; use nexus_db_queries::authn; use nexus_db_queries::db::datastore::CrucibleResources; use nexus_db_queries::db::datastore::FreedCrucibleResources; +use omicron_uuid_kinds::VolumeUuid; use serde::Deserialize; use serde::Serialize; use steno::ActionError; -use uuid::Uuid; // volume delete saga: input parameters #[derive(Debug, Deserialize, Serialize)] pub(crate) struct Params { pub serialized_authn: authn::saga::Serialized, - pub volume_id: Uuid, + pub volume_id: VolumeUuid, } // volume delete saga: actions diff --git a/nexus/src/app/sagas/volume_remove_rop.rs b/nexus/src/app/sagas/volume_remove_rop.rs index b614495615..d79310560b 100644 --- a/nexus/src/app/sagas/volume_remove_rop.rs +++ b/nexus/src/app/sagas/volume_remove_rop.rs @@ -8,18 +8,19 @@ use crate::app::sagas::declare_saga_actions; use nexus_db_queries::authn; use nexus_db_queries::db; use omicron_common::api::external::Error; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::VolumeUuid; use serde::Deserialize; use serde::Serialize; use sled_agent_client::types::VolumeConstructionRequest; use steno::{ActionError, Node}; -use uuid::Uuid; // Volume remove read only parent saga: input parameters #[derive(Debug, Deserialize, Serialize)] pub(crate) struct Params { pub serialized_authn: authn::saga::Serialized, - pub volume_id: Uuid, + pub volume_id: VolumeUuid, } // Volume remove_read_only_parent saga: actions @@ -71,7 +72,7 @@ impl NexusSaga for SagaVolumeRemoveROP { mut builder: steno::DagBuilder, ) -> Result { // Generate the temp volume ID this saga will use. - let temp_volume_id = Uuid::new_v4(); + let temp_volume_id = VolumeUuid::new_v4(); // Generate the params for the subsaga called at the end. let subsaga_params = sagas::volume_delete::Params { serialized_authn: params.serialized_authn.clone(), @@ -129,12 +130,12 @@ async fn svr_create_temp_volume( ) -> Result<(), ActionError> { let osagactx = sagactx.user_data(); - let temp_volume_id = sagactx.lookup::("temp_volume_id")?; + let temp_volume_id = sagactx.lookup::("temp_volume_id")?; // Create the crucible VolumeConstructionRequest which we use // for the temporary volume. 
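The `temp_volume_id` generated above, like every `sagactx.lookup::<VolumeUuid>(..)` call in the actions that follow, names both the saga node and the expected type at the call site. A toy model of that lookup shape; the real steno context serializes node outputs rather than storing `Any` values, but the effect on call sites is the same:

use std::any::Any;
use std::collections::HashMap;
use uuid::Uuid;

#[derive(Clone, Copy, Debug, PartialEq)]
struct VolumeUuid(Uuid);

// Toy saga context: each node's output stored under the node's name.
struct Ctx {
    outputs: HashMap<&'static str, Box<dyn Any>>,
}

impl Ctx {
    // Mirrors the shape of sagactx.lookup::<T>("node_name").
    fn lookup<T: Any + Copy>(&self, key: &str) -> Option<T> {
        self.outputs.get(key)?.downcast_ref::<T>().copied()
    }
}

fn main() {
    let mut outputs: HashMap<&'static str, Box<dyn Any>> = HashMap::new();
    outputs.insert("temp_volume_id", Box::new(VolumeUuid(Uuid::new_v4())));
    let ctx = Ctx { outputs };

    // The turbofish documents which kind of ID this node produced; asking
    // for a bare Uuid finds nothing instead of returning the wrong ID.
    assert!(ctx.lookup::<VolumeUuid>("temp_volume_id").is_some());
    assert!(ctx.lookup::<Uuid>("temp_volume_id").is_none());
}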
let volume_construction_request = VolumeConstructionRequest::Volume { - id: temp_volume_id, + id: *temp_volume_id.as_untyped_uuid(), block_size: 512, sub_volumes: vec![], read_only_parent: None, @@ -162,7 +163,7 @@ async fn svr_create_temp_volume_undo( ) -> Result<(), anyhow::Error> { let osagactx = sagactx.user_data(); - let temp_volume_id = sagactx.lookup::("temp_volume_id")?; + let temp_volume_id = sagactx.lookup::("temp_volume_id")?; osagactx .datastore() @@ -178,7 +179,7 @@ async fn svr_remove_read_only_parent( let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; - let temp_volume_id = sagactx.lookup::("temp_volume_id")?; + let temp_volume_id = sagactx.lookup::("temp_volume_id")?; osagactx .datastore() diff --git a/nexus/src/app/volume.rs b/nexus/src/app/volume.rs index b4aa1dee7f..90510c2a61 100644 --- a/nexus/src/app/volume.rs +++ b/nexus/src/app/volume.rs @@ -19,15 +19,15 @@ use omicron_uuid_kinds::DownstairsKind; use omicron_uuid_kinds::TypedUuid; use omicron_uuid_kinds::UpstairsKind; use omicron_uuid_kinds::UpstairsRepairKind; +use omicron_uuid_kinds::VolumeUuid; use std::sync::Arc; -use uuid::Uuid; impl super::Nexus { /// Start a saga to remove a read only parent from a volume. pub(crate) async fn volume_remove_read_only_parent( self: &Arc, opctx: &OpContext, - volume_id: Uuid, + volume_id: VolumeUuid, ) -> DeleteResult { let saga_params = sagas::volume_remove_rop::Params { serialized_authn: authn::saga::Serialized::for_opctx(opctx), diff --git a/nexus/tests/integration_tests/crucible_replacements.rs b/nexus/tests/integration_tests/crucible_replacements.rs index 57dc624187..f697c77cdf 100644 --- a/nexus/tests/integration_tests/crucible_replacements.rs +++ b/nexus/tests/integration_tests/crucible_replacements.rs @@ -31,6 +31,7 @@ use omicron_common::api::external; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_test_utils::dev::poll::{wait_for_condition, CondCheckError}; use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::VolumeUuid; use slog::Logger; use std::collections::HashSet; use std::net::SocketAddr; @@ -123,7 +124,7 @@ async fn test_region_replacement_does_not_create_freed_region( // Next, expunge a physical disk that contains a region let disk_allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); let (dataset, _) = &disk_allocated_regions[0]; let zpool = disk_test .zpools() @@ -217,7 +218,7 @@ mod region_replacement { assert_eq!(db_disk.id(), disk.identity.id); let disk_allocated_regions = datastore - .get_allocated_regions(db_disk.volume_id) + .get_allocated_regions(db_disk.volume_id()) .await .unwrap(); let (_, region) = &disk_allocated_regions[0]; @@ -653,7 +654,7 @@ async fn test_racing_replacements_for_soft_deleted_disk_volume( // means it'll have the region too) let disk_allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); let (dataset, region) = &disk_allocated_regions[0]; let zpool = disk_test .zpools() @@ -715,7 +716,7 @@ async fn test_racing_replacements_for_soft_deleted_disk_volume( .unwrap(); let snapshot_allocated_datasets_and_regions = datastore - .get_allocated_regions(db_snapshot.destination_volume_id) + .get_allocated_regions(db_snapshot.destination_volume_id()) .await .unwrap(); @@ -866,7 +867,7 @@ async fn test_racing_replacements_for_soft_deleted_disk_volume( // volume for later 
deletion, but has not actually deleted that temporary // volume yet, so the count will not have gone to 0. - let volume = datastore.volume_get(db_disk.volume_id).await.unwrap(); + let volume = datastore.volume_get(db_disk.volume_id()).await.unwrap(); assert!(volume.is_some()); assert!(volume.unwrap().time_deleted.is_some()); @@ -914,7 +915,7 @@ async fn test_racing_replacements_for_soft_deleted_disk_volume( // Assert that the disk's volume is still only soft-deleted, because the two // other associated region snapshots still exist. - let volume = datastore.volume_get(db_disk.volume_id).await.unwrap(); + let volume = datastore.volume_get(db_disk.volume_id()).await.unwrap(); assert!(volume.is_some()); // Check on the old region id - it should not be deleted @@ -1104,7 +1105,7 @@ async fn test_racing_replacements_for_soft_deleted_disk_volume( wait_for_condition( || { let datastore = datastore.clone(); - let volume_id = db_disk.volume_id; + let volume_id = db_disk.volume_id(); async move { let volume = datastore.volume_get(volume_id).await.unwrap(); @@ -1207,7 +1208,7 @@ mod region_snapshot_replacement { assert_eq!(db_disk.id(), disk.identity.id); let disk_allocated_regions = datastore - .get_allocated_regions(db_disk.volume_id) + .get_allocated_regions(db_disk.volume_id()) .await .unwrap(); let (_, region) = &disk_allocated_regions[0]; @@ -1262,7 +1263,7 @@ mod region_snapshot_replacement { // Validate that they are snapshot and disk from snapshot - let volumes_set: HashSet = + let volumes_set: HashSet = volumes.into_iter().map(|v| v.id()).collect(); let (.., db_snapshot) = LookupPath::new(&opctx, &datastore) @@ -1278,8 +1279,8 @@ mod region_snapshot_replacement { .await .unwrap(); - assert!(volumes_set.contains(&db_snapshot.volume_id)); - assert!(volumes_set.contains(&db_disk_from_snapshot.volume_id)); + assert!(volumes_set.contains(&db_snapshot.volume_id())); + assert!(volumes_set.contains(&db_disk_from_snapshot.volume_id())); DeletedVolumeTest { log: cptestctx.logctx.log.new(o!()), @@ -1542,7 +1543,7 @@ mod region_snapshot_replacement { .create_region_snapshot_replacement_step( &self.opctx(), self.replacement_request_id, - db_disk_from_snapshot.volume_id, + db_disk_from_snapshot.volume_id(), ) .await .unwrap(); @@ -1784,7 +1785,7 @@ async fn test_replacement_sanity(cptestctx: &ControlPlaneTestContext) { // Next, expunge a physical disk that contains a region let disk_allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); let (dataset, _) = &disk_allocated_regions[0]; let zpool = disk_test @@ -1887,9 +1888,9 @@ async fn test_region_replacement_triple_sanity( let internal_client = &cptestctx.internal_client; let disk_allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); let snapshot_allocated_regions = - datastore.get_allocated_regions(db_snapshot.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_snapshot.volume_id()).await.unwrap(); assert_eq!(disk_allocated_regions.len(), 3); assert_eq!(snapshot_allocated_regions.len(), 0); @@ -1922,9 +1923,9 @@ async fn test_region_replacement_triple_sanity( } let disk_allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); let snapshot_allocated_regions = - datastore.get_allocated_regions(db_snapshot.volume_id).await.unwrap(); + 
datastore.get_allocated_regions(db_snapshot.volume_id()).await.unwrap(); assert_eq!(disk_allocated_regions.len(), 3); assert!(disk_allocated_regions.iter().all(|(_, r)| !r.read_only())); @@ -1999,9 +2000,9 @@ async fn test_region_replacement_triple_sanity_2( let internal_client = &cptestctx.internal_client; let disk_allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); let snapshot_allocated_regions = - datastore.get_allocated_regions(db_snapshot.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_snapshot.volume_id()).await.unwrap(); assert_eq!(disk_allocated_regions.len(), 3); assert_eq!(snapshot_allocated_regions.len(), 0); @@ -2063,9 +2064,9 @@ async fn test_region_replacement_triple_sanity_2( run_replacement_tasks_to_completion(&internal_client).await; let disk_allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); let snapshot_allocated_regions = - datastore.get_allocated_regions(db_snapshot.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_snapshot.volume_id()).await.unwrap(); assert_eq!(disk_allocated_regions.len(), 3); assert!(disk_allocated_regions.iter().all(|(_, r)| !r.read_only())); diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index db16113dd7..1f9ef44158 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -46,6 +46,7 @@ use omicron_common::api::external::{ByteCount, SimpleIdentity as _}; use omicron_nexus::app::{MAX_DISK_SIZE_BYTES, MIN_DISK_SIZE_BYTES}; use omicron_nexus::Nexus; use omicron_nexus::TestInterfaces as _; +use omicron_uuid_kinds::VolumeUuid; use omicron_uuid_kinds::{GenericUuid, InstanceUuid}; use oximeter::types::Datum; use oximeter::types::Measurement; @@ -2072,7 +2073,7 @@ async fn test_single_region_allocate(cptestctx: &ControlPlaneTestContext) { assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB); // Allocate a single 1 GB region - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let datasets_and_regions = datastore .arbitrary_region_allocate( @@ -2164,7 +2165,7 @@ async fn test_region_allocation_strategy_random_is_idempotent( .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); let allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD); // Call `disk_region_allocate` again @@ -2182,7 +2183,7 @@ async fn test_region_allocation_strategy_random_is_idempotent( let datasets_and_regions = datastore .disk_region_allocate( &opctx, - db_disk.volume_id, + db_disk.volume_id(), ¶ms::DiskSource::Blank { block_size: params::BlockSize::try_from( region.block_size().to_bytes() as u32, @@ -2220,7 +2221,7 @@ async fn test_region_allocation_strategy_random_is_idempotent_arbitrary( assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB); // Call region allocation in isolation - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let datasets_and_regions = datastore .arbitrary_region_allocate( @@ -2300,7 +2301,7 @@ async fn test_single_region_allocate_for_replace( .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); let allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + 
datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD); // Allocate one more single 1 GB region to replace one of the disk's regions @@ -2321,7 +2322,7 @@ async fn test_single_region_allocate_for_replace( let datasets_and_regions = datastore .arbitrary_region_allocate( &opctx, - RegionAllocationFor::DiskVolume { volume_id: db_disk.volume_id }, + RegionAllocationFor::DiskVolume { volume_id: db_disk.volume_id() }, RegionAllocationParameters::FromDiskSource { disk_source: &params::DiskSource::Blank { block_size: params::BlockSize::try_from( @@ -2343,7 +2344,7 @@ async fn test_single_region_allocate_for_replace( // There should be `one_more` regions for this disk's volume id. let allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), one_more); // Each region should be on a different pool @@ -2387,7 +2388,7 @@ async fn test_single_region_allocate_for_replace_not_enough_zpools( .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); let allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD); // Allocate one more single 1 GB region to replace one of the disk's regions @@ -2409,7 +2410,7 @@ async fn test_single_region_allocate_for_replace_not_enough_zpools( let result = datastore .arbitrary_region_allocate( &opctx, - RegionAllocationFor::DiskVolume { volume_id: db_disk.volume_id }, + RegionAllocationFor::DiskVolume { volume_id: db_disk.volume_id() }, RegionAllocationParameters::FromDiskSource { disk_source: &params::DiskSource::Blank { block_size: params::BlockSize::try_from( @@ -2430,7 +2431,7 @@ let datasets_and_regions = datastore .arbitrary_region_allocate( &opctx, - RegionAllocationFor::DiskVolume { volume_id: db_disk.volume_id }, + RegionAllocationFor::DiskVolume { volume_id: db_disk.volume_id() }, RegionAllocationParameters::FromDiskSource { disk_source: &params::DiskSource::Blank { block_size: params::BlockSize::try_from( @@ -2525,7 +2526,7 @@ async fn test_no_halt_disk_delete_one_region_on_expunged_agent( // Nexus should hard delete the region records in this case.
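The allocation calls above thread the typed ID through `RegionAllocationFor::DiskVolume { volume_id: db_disk.volume_id() }`, and the snapshot tests further down use the `SnapshotVolume` variant the same way. A simplified stand-in for that parameter enum, trimmed to what these tests exercise:

use uuid::Uuid;

#[derive(Clone, Copy)]
struct VolumeUuid(Uuid);

// Stand-in for the datastore's allocation-target parameter.
enum RegionAllocationFor {
    DiskVolume { volume_id: VolumeUuid },
    SnapshotVolume { volume_id: VolumeUuid, snapshot_id: Uuid },
}

fn arbitrary_region_allocate(target: RegionAllocationFor) {
    // A real implementation would allocate regions against the volume; here
    // the point is only that a disk ID, snapshot ID, or any other Uuid can
    // no longer be handed in where a volume ID is expected.
    match target {
        RegionAllocationFor::DiskVolume { volume_id: _ } => {}
        RegionAllocationFor::SnapshotVolume { volume_id: _, snapshot_id: _ } => {}
    }
}

fn main() {
    let volume_id = VolumeUuid(Uuid::new_v4());
    arbitrary_region_allocate(RegionAllocationFor::DiskVolume { volume_id });
    arbitrary_region_allocate(RegionAllocationFor::SnapshotVolume {
        volume_id,
        snapshot_id: Uuid::new_v4(),
    });
}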
let datasets_and_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert!(datasets_and_regions.is_empty()); } @@ -2558,7 +2559,7 @@ async fn test_disk_expunge(cptestctx: &ControlPlaneTestContext) { .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); let allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD); // Expunge the sled diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index accb4470fb..833709b583 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -42,6 +42,7 @@ use omicron_nexus::app::MIN_DISK_SIZE_BYTES; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::InstanceUuid; +use omicron_uuid_kinds::VolumeUuid; use uuid::Uuid; type ControlPlaneTestContext = @@ -574,8 +575,8 @@ async fn test_reject_creating_disk_from_snapshot( project_id, disk_id: Uuid::new_v4(), - volume_id: Uuid::new_v4(), - destination_volume_id: Uuid::new_v4(), + volume_id: VolumeUuid::new_v4().into(), + destination_volume_id: VolumeUuid::new_v4().into(), gen: db::model::Generation::new(), state: db::model::SnapshotState::Creating, @@ -727,8 +728,8 @@ async fn test_reject_creating_disk_from_illegal_snapshot( project_id, disk_id: Uuid::new_v4(), - volume_id: Uuid::new_v4(), - destination_volume_id: Uuid::new_v4(), + volume_id: VolumeUuid::new_v4().into(), + destination_volume_id: VolumeUuid::new_v4().into(), gen: db::model::Generation::new(), state: db::model::SnapshotState::Creating, @@ -823,8 +824,8 @@ async fn test_reject_creating_disk_from_other_project_snapshot( project_id, disk_id: Uuid::new_v4(), - volume_id: Uuid::new_v4(), - destination_volume_id: Uuid::new_v4(), + volume_id: VolumeUuid::new_v4().into(), + destination_volume_id: VolumeUuid::new_v4().into(), gen: db::model::Generation::new(), state: db::model::SnapshotState::Creating, @@ -1044,8 +1045,8 @@ async fn test_create_snapshot_record_idempotent( project_id, disk_id, - volume_id: Uuid::new_v4(), - destination_volume_id: Uuid::new_v4(), + volume_id: VolumeUuid::new_v4().into(), + destination_volume_id: VolumeUuid::new_v4().into(), gen: db::model::Generation::new(), state: db::model::SnapshotState::Creating, @@ -1095,8 +1096,8 @@ async fn test_create_snapshot_record_idempotent( project_id, disk_id, - volume_id: Uuid::new_v4(), - destination_volume_id: Uuid::new_v4(), + volume_id: VolumeUuid::new_v4().into(), + destination_volume_id: VolumeUuid::new_v4().into(), gen: db::model::Generation::new(), state: db::model::SnapshotState::Creating, @@ -1196,8 +1197,8 @@ async fn test_create_snapshot_record_idempotent( project_id, disk_id, - volume_id: Uuid::new_v4(), - destination_volume_id: Uuid::new_v4(), + volume_id: VolumeUuid::new_v4().into(), + destination_volume_id: VolumeUuid::new_v4().into(), gen: db::model::Generation::new(), state: db::model::SnapshotState::Creating, @@ -1398,13 +1399,13 @@ async fn test_multiple_deletes_not_sent(cptestctx: &ControlPlaneTestContext) { // which will cause the saga to fail. 
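The `VolumeUuid::new_v4().into()` constructions in the snapshot tests above rely on `From`/`Into` conversions between the caller-facing typed ID and whatever wrapper the model row stores, with the matching accessor for reads. A self-contained sketch of both directions (stand-in types, not the real nexus-db-model Snapshot):

use uuid::Uuid;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct VolumeUuid(Uuid);

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct DbVolumeUuid(Uuid);

impl From<VolumeUuid> for DbVolumeUuid {
    fn from(id: VolumeUuid) -> Self {
        DbVolumeUuid(id.0)
    }
}

impl From<DbVolumeUuid> for VolumeUuid {
    fn from(id: DbVolumeUuid) -> Self {
        VolumeUuid(id.0)
    }
}

struct Snapshot {
    volume_id: DbVolumeUuid,
    destination_volume_id: DbVolumeUuid,
}

impl Snapshot {
    fn volume_id(&self) -> VolumeUuid {
        self.volume_id.into()
    }
    fn destination_volume_id(&self) -> VolumeUuid {
        self.destination_volume_id.into()
    }
}

fn main() {
    // Construction sites write `.into()`, as in the test records above...
    let snapshot = Snapshot {
        volume_id: VolumeUuid(Uuid::new_v4()).into(),
        destination_volume_id: VolumeUuid(Uuid::new_v4()).into(),
    };
    // ...and reads go back out through the typed accessors.
    assert_ne!(snapshot.volume_id(), snapshot.destination_volume_id());
}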
let resources_1 = - datastore.soft_delete_volume(db_snapshot_1.volume_id).await.unwrap(); + datastore.soft_delete_volume(db_snapshot_1.volume_id()).await.unwrap(); let resources_2 = - datastore.soft_delete_volume(db_snapshot_2.volume_id).await.unwrap(); + datastore.soft_delete_volume(db_snapshot_2.volume_id()).await.unwrap(); let resources_3 = - datastore.soft_delete_volume(db_snapshot_3.volume_id).await.unwrap(); + datastore.soft_delete_volume(db_snapshot_3.volume_id()).await.unwrap(); let resources_1_datasets_and_regions = datastore.regions_to_delete(&resources_1).await.unwrap(); @@ -1490,7 +1491,7 @@ async fn test_region_allocation_for_snapshot( .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); let allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD); // Create a snapshot of the disk @@ -1525,7 +1526,7 @@ async fn test_region_allocation_for_snapshot( }); let allocated_regions = - datastore.get_allocated_regions(db_snapshot.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_snapshot.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), 0); @@ -1537,7 +1538,7 @@ async fn test_region_allocation_for_snapshot( .arbitrary_region_allocate( &opctx, RegionAllocationFor::SnapshotVolume { - volume_id: db_snapshot.volume_id, + volume_id: db_snapshot.volume_id(), snapshot_id: snapshot.identity.id, }, RegionAllocationParameters::FromDiskSource { @@ -1556,7 +1557,7 @@ async fn test_region_allocation_for_snapshot( .unwrap(); let allocated_regions = - datastore.get_allocated_regions(db_snapshot.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_snapshot.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), 1); @@ -1595,7 +1596,7 @@ async fn test_region_allocation_for_snapshot( .arbitrary_region_allocate( &opctx, RegionAllocationFor::SnapshotVolume { - volume_id: db_snapshot.volume_id, + volume_id: db_snapshot.volume_id(), snapshot_id: snapshot.identity.id, }, RegionAllocationParameters::FromDiskSource { @@ -1614,7 +1615,7 @@ async fn test_region_allocation_for_snapshot( .unwrap(); let allocated_regions = - datastore.get_allocated_regions(db_snapshot.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_snapshot.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), 1); @@ -1625,7 +1626,7 @@ async fn test_region_allocation_for_snapshot( .arbitrary_region_allocate( &opctx, RegionAllocationFor::SnapshotVolume { - volume_id: db_snapshot.volume_id, + volume_id: db_snapshot.volume_id(), snapshot_id: snapshot.identity.id, }, RegionAllocationParameters::FromDiskSource { @@ -1644,7 +1645,7 @@ async fn test_region_allocation_for_snapshot( .unwrap(); let allocated_regions = - datastore.get_allocated_regions(db_snapshot.volume_id).await.unwrap(); + datastore.get_allocated_regions(db_snapshot.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), 2); } diff --git a/nexus/tests/integration_tests/volume_management.rs b/nexus/tests/integration_tests/volume_management.rs index f0eb294e58..8bb1f9fa2b 100644 --- a/nexus/tests/integration_tests/volume_management.rs +++ b/nexus/tests/integration_tests/volume_management.rs @@ -55,13 +55,14 @@ use omicron_common::api::external::Name; use omicron_common::api::internal; use omicron_test_utils::dev::poll::wait_for_condition; use omicron_test_utils::dev::poll::CondCheckError; -use omicron_uuid_kinds::DownstairsKind; 
-use omicron_uuid_kinds::DownstairsRegionKind; +use omicron_uuid_kinds::DownstairsRegionUuid; +use omicron_uuid_kinds::DownstairsUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::TypedUuid; -use omicron_uuid_kinds::UpstairsKind; -use omicron_uuid_kinds::UpstairsRepairKind; -use omicron_uuid_kinds::UpstairsSessionKind; +use omicron_uuid_kinds::UpstairsRepairUuid; +use omicron_uuid_kinds::UpstairsSessionUuid; +use omicron_uuid_kinds::UpstairsUuid; +use omicron_uuid_kinds::VolumeUuid; use rand::prelude::SliceRandom; use rand::{rngs::StdRng, SeedableRng}; use sled_agent_client::types::{CrucibleOpts, VolumeConstructionRequest}; @@ -1348,14 +1349,14 @@ async fn test_delete_image_order_6(cptestctx: &ControlPlaneTestContext) { // A test function to create a volume with the provided read only parent. async fn create_volume( datastore: &Arc, - volume_id: Uuid, + volume_id: VolumeUuid, rop_option: Option, ) { let block_size = 512; // Make the SubVolume let sub_volume = VolumeConstructionRequest::File { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size, path: "/lol".to_string(), }; @@ -1371,7 +1372,7 @@ async fn create_volume( .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size, sub_volumes, read_only_parent: rop, @@ -1391,8 +1392,8 @@ async fn test_volume_remove_read_only_parent_base( let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); - let t_vid = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); + let t_vid = VolumeUuid::new_v4(); let block_size = 512; // Make our read_only_parent @@ -1505,8 +1506,8 @@ async fn test_volume_remove_read_only_parent_no_parent( let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); - let t_vid = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); + let t_vid = VolumeUuid::new_v4(); create_volume(&datastore, volume_id, None).await; // We will get Ok(false) back from this operation. @@ -1523,14 +1524,14 @@ async fn test_volume_remove_read_only_parent_volume_not_volume( let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); - let t_vid = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); + let t_vid = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::File { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size: 512, path: "/lol".to_string(), }) @@ -1552,8 +1553,8 @@ async fn test_volume_remove_read_only_parent_bad_volume( let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); - let t_vid = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); + let t_vid = VolumeUuid::new_v4(); // Nothing should be removed, but we also don't return error. let removed = datastore.volume_remove_rop(volume_id, t_vid).await.unwrap(); @@ -1567,7 +1568,7 @@ async fn test_volume_remove_read_only_parent_volume_deleted( // Test the removal of a read_only_parent from a deleted volume. 
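The `create_volume()` helper above builds the same two-level request every time: a `Volume` wrapper whose `sub_volumes` hold a single `File` entry, plus the optional read-only parent under test. Roughly, with a trimmed-down stand-in for the sled-agent type (the real enum has further variants such as `Region`):

use uuid::Uuid;

// Simplified stand-in for sled_agent_client::types::VolumeConstructionRequest.
enum VolumeConstructionRequest {
    Volume {
        id: Uuid,
        block_size: u64,
        sub_volumes: Vec<VolumeConstructionRequest>,
        read_only_parent: Option<Box<VolumeConstructionRequest>>,
    },
    File {
        id: Uuid,
        block_size: u64,
        path: String,
    },
}

fn build_test_volume(
    volume_id: Uuid,
    read_only_parent: Option<Box<VolumeConstructionRequest>>,
) -> VolumeConstructionRequest {
    let block_size = 512;
    VolumeConstructionRequest::Volume {
        id: volume_id,
        block_size,
        // One File sub-volume, mirroring the "/lol" placeholder in the tests.
        sub_volumes: vec![VolumeConstructionRequest::File {
            id: volume_id,
            block_size,
            path: "/lol".to_string(),
        }],
        read_only_parent,
    }
}

fn main() {
    // A volume with no read-only parent, as in the "no parent" test cases.
    let _vcr = build_test_volume(Uuid::new_v4(), None);
}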
let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let block_size = 512; // Make our read_only_parent @@ -1582,7 +1583,7 @@ async fn test_volume_remove_read_only_parent_volume_deleted( // Soft delete the volume let _cr = datastore.soft_delete_volume(volume_id).await.unwrap(); - let t_vid = Uuid::new_v4(); + let t_vid = VolumeUuid::new_v4(); // Nothing should be removed, but we also don't return error. let removed = datastore.volume_remove_rop(volume_id, t_vid).await.unwrap(); assert!(!removed); @@ -1595,7 +1596,7 @@ async fn test_volume_remove_rop_saga(cptestctx: &ControlPlaneTestContext) { let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let block_size = 512; // Make our read_only_parent @@ -1658,7 +1659,7 @@ async fn test_volume_remove_rop_saga_twice( let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let block_size = 512; // Make our read_only_parent @@ -1730,7 +1731,7 @@ async fn test_volume_remove_rop_saga_no_volume( cptestctx: &ControlPlaneTestContext, ) { // Test calling the saga on a volume that does not exist. - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); println!("Non-existant volume: {:?}", volume_id); let int_client = &cptestctx.internal_client; @@ -1755,14 +1756,14 @@ async fn test_volume_remove_rop_saga_volume_not_volume( // Test saga removal of a read only volume for a volume that is not // of a type to have a read only parent. let nexus = &cptestctx.server.server_context().nexus; - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let datastore = nexus.datastore(); datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::File { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size: 512, path: "/lol".to_string(), }) @@ -1795,7 +1796,7 @@ async fn test_volume_remove_rop_saga_deleted_volume( // takes no action on that deleted volume. let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let block_size = 512; // Make our read_only_parent @@ -1856,13 +1857,13 @@ async fn test_volume_checkout(cptestctx: &ControlPlaneTestContext) { // database when the volume type is Volume with sub_volume Region. let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let block_size = 512; // Create a sub_vol with generation 1. let subvol = create_region(block_size, 1, Uuid::new_v4()); let volume_construction_request = VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size, sub_volumes: vec![subvol], read_only_parent: None, @@ -1907,17 +1908,17 @@ async fn test_volume_checkout_updates_nothing( // not contain a sub_volume with a generation field. let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let block_size = 512; // Build our sub_vol and VCR from parts. 
let subvol = VolumeConstructionRequest::File { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size, path: "/lol".to_string(), }; let volume_construction_request = VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size, sub_volumes: vec![subvol], read_only_parent: None, @@ -1960,7 +1961,7 @@ async fn test_volume_checkout_updates_multiple_gen( // type Region. let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let block_size = 512; // Create two regions. @@ -1969,7 +1970,7 @@ async fn test_volume_checkout_updates_multiple_gen( // Make the volume with our two regions as sub_volumes let volume_construction_request = VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size, sub_volumes: vec![subvol_one, subvol_two], read_only_parent: None, @@ -2026,7 +2027,7 @@ async fn test_volume_checkout_updates_sparse_multiple_gen( // problem let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let block_size = 512; // Create three sub_vols. @@ -2040,7 +2041,7 @@ async fn test_volume_checkout_updates_sparse_multiple_gen( // Make the volume with our three regions as sub_volumes let volume_construction_request = VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size, sub_volumes: vec![subvol_one, subvol_two, subvol_three], read_only_parent: None, @@ -2087,7 +2088,7 @@ async fn test_volume_checkout_updates_sparse_mid_multiple_gen( // middle of the sub_volumes won't be a problem let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let block_size = 512; // Create three sub_vols. @@ -2101,7 +2102,7 @@ async fn test_volume_checkout_updates_sparse_mid_multiple_gen( // Make the volume with our three sub_volumes let volume_construction_request = VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size, sub_volumes: vec![subvol_one, subvol_two, subvol_three], read_only_parent: None, @@ -2146,7 +2147,7 @@ async fn test_volume_checkout_randomize_ids_only_read_only( // non-read-only Regions let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let block_size = 512; // Create three sub_vols. 
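Throughout these integration tests the typed IDs are treated as ordinary values: compared with `assert_eq!`, collected into `HashSet`s, captured by closures and async blocks, and interpolated into log lines and URLs. That presumes the typed UUID implements the usual traits; a stand-in showing the set the tests appear to rely on:

use std::collections::HashSet;
use std::fmt;
use uuid::Uuid;

// Stand-in typed ID deriving the traits the tests above lean on.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct VolumeUuid(Uuid);

impl fmt::Display for VolumeUuid {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

fn main() {
    let volume_id = VolumeUuid(Uuid::new_v4());

    // Eq + Hash: membership checks like volumes_set.contains(&...).
    let volumes_set: HashSet<VolumeUuid> = [volume_id].into_iter().collect();
    assert!(volumes_set.contains(&volume_id));

    // PartialEq + Debug: assert_eq!(usage[0].volume_id(), volume_id).
    assert_eq!(volume_id, volume_id);

    // Copy: move the ID into a closure (or async block) without cloning.
    let check = move || volumes_set.contains(&volume_id);
    assert!(check());

    // Display: "volume id" => %volume_id style logging and URL formatting.
    println!("volume id: {volume_id}");
}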
@@ -2156,7 +2157,7 @@ async fn test_volume_checkout_randomize_ids_only_read_only( // Make the volume with our three sub_volumes let volume_construction_request = VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size, sub_volumes: vec![subvol_one, subvol_two, subvol_three], read_only_parent: None, @@ -2266,12 +2267,12 @@ async fn test_keep_your_targets_straight(cptestctx: &ControlPlaneTestContext) { .unwrap(); } - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let volume = datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size: 512, sub_volumes: vec![], read_only_parent: Some(Box::new( @@ -2334,7 +2335,7 @@ async fn test_keep_your_targets_straight(cptestctx: &ControlPlaneTestContext) { .unwrap(); assert_eq!(usage.len(), 1); - assert_eq!(usage[0].volume_id, volume_id); + assert_eq!(usage[0].volume_id(), volume_id); } // Soft delete the volume, and validate that the volume's region_snapshots @@ -2387,12 +2388,12 @@ async fn test_keep_your_targets_straight(cptestctx: &ControlPlaneTestContext) { .unwrap(); } - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); let volume = datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size: 512, sub_volumes: vec![], read_only_parent: Some(Box::new( @@ -2472,7 +2473,7 @@ async fn test_keep_your_targets_straight(cptestctx: &ControlPlaneTestContext) { .unwrap(); assert_eq!(usage.len(), 1); - assert_eq!(usage[0].volume_id, volume_id); + assert_eq!(usage[0].volume_id(), volume_id); } // Soft delete the volume, and validate that only three region_snapshot @@ -2714,12 +2715,12 @@ async fn test_volume_hard_delete_idempotent( let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); - let volume_id = Uuid::new_v4(); + let volume_id = VolumeUuid::new_v4(); datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::File { - id: volume_id, + id: *volume_id.as_untyped_uuid(), block_size: 512, path: "/lol".to_string(), }) @@ -2741,10 +2742,10 @@ async fn test_upstairs_repair_notify_idempotent( ) { let int_client = &cptestctx.internal_client; - let upstairs_id: TypedUuid = TypedUuid::new_v4(); - let session_id: TypedUuid = TypedUuid::new_v4(); - let repair_id: TypedUuid = TypedUuid::new_v4(); - let region_id: TypedUuid = TypedUuid::new_v4(); + let upstairs_id = UpstairsUuid::new_v4(); + let session_id = UpstairsSessionUuid::new_v4(); + let repair_id = UpstairsRepairUuid::new_v4(); + let region_id = DownstairsRegionUuid::new_v4(); // Send the same start request. 
 let notify_url = format!("/crucible/0/upstairs/{upstairs_id}/repair-start");
@@ -2825,10 +2826,10 @@ async fn test_upstairs_repair_notify_different_finish_status(
 ) {
 let int_client = &cptestctx.internal_client;
- let upstairs_id: TypedUuid<UpstairsKind> = TypedUuid::new_v4();
- let session_id: TypedUuid<UpstairsSessionKind> = TypedUuid::new_v4();
- let repair_id: TypedUuid<UpstairsRepairKind> = TypedUuid::new_v4();
- let region_id: TypedUuid<DownstairsRegionKind> = TypedUuid::new_v4();
+ let upstairs_id = UpstairsUuid::new_v4();
+ let session_id = UpstairsSessionUuid::new_v4();
+ let repair_id = UpstairsRepairUuid::new_v4();
+ let region_id = DownstairsRegionUuid::new_v4();
 let notify_url = format!("/crucible/0/upstairs/{upstairs_id}/repair-finish");
@@ -2885,10 +2886,10 @@ async fn test_upstairs_repair_same_upstairs_retry(
 ) {
 let int_client = &cptestctx.internal_client;
- let upstairs_id: TypedUuid<UpstairsKind> = TypedUuid::new_v4();
- let session_id: TypedUuid<UpstairsSessionKind> = TypedUuid::new_v4();
- let repair_id: TypedUuid<UpstairsRepairKind> = TypedUuid::new_v4();
- let region_id: TypedUuid<DownstairsRegionKind> = TypedUuid::new_v4();
+ let upstairs_id = UpstairsUuid::new_v4();
+ let session_id = UpstairsSessionUuid::new_v4();
+ let repair_id = UpstairsRepairUuid::new_v4();
+ let region_id = DownstairsRegionUuid::new_v4();
 // Simulate one failed repair
@@ -2942,7 +2943,7 @@ async fn test_upstairs_repair_same_upstairs_retry(
 // Simulate the same Upstairs restarting the repair, which passes this time
- let repair_id: TypedUuid<UpstairsRepairKind> = TypedUuid::new_v4();
+ let repair_id = UpstairsRepairUuid::new_v4();
 int_client
 .make_request(
@@ -2995,10 +2996,10 @@ async fn test_upstairs_repair_different_upstairs_retry(
 ) {
 let int_client = &cptestctx.internal_client;
- let upstairs_id: TypedUuid<UpstairsKind> = TypedUuid::new_v4();
- let session_id: TypedUuid<UpstairsSessionKind> = TypedUuid::new_v4();
- let repair_id: TypedUuid<UpstairsRepairKind> = TypedUuid::new_v4();
- let region_id: TypedUuid<DownstairsRegionKind> = TypedUuid::new_v4();
+ let upstairs_id = UpstairsUuid::new_v4();
+ let session_id = UpstairsSessionUuid::new_v4();
+ let repair_id = UpstairsRepairUuid::new_v4();
+ let region_id = DownstairsRegionUuid::new_v4();
 // Simulate one failed repair by one Upstairs
@@ -3052,8 +3053,8 @@ async fn test_upstairs_repair_different_upstairs_retry(
 // Simulate a different Upstairs session restarting the repair, which passes this time
- let session_id: TypedUuid<UpstairsSessionKind> = TypedUuid::new_v4();
- let repair_id: TypedUuid<UpstairsRepairKind> = TypedUuid::new_v4();
+ let session_id = UpstairsSessionUuid::new_v4();
+ let repair_id = UpstairsRepairUuid::new_v4();
 int_client
 .make_request(
@@ -3106,10 +3107,10 @@ async fn test_upstairs_repair_different_upstairs_retry_interrupted(
 ) {
 let int_client = &cptestctx.internal_client;
- let upstairs_id: TypedUuid<UpstairsKind> = TypedUuid::new_v4();
- let session_id: TypedUuid<UpstairsSessionKind> = TypedUuid::new_v4();
- let repair_id: TypedUuid<UpstairsRepairKind> = TypedUuid::new_v4();
- let region_id: TypedUuid<DownstairsRegionKind> = TypedUuid::new_v4();
+ let upstairs_id = UpstairsUuid::new_v4();
+ let session_id = UpstairsSessionUuid::new_v4();
+ let repair_id = UpstairsRepairUuid::new_v4();
+ let region_id = DownstairsRegionUuid::new_v4();
 // Simulate one failed repair by one Upstairs, which was interrupted (which
 // leads to no finish message).
@@ -3143,8 +3144,8 @@ async fn test_upstairs_repair_different_upstairs_retry_interrupted(
 // Simulate a different Upstairs session restarting the interrupted repair,
 // which passes this time
- let session_id: TypedUuid<UpstairsSessionKind> = TypedUuid::new_v4();
- let repair_id: TypedUuid<UpstairsRepairKind> = TypedUuid::new_v4();
+ let session_id = UpstairsSessionUuid::new_v4();
+ let repair_id = UpstairsRepairUuid::new_v4();
 int_client
 .make_request(
@@ -3197,10 +3198,10 @@ async fn test_upstairs_repair_repair_id_and_type_conflict(
 ) {
 let int_client = &cptestctx.internal_client;
- let upstairs_id: TypedUuid<UpstairsKind> = TypedUuid::new_v4();
- let session_id: TypedUuid<UpstairsSessionKind> = TypedUuid::new_v4();
- let repair_id: TypedUuid<UpstairsRepairKind> = TypedUuid::new_v4();
- let region_id: TypedUuid<DownstairsRegionKind> = TypedUuid::new_v4();
+ let upstairs_id = UpstairsUuid::new_v4();
+ let session_id = UpstairsSessionUuid::new_v4();
+ let repair_id = UpstairsRepairUuid::new_v4();
+ let region_id = DownstairsRegionUuid::new_v4();
 let notify_start_url = format!("/crucible/0/upstairs/{upstairs_id}/repair-start");
@@ -3256,10 +3257,10 @@ async fn test_upstairs_repair_submit_progress(
 ) {
 let int_client = &cptestctx.internal_client;
- let upstairs_id: TypedUuid<UpstairsKind> = TypedUuid::new_v4();
- let session_id: TypedUuid<UpstairsSessionKind> = TypedUuid::new_v4();
- let repair_id: TypedUuid<UpstairsRepairKind> = TypedUuid::new_v4();
- let region_id: TypedUuid<DownstairsRegionKind> = TypedUuid::new_v4();
+ let upstairs_id = UpstairsUuid::new_v4();
+ let session_id = UpstairsSessionUuid::new_v4();
+ let repair_id = UpstairsRepairUuid::new_v4();
+ let region_id = DownstairsRegionUuid::new_v4();
 // A repair must be started before progress can be submitted
@@ -3315,8 +3316,8 @@ async fn test_upstairs_repair_reject_submit_progress_when_no_repair(
 ) {
 let int_client = &cptestctx.internal_client;
- let upstairs_id: TypedUuid<UpstairsKind> = TypedUuid::new_v4();
- let repair_id: TypedUuid<UpstairsRepairKind> = TypedUuid::new_v4();
+ let upstairs_id = UpstairsUuid::new_v4();
+ let repair_id = UpstairsRepairUuid::new_v4();
 let progress_url = format!(
 "/crucible/0/upstairs/{upstairs_id}/repair/{repair_id}/progress"
@@ -3345,8 +3346,8 @@ async fn test_upstairs_notify_downstairs_client_stop_request(
 ) {
 let int_client = &cptestctx.internal_client;
- let upstairs_id: TypedUuid<UpstairsKind> = TypedUuid::new_v4();
- let downstairs_id: TypedUuid<DownstairsKind> = TypedUuid::new_v4();
+ let upstairs_id = UpstairsUuid::new_v4();
+ let downstairs_id = DownstairsUuid::new_v4();
 let stop_request_url = format!(
 "/crucible/0/upstairs/{upstairs_id}/downstairs/{downstairs_id}/stop-request"
@@ -3423,8 +3424,8 @@ async fn test_upstairs_notify_downstairs_client_stops(
 ) {
 let int_client = &cptestctx.internal_client;
- let upstairs_id: TypedUuid<UpstairsKind> = TypedUuid::new_v4();
- let downstairs_id: TypedUuid<DownstairsKind> = TypedUuid::new_v4();
+ let upstairs_id = UpstairsUuid::new_v4();
+ let downstairs_id = DownstairsUuid::new_v4();
 let stopped_url = format!(
 "/crucible/0/upstairs/{upstairs_id}/downstairs/{downstairs_id}/stopped"
@@ -3538,12 +3539,12 @@ async fn test_cte_returns_regions(cptestctx: &ControlPlaneTestContext) {
 .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id));
 let allocated_regions =
- datastore.get_allocated_regions(db_disk.volume_id).await.unwrap();
+ datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap();
 assert_eq!(allocated_regions.len(), 3);
 let resources_to_clean_up =
- datastore.soft_delete_volume(db_disk.volume_id).await.unwrap();
+ datastore.soft_delete_volume(db_disk.volume_id()).await.unwrap();
 let datasets_and_regions_to_clean =
 datastore.regions_to_delete(&resources_to_clean_up).await.unwrap();
@@ -3568,8 +3569,8 @@
 struct TestReadOnlyRegionReferenceUsage {
 region: db::model::Region,
 region_address: SocketAddrV6,
- first_volume_id: Uuid,
- second_volume_id: Uuid,
+ first_volume_id: VolumeUuid,
+ second_volume_id: VolumeUuid,
 last_resources_to_delete: Option,
 }
@@ -3593,8 +3594,8 @@ impl TestReadOnlyRegionReferenceUsage {
 create_project_and_pool(client).await;
- let first_volume_id = Uuid::new_v4();
- let second_volume_id = Uuid::new_v4();
+ let first_volume_id = VolumeUuid::new_v4();
+ let second_volume_id = VolumeUuid::new_v4();
 let snapshot_id = Uuid::new_v4();
 let datasets_and_regions = datastore
@@ -3650,7 +3651,7 @@ impl TestReadOnlyRegionReferenceUsage {
 .volume_create(nexus_db_model::Volume::new(
 self.first_volume_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: self.first_volume_id,
+ id: *self.first_volume_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![VolumeConstructionRequest::Region {
 block_size: 512,
@@ -3690,7 +3691,7 @@ impl TestReadOnlyRegionReferenceUsage {
 .unwrap();
 assert_eq!(usage.len(), 1);
- assert_eq!(usage[0].volume_id, self.first_volume_id);
+ assert_eq!(usage[0].volume_id(), self.first_volume_id);
 }
 pub async fn validate_only_second_volume_referenced(&self) {
@@ -3705,7 +3706,7 @@ impl TestReadOnlyRegionReferenceUsage {
 .unwrap();
 assert_eq!(usage.len(), 1);
- assert_eq!(usage[0].volume_id, self.second_volume_id);
+ assert_eq!(usage[0].volume_id(), self.second_volume_id);
 }
 pub async fn delete_first_volume(&mut self) {
@@ -3778,7 +3779,7 @@ impl TestReadOnlyRegionReferenceUsage {
 .volume_create(nexus_db_model::Volume::new(
 self.first_volume_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: self.first_volume_id,
+ id: *self.first_volume_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![],
 read_only_parent: Some(Box::new(
@@ -3813,7 +3814,7 @@ impl TestReadOnlyRegionReferenceUsage {
 .volume_create(nexus_db_model::Volume::new(
 self.second_volume_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: self.second_volume_id,
+ id: *self.second_volume_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![VolumeConstructionRequest::Region {
 block_size: 512,
@@ -3846,7 +3847,7 @@ impl TestReadOnlyRegionReferenceUsage {
 .volume_create(nexus_db_model::Volume::new(
 self.second_volume_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: self.second_volume_id,
+ id: *self.second_volume_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![],
 read_only_parent: Some(Box::new(
@@ -3888,8 +3889,8 @@ impl TestReadOnlyRegionReferenceUsage {
 .unwrap();
 assert_eq!(usage.len(), 2);
- assert!(usage.iter().any(|r| r.volume_id == self.first_volume_id));
- assert!(usage.iter().any(|r| r.volume_id == self.second_volume_id));
+ assert!(usage.iter().any(|r| r.volume_id() == self.first_volume_id));
+ assert!(usage.iter().any(|r| r.volume_id() == self.second_volume_id));
 }
 }
@@ -4119,7 +4120,7 @@ async fn test_read_only_region_reference_counting(
 .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id));
 let allocated_regions =
- datastore.get_allocated_regions(db_disk.volume_id).await.unwrap();
+ datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap();
 assert_eq!(allocated_regions.len(), 3);
@@ -4149,7 +4150,7 @@ async fn test_read_only_region_reference_counting(
 });
 let allocated_regions =
- datastore.get_allocated_regions(db_snapshot.volume_id).await.unwrap();
+ datastore.get_allocated_regions(db_snapshot.volume_id()).await.unwrap();
 assert_eq!(allocated_regions.len(), 1);
 let (_, read_only_region) = &allocated_regions[0];
@@ -4182,7 +4183,7 @@ async fn test_read_only_region_reference_counting(
 .await
 .unwrap()
 .iter()
- .any(|volume| volume.id() == db_disk_from_snapshot.volume_id));
+ .any(|volume| volume.id() == db_disk_from_snapshot.volume_id()));
 // Expect that there are two read-only region references now: one in the
 // snapshot volume, and one in the disk-from-snap volume.
@@ -4197,10 +4198,10 @@ async fn test_read_only_region_reference_counting(
 .unwrap();
 assert_eq!(usage.len(), 2);
- assert!(usage.iter().any(|r| r.volume_id == db_snapshot.volume_id));
+ assert!(usage.iter().any(|r| r.volume_id() == db_snapshot.volume_id()));
 assert!(usage
 .iter()
- .any(|r| r.volume_id == db_disk_from_snapshot.volume_id));
+ .any(|r| r.volume_id() == db_disk_from_snapshot.volume_id()));
 // Deleting the snapshot should _not_ cause the region to get deleted from
 // CRDB
@@ -4249,7 +4250,7 @@ async fn test_read_only_region_reference_counting(
 .unwrap();
 assert_eq!(usage.len(), 1);
- assert_eq!(usage[0].volume_id, db_disk_from_snapshot.volume_id);
+ assert_eq!(usage[0].volume_id(), db_disk_from_snapshot.volume_id());
 // Delete the disk, and expect that does not alter the volume usage records
@@ -4269,7 +4270,7 @@ async fn test_read_only_region_reference_counting(
 .unwrap();
 assert_eq!(usage.len(), 1);
- assert_eq!(usage[0].volume_id, db_disk_from_snapshot.volume_id);
+ assert_eq!(usage[0].volume_id(), db_disk_from_snapshot.volume_id());
 // Delete the disk from snapshot, verify everything is cleaned up
@@ -4359,7 +4360,7 @@ async fn test_read_only_region_reference_counting_layers(
 .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id));
 let allocated_regions =
- datastore.get_allocated_regions(db_disk.volume_id).await.unwrap();
+ datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap();
 assert_eq!(allocated_regions.len(), 3);
@@ -4389,7 +4390,7 @@ async fn test_read_only_region_reference_counting_layers(
 });
 let allocated_regions =
- datastore.get_allocated_regions(db_snapshot.volume_id).await.unwrap();
+ datastore.get_allocated_regions(db_snapshot.volume_id()).await.unwrap();
 assert_eq!(allocated_regions.len(), 1);
 let (_, read_only_region) = &allocated_regions[0];
@@ -4419,7 +4420,7 @@ async fn test_read_only_region_reference_counting_layers(
 .await
 .unwrap()
 .iter()
- .any(|volume| volume.id() == db_disk_from_snapshot.volume_id));
+ .any(|volume| volume.id() == db_disk_from_snapshot.volume_id()));
 // Expect that there are two read-only region references now: one in the
 // snapshot volume, and one in the disk-from-snap volume.
@@ -4434,10 +4435,10 @@ async fn test_read_only_region_reference_counting_layers(
 .unwrap();
 assert_eq!(usage.len(), 2);
- assert!(usage.iter().any(|r| r.volume_id == db_snapshot.volume_id));
+ assert!(usage.iter().any(|r| r.volume_id() == db_snapshot.volume_id()));
 assert!(usage
 .iter()
- .any(|r| r.volume_id == db_disk_from_snapshot.volume_id));
+ .any(|r| r.volume_id() == db_disk_from_snapshot.volume_id()));
 // Take a snapshot of the disk-from-snapshot disk
@@ -4472,11 +4473,13 @@ async fn test_read_only_region_reference_counting_layers(
 .unwrap();
 assert_eq!(usage.len(), 3);
- assert!(usage.iter().any(|r| r.volume_id == db_snapshot.volume_id));
+ assert!(usage.iter().any(|r| r.volume_id() == db_snapshot.volume_id()));
 assert!(usage
 .iter()
- .any(|r| r.volume_id == db_disk_from_snapshot.volume_id));
- assert!(usage.iter().any(|r| r.volume_id == db_double_snapshot.volume_id));
+ .any(|r| r.volume_id() == db_disk_from_snapshot.volume_id()));
+ assert!(usage
+ .iter()
+ .any(|r| r.volume_id() == db_double_snapshot.volume_id()));
 // Delete resources, assert volume resource usage records along the way
@@ -4498,8 +4501,10 @@ async fn test_read_only_region_reference_counting_layers(
 .unwrap();
 assert_eq!(usage.len(), 2);
- assert!(usage.iter().any(|r| r.volume_id == db_snapshot.volume_id));
- assert!(usage.iter().any(|r| r.volume_id == db_double_snapshot.volume_id));
+ assert!(usage.iter().any(|r| r.volume_id() == db_snapshot.volume_id()));
+ assert!(usage
+ .iter()
+ .any(|r| r.volume_id() == db_double_snapshot.volume_id()));
 NexusRequest::object_delete(client, &get_snapshot_url("snapshot"))
 .authn_as(AuthnMode::PrivilegedUser)
@@ -4519,7 +4524,9 @@ async fn test_read_only_region_reference_counting_layers(
 .unwrap();
 assert_eq!(usage.len(), 1);
- assert!(usage.iter().any(|r| r.volume_id == db_double_snapshot.volume_id));
+ assert!(usage
+ .iter()
+ .any(|r| r.volume_id() == db_double_snapshot.volume_id()));
 NexusRequest::object_delete(client, &get_snapshot_url("double-snapshot"))
 .authn_as(AuthnMode::PrivilegedUser)
@@ -4580,7 +4587,7 @@ async fn test_volume_replace_snapshot_respects_accounting(
 .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id));
 let disk_allocated_regions =
- datastore.get_allocated_regions(db_disk.volume_id).await.unwrap();
+ datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap();
 assert_eq!(disk_allocated_regions.len(), 3);
@@ -4596,7 +4603,7 @@ async fn test_volume_replace_snapshot_respects_accounting(
 });
 let allocated_regions =
- datastore.get_allocated_regions(db_snapshot.volume_id).await.unwrap();
+ datastore.get_allocated_regions(db_snapshot.volume_id()).await.unwrap();
 // There won't be any regions for the snapshot volume, only region snapshots
@@ -4608,7 +4615,7 @@ async fn test_volume_replace_snapshot_respects_accounting(
 .arbitrary_region_allocate(
 &opctx,
 RegionAllocationFor::SnapshotVolume {
- volume_id: db_snapshot.volume_id,
+ volume_id: db_snapshot.volume_id(),
 snapshot_id: db_snapshot.id(),
 },
 RegionAllocationParameters::FromDiskSource {
@@ -4626,7 +4633,7 @@ async fn test_volume_replace_snapshot_respects_accounting(
 // Get the newly allocated region
 let mut new_allocated_regions =
- datastore.get_allocated_regions(db_snapshot.volume_id).await.unwrap();
+ datastore.get_allocated_regions(db_snapshot.volume_id()).await.unwrap();
 assert_eq!(new_allocated_regions.len(), 1);
@@ -4639,13 +4646,13 @@ async fn test_volume_replace_snapshot_respects_accounting(
 // Create a blank region to use as the "volume to delete"
- let volume_to_delete_id = Uuid::new_v4();
+ let volume_to_delete_id = VolumeUuid::new_v4();
 datastore
 .volume_create(nexus_db_model::Volume::new(
 volume_to_delete_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: volume_to_delete_id,
+ id: *volume_to_delete_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![],
 read_only_parent: None,
@@ -4687,7 +4694,7 @@ async fn test_volume_replace_snapshot_respects_accounting(
 let replacement_result = datastore
 .volume_replace_snapshot(
- VolumeWithTarget(db_snapshot.volume_id),
+ VolumeWithTarget(db_snapshot.volume_id()),
 ExistingTarget(existing.snapshot_addr.parse().unwrap()),
 ReplacementTarget(replacement),
 VolumeToDelete(volume_to_delete_id),
@@ -4716,13 +4723,13 @@ async fn test_volume_replace_snapshot_respects_accounting(
 .unwrap();
 assert_eq!(usage.len(), 1);
- assert_eq!(usage[0].volume_id, db_snapshot.volume_id);
+ assert_eq!(usage[0].volume_id(), db_snapshot.volume_id());
 // Now, reverse the replacement
 let replacement_result = datastore
 .volume_replace_snapshot(
- VolumeWithTarget(db_snapshot.volume_id),
+ VolumeWithTarget(db_snapshot.volume_id()),
 ExistingTarget(replacement), // swapped!
 ReplacementTarget(existing.snapshot_addr.parse().unwrap()), // swapped!
 VolumeToDelete(volume_to_delete_id),
@@ -4751,7 +4758,7 @@ async fn test_volume_replace_snapshot_respects_accounting(
 .unwrap();
 assert_eq!(usage.len(), 1);
- assert_eq!(usage[0].volume_id, volume_to_delete_id);
+ assert_eq!(usage[0].volume_id(), volume_to_delete_id);
 }
 /// Test that the `volume_remove_rop` function correctly updates volume resource
@@ -4787,7 +4794,7 @@ async fn test_volume_remove_rop_respects_accounting(
 .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id));
 let disk_allocated_regions =
- datastore.get_allocated_regions(db_disk.volume_id).await.unwrap();
+ datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap();
 assert_eq!(disk_allocated_regions.len(), 3);
@@ -4848,21 +4855,21 @@ async fn test_volume_remove_rop_respects_accounting(
 .unwrap();
 assert_eq!(usage.len(), 2);
- assert!(usage.iter().any(|r| r.volume_id == db_snapshot.volume_id));
+ assert!(usage.iter().any(|r| r.volume_id() == db_snapshot.volume_id()));
 assert!(usage
 .iter()
- .any(|r| r.volume_id == db_disk_from_snapshot.volume_id));
+ .any(|r| r.volume_id() == db_disk_from_snapshot.volume_id()));
 }
 // Remove the ROP from disk-from-snapshot
- let volume_to_delete_id = Uuid::new_v4();
+ let volume_to_delete_id = VolumeUuid::new_v4();
 datastore
 .volume_create(nexus_db_model::Volume::new(
 volume_to_delete_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: volume_to_delete_id,
+ id: *volume_to_delete_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![],
 read_only_parent: None,
@@ -4873,7 +4880,10 @@ async fn test_volume_remove_rop_respects_accounting(
 .unwrap();
 let result = datastore
- .volume_remove_rop(db_disk_from_snapshot.volume_id, volume_to_delete_id)
+ .volume_remove_rop(
+ db_disk_from_snapshot.volume_id(),
+ volume_to_delete_id,
+ )
 .await
 .unwrap();
@@ -4908,8 +4918,8 @@ async fn test_volume_remove_rop_respects_accounting(
 .unwrap();
 assert_eq!(usage.len(), 2);
- assert!(usage.iter().any(|r| r.volume_id == db_snapshot.volume_id));
- assert!(usage.iter().any(|r| r.volume_id == volume_to_delete_id));
+ assert!(usage.iter().any(|r| r.volume_id() == db_snapshot.volume_id()));
+ assert!(usage.iter().any(|r| r.volume_id() == volume_to_delete_id));
 }
 }
@@ -4946,7 +4956,7 @@ async fn test_volume_remove_rop_respects_accounting_no_modify_others(
 .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id));
 let disk_allocated_regions =
- datastore.get_allocated_regions(db_disk.volume_id).await.unwrap();
+ datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap();
 assert_eq!(disk_allocated_regions.len(), 3);
@@ -5028,24 +5038,24 @@ async fn test_volume_remove_rop_respects_accounting_no_modify_others(
 .unwrap();
 assert_eq!(usage.len(), 3);
- assert!(usage.iter().any(|r| r.volume_id == db_snapshot.volume_id));
- assert!(usage
- .iter()
- .any(|r| r.volume_id == db_disk_from_snapshot.volume_id));
+ assert!(usage.iter().any(|r| r.volume_id() == db_snapshot.volume_id()));
 assert!(usage
 .iter()
- .any(|r| r.volume_id == db_another_disk_from_snapshot.volume_id));
+ .any(|r| r.volume_id() == db_disk_from_snapshot.volume_id()));
+ assert!(usage.iter().any(
+ |r| r.volume_id() == db_another_disk_from_snapshot.volume_id()
+ ));
 }
 // Remove the ROP from disk-from-snapshot
- let volume_to_delete_id = Uuid::new_v4();
+ let volume_to_delete_id = VolumeUuid::new_v4();
 datastore
 .volume_create(nexus_db_model::Volume::new(
 volume_to_delete_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: volume_to_delete_id,
+ id: *volume_to_delete_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![],
 read_only_parent: None,
@@ -5056,7 +5066,10 @@ async fn test_volume_remove_rop_respects_accounting_no_modify_others(
 .unwrap();
 let result = datastore
- .volume_remove_rop(db_disk_from_snapshot.volume_id, volume_to_delete_id)
+ .volume_remove_rop(
+ db_disk_from_snapshot.volume_id(),
+ volume_to_delete_id,
+ )
 .await
 .unwrap();
@@ -5092,11 +5105,11 @@ async fn test_volume_remove_rop_respects_accounting_no_modify_others(
 .unwrap();
 assert_eq!(usage.len(), 3);
- assert!(usage.iter().any(|r| r.volume_id == db_snapshot.volume_id));
- assert!(usage.iter().any(|r| r.volume_id == volume_to_delete_id));
- assert!(usage
- .iter()
- .any(|r| r.volume_id == db_another_disk_from_snapshot.volume_id));
+ assert!(usage.iter().any(|r| r.volume_id() == db_snapshot.volume_id()));
+ assert!(usage.iter().any(|r| r.volume_id() == volume_to_delete_id));
+ assert!(usage.iter().any(
+ |r| r.volume_id() == db_another_disk_from_snapshot.volume_id()
+ ));
 }
 }
@@ -5333,7 +5346,7 @@ async fn test_migrate_to_ref_count_with_records_soft_delete_volume(
 });
 let resources =
- datastore.soft_delete_volume(db_snapshot.volume_id).await.unwrap();
+ datastore.soft_delete_volume(db_snapshot.volume_id()).await.unwrap();
 // Assert that the region snapshots did not have deleted set to true
@@ -5424,12 +5437,12 @@ async fn test_migrate_to_ref_count_with_records_region_snapshot_deleting(
 // Create two volumes, one with the first three region snapshots, one with
 // the last three region snapshots
- let first_volume_id = Uuid::new_v4();
+ let first_volume_id = VolumeUuid::new_v4();
 datastore
 .volume_create(nexus_db_model::Volume::new(
 first_volume_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: first_volume_id,
+ id: *first_volume_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![],
 read_only_parent: Some(Box::new(
@@ -5462,12 +5475,12 @@ async fn test_migrate_to_ref_count_with_records_region_snapshot_deleting(
 .await
 .unwrap();
- let second_volume_id = Uuid::new_v4();
+ let second_volume_id = VolumeUuid::new_v4();
 datastore
 .volume_create(nexus_db_model::Volume::new(
 second_volume_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: second_volume_id,
+ id: *second_volume_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![],
 read_only_parent: Some(Box::new(
@@ -5597,7 +5610,7 @@ async fn test_double_layer_with_read_only_region_delete(
 .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id));
 let allocated_regions =
- datastore.get_allocated_regions(db_disk.volume_id).await.unwrap();
+ datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap();
 assert_eq!(allocated_regions.len(), 3);
@@ -5706,7 +5719,7 @@ async fn test_double_layer_snapshot_with_read_only_region_delete_2(
 .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id));
 let allocated_regions =
- datastore.get_allocated_regions(db_disk.volume_id).await.unwrap();
+ datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap();
 assert_eq!(allocated_regions.len(), 3);
@@ -5892,7 +5905,7 @@ async fn test_no_zombie_region_snapshots(cptestctx: &ControlPlaneTestContext) {
 });
 let snapshot_volume: Volume = datastore
- .volume_get(db_snapshot.volume_id)
+ .volume_get(db_snapshot.volume_id())
 .await
 .expect("volume_get without error")
 .expect("volume exists");
@@ -5900,12 +5913,12 @@ async fn test_no_zombie_region_snapshots(cptestctx: &ControlPlaneTestContext) {
 let snapshot_vcr: VolumeConstructionRequest =
 serde_json::from_str(snapshot_volume.data()).unwrap();
- let step_3_volume_id = Uuid::new_v4();
+ let step_3_volume_id = VolumeUuid::new_v4();
 datastore
 .volume_create(nexus_db_model::Volume::new(
 step_3_volume_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: step_3_volume_id,
+ id: *step_3_volume_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![],
 read_only_parent: Some(Box::new(snapshot_vcr.clone())),
@@ -5917,7 +5930,8 @@ async fn test_no_zombie_region_snapshots(cptestctx: &ControlPlaneTestContext) {
 // Soft-delete the snapshot volume
- let cr = datastore.soft_delete_volume(db_snapshot.volume_id).await.unwrap();
+ let cr =
+ datastore.soft_delete_volume(db_snapshot.volume_id()).await.unwrap();
 // Assert that no resources are returned for clean-up
@@ -5937,12 +5951,12 @@ async fn test_no_zombie_region_snapshots(cptestctx: &ControlPlaneTestContext) {
 // that uses the snapshot volume as a read-only parent. This call should
 // fail!
- let racing_volume_id = Uuid::new_v4();
+ let racing_volume_id = VolumeUuid::new_v4();
 let result = datastore
 .volume_create(nexus_db_model::Volume::new(
 racing_volume_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: racing_volume_id,
+ id: *racing_volume_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![],
 read_only_parent: Some(Box::new(snapshot_vcr.clone())),
@@ -5981,7 +5995,7 @@ async fn test_no_zombie_read_only_regions(cptestctx: &ControlPlaneTestContext) {
 // Create a volume with three read-only regions
- let step_1_volume_id = Uuid::new_v4();
+ let step_1_volume_id = VolumeUuid::new_v4();
 let snapshot_id = Uuid::new_v4();
 let datasets_and_regions = datastore
@@ -6040,7 +6054,7 @@ async fn test_no_zombie_read_only_regions(cptestctx: &ControlPlaneTestContext) {
 .volume_create(nexus_db_model::Volume::new(
 step_1_volume_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: step_1_volume_id,
+ id: *step_1_volume_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![VolumeConstructionRequest::Region {
 block_size: 512,
@@ -6083,13 +6097,13 @@ async fn test_no_zombie_read_only_regions(cptestctx: &ControlPlaneTestContext) {
 let step_1_vcr: VolumeConstructionRequest =
 serde_json::from_str(step_1_volume.data()).unwrap();
- let step_2_volume_id = Uuid::new_v4();
+ let step_2_volume_id = VolumeUuid::new_v4();
 datastore
 .volume_create(nexus_db_model::Volume::new(
 step_2_volume_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: step_2_volume_id,
+ id: *step_2_volume_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![],
 read_only_parent: Some(Box::new(step_1_vcr.clone())),
@@ -6121,12 +6135,12 @@ async fn test_no_zombie_read_only_regions(cptestctx: &ControlPlaneTestContext) {
 // that uses the step 1 volume as a read-only parent. This call should
 // fail!
- let racing_volume_id = Uuid::new_v4();
+ let racing_volume_id = VolumeUuid::new_v4();
 let result = datastore
 .volume_create(nexus_db_model::Volume::new(
 racing_volume_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: racing_volume_id,
+ id: *racing_volume_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![],
 read_only_parent: Some(Box::new(step_1_vcr)),
@@ -6167,7 +6181,7 @@ async fn test_no_zombie_read_write_regions(
 // Create a volume with three read-only regions
- let step_1_volume_id = Uuid::new_v4();
+ let step_1_volume_id = VolumeUuid::new_v4();
 let snapshot_id = Uuid::new_v4();
 let datasets_and_regions = datastore
@@ -6226,7 +6240,7 @@ async fn test_no_zombie_read_write_regions(
 .volume_create(nexus_db_model::Volume::new(
 step_1_volume_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: step_1_volume_id,
+ id: *step_1_volume_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![VolumeConstructionRequest::Region {
 block_size: 512,
@@ -6269,13 +6283,13 @@ async fn test_no_zombie_read_write_regions(
 let step_1_vcr: VolumeConstructionRequest =
 serde_json::from_str(step_1_volume.data()).unwrap();
- let step_2_volume_id = Uuid::new_v4();
+ let step_2_volume_id = VolumeUuid::new_v4();
 datastore
 .volume_create(nexus_db_model::Volume::new(
 step_2_volume_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: step_2_volume_id,
+ id: *step_2_volume_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![],
 read_only_parent: Some(Box::new(step_1_vcr.clone())),
@@ -6307,12 +6321,12 @@ async fn test_no_zombie_read_write_regions(
 // that uses the step 1 volume as a read-only parent. This call should
 // fail!
- let racing_volume_id = Uuid::new_v4();
+ let racing_volume_id = VolumeUuid::new_v4();
 let result = datastore
 .volume_create(nexus_db_model::Volume::new(
 racing_volume_id,
 serde_json::to_string(&VolumeConstructionRequest::Volume {
- id: racing_volume_id,
+ id: *racing_volume_id.as_untyped_uuid(),
 block_size: 512,
 sub_volumes: vec![],
 read_only_parent: Some(Box::new(step_1_vcr)),
diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json
index 5c39652aa3..577f38e897 100644
--- a/openapi/nexus-internal.json
+++ b/openapi/nexus-internal.json
@@ -1526,8 +1526,7 @@
 "name": "volume_id",
 "required": true,
 "schema": {
- "type": "string",
- "format": "uuid"
+ "$ref": "#/components/schemas/TypedUuidForVolumeKind"
 }
 }
 ],
@@ -6096,6 +6095,10 @@
 "TypedUuidForPropolisKind": {
 "type": "string",
 "format": "uuid"
+ },
+ "TypedUuidForVolumeKind": {
+ "type": "string",
+ "format": "uuid"
 }
 },
 "responses": {
diff --git a/uuid-kinds/src/lib.rs b/uuid-kinds/src/lib.rs
index 42c50379ce..c5d5551d0e 100644
--- a/uuid-kinds/src/lib.rs
+++ b/uuid-kinds/src/lib.rs
@@ -72,5 +72,6 @@ impl_typed_uuid_kind! {
 UpstairsRepair => "upstairs_repair",
 UpstairsSession => "upstairs_session",
 Vnic => "vnic",
+ Volume => "volume",
 Zpool => "zpool",
 }
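For readers unfamiliar with the typed-UUID pattern this change adopts, below is a minimal sketch, not part of the patch, of how the new Volume kind is used. It assumes the GenericUuid trait from omicron-uuid-kinds supplies the untyped conversions, mirroring the as_untyped_uuid() and into_untyped_uuid() calls in the hunks above; the function name is hypothetical.

use omicron_uuid_kinds::{GenericUuid, VolumeUuid};
use uuid::Uuid;

// Hypothetical helper: a typed volume ID cannot be confused with other ID
// kinds at compile time, and is converted to a plain `Uuid` only at the
// boundary (e.g. when filling in a `VolumeConstructionRequest::Volume.id`).
fn example_volume_id_round_trip() {
    let volume_id = VolumeUuid::new_v4();

    // Borrowing conversion, as in `id: *volume_id.as_untyped_uuid()`.
    let raw: Uuid = *volume_id.as_untyped_uuid();

    // Owning conversion, as used when collecting untyped IDs.
    assert_eq!(raw, volume_id.into_untyped_uuid());
}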