diff --git a/Cargo.lock b/Cargo.lock index 57bea4456a..69cd1923e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6135,6 +6135,7 @@ dependencies = [ "nexus-sled-agent-shared", "nexus-types", "omicron-common", + "omicron-uuid-kinds", "omicron-workspace-hack", "slog", "uuid", diff --git a/clients/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs index acb3a1d558..41ce658233 100644 --- a/clients/nexus-client/src/lib.rs +++ b/clients/nexus-client/src/lib.rs @@ -48,6 +48,7 @@ progenitor::generate_api!( RecoverySiloConfig = nexus_sled_agent_shared::recovery_silo::RecoverySiloConfig, Srv = nexus_types::internal_api::params::Srv, TypedUuidForCollectionKind = omicron_uuid_kinds::CollectionUuid, + TypedUuidForDatasetKind = omicron_uuid_kinds::TypedUuid<omicron_uuid_kinds::DatasetKind>, TypedUuidForDemoSagaKind = omicron_uuid_kinds::DemoSagaUuid, TypedUuidForDownstairsKind = omicron_uuid_kinds::TypedUuid<omicron_uuid_kinds::DownstairsKind>, TypedUuidForPropolisKind = omicron_uuid_kinds::TypedUuid<omicron_uuid_kinds::PropolisKind>, diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 3ceba3fc25..dcd9b4254f 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -52,6 +52,7 @@ use internal_dns_types::names::ServiceName; use ipnetwork::IpNetwork; use itertools::Itertools; use nexus_config::PostgresConfigWithUrl; +use nexus_db_model::to_db_typed_uuid; use nexus_db_model::Dataset; use nexus_db_model::Disk; use nexus_db_model::DnsGroup; @@ -126,6 +127,7 @@ use omicron_common::api::external::Generation; use omicron_common::api::external::InstanceState; use omicron_common::api::external::MacAddr; use omicron_uuid_kinds::CollectionUuid; +use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::InstanceUuid; use omicron_uuid_kinds::PropolisUuid; @@ -772,7 +774,7 @@ struct RegionSnapshotReplacementInfoArgs { #[derive(Debug, Args)] struct RegionSnapshotReplacementRequestArgs { /// The dataset id for a given region snapshot - dataset_id: Uuid, + dataset_id: DatasetUuid, /// The region id for a given region snapshot region_id: Uuid, @@ -1634,7 +1636,7 @@ async fn cmd_db_disk_physical( .context("loading zpool from pysical disk id")?; let mut sled_ids = HashSet::new(); - let mut dataset_ids = HashSet::new(); + let mut dataset_ids: HashSet<DatasetUuid> = HashSet::new(); if zpools.is_empty() { println!("Found no zpools on physical disk UUID {}", args.uuid); @@ -1692,7 +1694,7 @@ async fn cmd_db_disk_physical( for did in dataset_ids.clone().into_iter() { use db::schema::region::dsl as region_dsl; let regions = region_dsl::region - .filter(region_dsl::dataset_id.eq(did)) + .filter(region_dsl::dataset_id.eq(to_db_typed_uuid(did))) .select(Region::as_select()) .load_async(&*conn) .await @@ -1774,6 +1776,9 @@ async fn cmd_db_disk_physical( println!("{}", table); // Collect the region_snapshots associated with the dataset IDs + let dataset_ids: Vec<_> = + dataset_ids.into_iter().map(|did| to_db_typed_uuid(did)).collect(); + let limit = fetch_opts.fetch_limit; use db::schema::region_snapshot::dsl as region_snapshot_dsl; let region_snapshots = region_snapshot_dsl::region_snapshot @@ -2328,7 +2333,7 @@ async fn cmd_db_region_list( #[derive(Tabled)] struct RegionRow { id: Uuid, - dataset_id: Uuid, + dataset_id: DatasetUuid, volume_id: Uuid, block_size: i64, blocks_per_extent: u64, @@ -2563,7 +2568,7 @@ async fn cmd_db_region_find_deleted( #[derive(Tabled)] struct Row { - dataset_id: Uuid, + dataset_id: DatasetUuid, region_id: Uuid, volume_id: String, } @@ -4418,7 +4423,7 @@ async fn cmd_db_validate_volume_references( 
#[derive(Tabled)] struct Row { - dataset_id: Uuid, + dataset_id: DatasetUuid, region_id: Uuid, snapshot_id: Uuid, error: String, @@ -4480,7 +4485,7 @@ async fn cmd_db_validate_volume_references( if matching_volumes != region_snapshot.volume_references as usize { rows.push(Row { - dataset_id: region_snapshot.dataset_id, + dataset_id: region_snapshot.dataset_id.into(), region_id: region_snapshot.region_id, snapshot_id: region_snapshot.snapshot_id, error: format!( @@ -4498,7 +4503,7 @@ async fn cmd_db_validate_volume_references( if matching_volumes == 0 && !region_snapshot.deleting { rows.push(Row { - dataset_id: region_snapshot.dataset_id, + dataset_id: region_snapshot.dataset_id.into(), region_id: region_snapshot.region_id, snapshot_id: region_snapshot.snapshot_id, error: String::from( @@ -4556,7 +4561,7 @@ async fn cmd_db_validate_region_snapshots( #[derive(Tabled)] struct Row { - dataset_id: Uuid, + dataset_id: DatasetUuid, region_id: Uuid, snapshot_id: Uuid, dataset_addr: std::net::SocketAddrV6, @@ -4640,7 +4645,7 @@ async fn cmd_db_validate_region_snapshots( // This is ok - Nexus currently soft-deletes its // resource records. rows.push(Row { - dataset_id: region_snapshot.dataset_id, + dataset_id: region_snapshot.dataset_id.into(), region_id: region_snapshot.region_id, snapshot_id: region_snapshot.snapshot_id, dataset_addr, @@ -4655,7 +4660,7 @@ async fn cmd_db_validate_region_snapshots( // higher level Snapshot was not deleted! rows.push(Row { - dataset_id: region_snapshot.dataset_id, + dataset_id: region_snapshot.dataset_id.into(), region_id: region_snapshot.region_id, snapshot_id: region_snapshot.snapshot_id, dataset_addr, @@ -4684,7 +4689,7 @@ async fn cmd_db_validate_region_snapshots( // the Agent, so it's a bug. rows.push(Row { - dataset_id: region_snapshot.dataset_id, + dataset_id: region_snapshot.dataset_id.into(), region_id: region_snapshot.region_id, snapshot_id: region_snapshot.snapshot_id, dataset_addr, diff --git a/nexus/db-model/src/dataset.rs b/nexus/db-model/src/dataset.rs index 64f602afd3..0f8845e216 100644 --- a/nexus/db-model/src/dataset.rs +++ b/nexus/db-model/src/dataset.rs @@ -2,7 +2,8 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-use super::{ByteCount, DatasetKind, Generation, Region, SqlU16}; +use super::DatasetKind; +use super::{ByteCount, Generation, Region, SqlU16}; use crate::collection::DatastoreCollectionConfig; use crate::ipv6; use crate::schema::{dataset, region}; @@ -11,7 +12,6 @@ use db_macros::Asset; use nexus_types::deployment::BlueprintDatasetConfig; use omicron_common::api::external::Error; use omicron_common::api::internal::shared::DatasetKind as ApiDatasetKind; -use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::ZpoolUuid; use serde::{Deserialize, Serialize}; @@ -34,6 +34,7 @@ use uuid::Uuid; PartialEq, )] #[diesel(table_name = dataset)] +#[asset(uuid_kind = DatasetKind)] pub struct Dataset { #[diesel(embed)] identity: DatasetIdentity, @@ -62,7 +63,7 @@ pub struct Dataset { impl Dataset { pub fn new( - id: Uuid, + id: omicron_uuid_kinds::DatasetUuid, pool_id: Uuid, addr: Option<SocketAddrV6>, api_kind: ApiDatasetKind, @@ -119,7 +120,7 @@ impl From<BlueprintDatasetConfig> for Dataset { }; let addr = bp.address; Self { - identity: DatasetIdentity::new(bp.id.into_untyped_uuid()), + identity: DatasetIdentity::new(bp.id), time_deleted: None, rcgen: Generation::new(), pool_id: bp.pool.id().into_untyped_uuid(), @@ -148,7 +149,7 @@ impl TryFrom<Dataset> for omicron_common::disk::DatasetConfig { }; Ok(Self { - id: DatasetUuid::from_untyped_uuid(dataset.identity.id), + id: dataset.identity.id.into(), name: omicron_common::disk::DatasetName::new( omicron_common::zpool_name::ZpoolName::new_external( ZpoolUuid::from_untyped_uuid(dataset.pool_id), diff --git a/nexus/db-model/src/region.rs b/nexus/db-model/src/region.rs index 02c7db8120..417affea89 100644 --- a/nexus/db-model/src/region.rs +++ b/nexus/db-model/src/region.rs @@ -4,9 +4,12 @@ use super::ByteCount; use crate::schema::region; +use crate::typed_uuid::DbTypedUuid; use crate::SqlU16; use db_macros::Asset; use omicron_common::api::external; +use omicron_uuid_kinds::DatasetKind; +use omicron_uuid_kinds::DatasetUuid; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -30,7 +33,7 @@ pub struct Region { #[diesel(embed)] identity: RegionIdentity, - dataset_id: Uuid, + dataset_id: DbTypedUuid<DatasetKind>, volume_id: Uuid, block_size: ByteCount, @@ -54,7 +57,7 @@ pub struct Region { impl Region { pub fn new( - dataset_id: Uuid, + dataset_id: DatasetUuid, volume_id: Uuid, block_size: ByteCount, blocks_per_extent: u64, @@ -64,7 +67,7 @@ impl Region { ) -> Self { Self { identity: RegionIdentity::new(Uuid::new_v4()), - dataset_id, + dataset_id: dataset_id.into(), volume_id, block_size, blocks_per_extent: blocks_per_extent as i64, @@ -81,8 +84,8 @@ impl Region { pub fn volume_id(&self) -> Uuid { self.volume_id } - pub fn dataset_id(&self) -> Uuid { - self.dataset_id + pub fn dataset_id(&self) -> DatasetUuid { + self.dataset_id.into() } pub fn block_size(&self) -> external::ByteCount { self.block_size.0 diff --git a/nexus/db-model/src/region_snapshot.rs b/nexus/db-model/src/region_snapshot.rs index 1b39a5b6f4..5d52d1f0ad 100644 --- a/nexus/db-model/src/region_snapshot.rs +++ b/nexus/db-model/src/region_snapshot.rs @@ -3,6 +3,9 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
use crate::schema::region_snapshot; +use crate::typed_uuid::DbTypedUuid; +use omicron_uuid_kinds::DatasetKind; +use omicron_uuid_kinds::DatasetUuid; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -23,7 +26,7 @@ use uuid::Uuid; #[diesel(table_name = region_snapshot)] pub struct RegionSnapshot { // unique identifier of this region snapshot - pub dataset_id: Uuid, + pub dataset_id: DbTypedUuid<DatasetKind>, pub region_id: Uuid, pub snapshot_id: Uuid, @@ -43,13 +46,13 @@ pub struct RegionSnapshot { impl RegionSnapshot { pub fn new( - dataset_id: Uuid, + dataset_id: DatasetUuid, region_id: Uuid, snapshot_id: Uuid, snapshot_addr: String, ) -> Self { RegionSnapshot { - dataset_id, + dataset_id: dataset_id.into(), region_id, snapshot_id, snapshot_addr, @@ -58,4 +61,8 @@ impl RegionSnapshot { deleting: false, } } + + pub fn dataset_id(&self) -> DatasetUuid { + self.dataset_id.into() + } } diff --git a/nexus/db-model/src/region_snapshot_replacement.rs b/nexus/db-model/src/region_snapshot_replacement.rs index 183c9034c0..bcbd55028d 100644 --- a/nexus/db-model/src/region_snapshot_replacement.rs +++ b/nexus/db-model/src/region_snapshot_replacement.rs @@ -4,9 +4,12 @@ use super::impl_enum_type; use crate::schema::region_snapshot_replacement; +use crate::typed_uuid::DbTypedUuid; use crate::RegionSnapshot; use chrono::DateTime; use chrono::Utc; +use omicron_uuid_kinds::DatasetKind; +use omicron_uuid_kinds::DatasetUuid; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -118,7 +121,7 @@ pub struct RegionSnapshotReplacement { pub request_time: DateTime<Utc>, // These are a copy of fields from the corresponding region snapshot record - pub old_dataset_id: Uuid, + pub old_dataset_id: DbTypedUuid<DatasetKind>, pub old_region_id: Uuid, pub old_snapshot_id: Uuid, @@ -135,21 +138,21 @@ impl RegionSnapshotReplacement { pub fn for_region_snapshot(region_snapshot: &RegionSnapshot) -> Self { Self::new( - region_snapshot.dataset_id, + region_snapshot.dataset_id(), region_snapshot.region_id, region_snapshot.snapshot_id, ) } pub fn new( - old_dataset_id: Uuid, + old_dataset_id: DatasetUuid, old_region_id: Uuid, old_snapshot_id: Uuid, ) -> Self { Self { id: Uuid::new_v4(), request_time: Utc::now(), - old_dataset_id, + old_dataset_id: old_dataset_id.into(), old_region_id, old_snapshot_id, old_snapshot_volume_id: None, diff --git a/nexus/db-model/src/volume_resource_usage.rs b/nexus/db-model/src/volume_resource_usage.rs index c55b053c62..a5203bae46 100644 --- a/nexus/db-model/src/volume_resource_usage.rs +++ b/nexus/db-model/src/volume_resource_usage.rs @@ -4,6 +4,9 @@ use super::impl_enum_type; use crate::schema::volume_resource_usage; +use crate::typed_uuid::DbTypedUuid; +use omicron_uuid_kinds::DatasetKind; +use omicron_uuid_kinds::DatasetUuid; use uuid::Uuid; impl_enum_type!( @@ -54,16 +57,22 @@ pub struct VolumeResourceUsageRecord { pub region_id: Option<Uuid>, - pub region_snapshot_dataset_id: Option<Uuid>, + pub region_snapshot_dataset_id: Option<DbTypedUuid<DatasetKind>>, pub region_snapshot_region_id: Option<Uuid>, pub region_snapshot_snapshot_id: Option<Uuid>, } #[derive(Debug, Clone)] pub enum VolumeResourceUsage { - ReadOnlyRegion { region_id: Uuid }, - - RegionSnapshot { dataset_id: Uuid, region_id: Uuid, snapshot_id: Uuid }, + ReadOnlyRegion { + region_id: Uuid, + }, + + RegionSnapshot { + dataset_id: DatasetUuid, + region_id: Uuid, + snapshot_id: Uuid, + }, } impl VolumeResourceUsageRecord { @@ -94,7 +103,7 @@ impl VolumeResourceUsageRecord { region_id: None, - region_snapshot_dataset_id: Some(dataset_id), + 
region_snapshot_dataset_id: Some(dataset_id.into()), region_snapshot_region_id: Some(region_id), region_snapshot_snapshot_id: Some(snapshot_id), }, @@ -132,7 +141,7 @@ impl TryFrom<VolumeResourceUsageRecord> for VolumeResourceUsage { }; Ok(VolumeResourceUsage::RegionSnapshot { - dataset_id, + dataset_id: dataset_id.into(), region_id, snapshot_id, }) diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index 996f105254..f931403cb9 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -15,6 +15,7 @@ use crate::db::error::public_error_from_diesel; use crate::db::error::retryable; use crate::db::error::ErrorHandler; use crate::db::identity::Asset; +use crate::db::model::to_db_typed_uuid; use crate::db::model::Dataset; use crate::db::model::PhysicalDisk; use crate::db::model::PhysicalDiskPolicy; @@ -42,11 +43,14 @@ use omicron_uuid_kinds::GenericUuid; use uuid::Uuid; impl DataStore { - pub async fn dataset_get(&self, dataset_id: Uuid) -> LookupResult<Dataset> { + pub async fn dataset_get( + &self, + dataset_id: DatasetUuid, + ) -> LookupResult<Dataset> { use db::schema::dataset::dsl; dsl::dataset - .filter(dsl::id.eq(dataset_id)) + .filter(dsl::id.eq(to_db_typed_uuid(dataset_id))) .select(Dataset::as_select()) .first_async::<Dataset>( &*self.pool_connection_unauthorized().await?, ) @@ -230,8 +234,10 @@ impl DataStore { let batch = self .dataset_list(opctx, filter_kind, &p.current_pagparams()) .await?; - paginator = - p.found_batch(&batch, &|d: &nexus_db_model::Dataset| d.id()); + paginator = p + .found_batch(&batch, &|d: &nexus_db_model::Dataset| { + d.id().into_untyped_uuid() + }); all_datasets.extend(batch); } @@ -282,10 +288,9 @@ impl DataStore { use db::schema::dataset::dsl as dataset_dsl; let now = Utc::now(); - let id = *id.as_untyped_uuid(); diesel::update(dataset_dsl::dataset) .filter(dataset_dsl::time_deleted.is_null()) - .filter(dataset_dsl::id.eq(id)) + .filter(dataset_dsl::id.eq(to_db_typed_uuid(id))) .set(dataset_dsl::time_deleted.eq(now)) .execute_async(conn) .await @@ -297,7 +302,7 @@ impl DataStore { pub async fn dataset_physical_disk_in_service( &self, - dataset_id: Uuid, + dataset_id: DatasetUuid, ) -> LookupResult<bool> { let conn = self.pool_connection_unauthorized().await?; @@ -305,7 +310,7 @@ use db::schema::dataset::dsl; dsl::dataset - .filter(dsl::id.eq(dataset_id)) + .filter(dsl::id.eq(to_db_typed_uuid(dataset_id))) .select(Dataset::as_select()) .first_async::<Dataset>(&*conn) .await @@ -357,6 +362,7 @@ mod test { use nexus_types::deployment::BlueprintTarget; use omicron_common::api::internal::shared::DatasetKind as ApiDatasetKind; use omicron_test_utils::dev; + use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; @@ -418,7 +424,7 @@ mod test { // Inserting a new dataset should succeed. let dataset1 = datastore .dataset_insert_if_not_exists(Dataset::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), *zpool_id.as_untyped_uuid(), Some("[::1]:0".parse().unwrap()), ApiDatasetKind::Crucible, @@ -467,7 +473,7 @@ mod test { // We can can also upsert a different dataset... 
let dataset2 = datastore .dataset_upsert(Dataset::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), *zpool_id.as_untyped_uuid(), Some("[::1]:0".parse().unwrap()), ApiDatasetKind::Cockroach, @@ -540,7 +546,7 @@ mod test { fn new_dataset_on(zpool_id: ZpoolUuid) -> Dataset { Dataset::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), *zpool_id.as_untyped_uuid(), Some("[::1]:0".parse().unwrap()), ApiDatasetKind::Cockroach, @@ -605,7 +611,7 @@ mod test { .dataset_delete_if_blueprint_is_current_target( &opctx, old_blueprint_id, - DatasetUuid::from_untyped_uuid(dataset.id()), + dataset.id(), ) .await .expect_err( @@ -617,7 +623,7 @@ mod test { .dataset_delete_if_blueprint_is_current_target( &opctx, current_blueprint_id, - DatasetUuid::from_untyped_uuid(dataset.id()), + dataset.id(), ) .await .expect("Should be able to delete while blueprint is active"); diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 059d43b8c7..5bd35fbba9 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -473,6 +473,7 @@ mod test { use omicron_common::api::internal::shared::DatasetKind; use omicron_test_utils::dev; use omicron_uuid_kinds::CollectionUuid; + use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SledUuid; use std::collections::HashMap; @@ -820,12 +821,12 @@ mod test { ineligible: SledToDatasetMap, // A map from eligible dataset IDs to their corresponding sled IDs. - eligible_dataset_ids: HashMap<Uuid, SledUuid>, - ineligible_dataset_ids: HashMap<Uuid, SledUuid>, + eligible_dataset_ids: HashMap<DatasetUuid, SledUuid>, + ineligible_dataset_ids: HashMap<DatasetUuid, SledUuid>, } // Map of sled IDs to dataset IDs. - type SledToDatasetMap = HashMap<SledUuid, Vec<Uuid>>; + type SledToDatasetMap = HashMap<SledUuid, Vec<DatasetUuid>>; impl TestDatasets { pub(crate) async fn create( @@ -962,7 +963,7 @@ mod test { let zpool_iter: Vec = (0..3).map(|_| zpool).collect(); stream::iter(zpool_iter).then(|zpool| { - let dataset_id = Uuid::new_v4(); + let dataset_id = DatasetUuid::new_v4(); let dataset = Dataset::new( dataset_id, zpool.pool_id, @@ -1043,7 +1044,8 @@ mod test { for (dataset, region) in dataset_and_regions { // Must be 3 unique datasets - assert!(disk_datasets.insert(dataset.id())); + let dataset_id = dataset.id(); + assert!(disk_datasets.insert(dataset_id.into_untyped_uuid())); // All regions should be unique assert!(regions.insert(region.id())); @@ -1052,7 +1054,7 @@ mod test { // This is a little goofy, but it catches a bug that has // happened before. The returned columns share names (like // "id"), so we need to process them in-order. - assert!(!regions.contains(&dataset.id())); + assert!(!regions.contains(dataset_id.as_untyped_uuid())); assert!(!disk_datasets.contains(&region.id())); // Dataset must not be eligible for provisioning. 
@@ -1334,7 +1336,7 @@ mod test { // 1 dataset per zpool stream::iter(zpool_ids.clone()) .then(|zpool_id| { - let id = Uuid::new_v4(); + let id = DatasetUuid::new_v4(); let dataset = Dataset::new( id, zpool_id, @@ -1434,7 +1436,7 @@ mod test { // 1 dataset per zpool stream::iter(zpool_ids) .then(|zpool_id| { - let id = Uuid::new_v4(); + let id = DatasetUuid::new_v4(); let dataset = Dataset::new( id, zpool_id, @@ -1511,7 +1513,7 @@ mod test { let bogus_addr = Some(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0)); let dataset = Dataset::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), zpool_id, bogus_addr, DatasetKind::Crucible, diff --git a/nexus/db-queries/src/db/datastore/region.rs b/nexus/db-queries/src/db/datastore/region.rs index da32494d6f..885cb622b8 100644 --- a/nexus/db-queries/src/db/datastore/region.rs +++ b/nexus/db-queries/src/db/datastore/region.rs @@ -13,6 +13,7 @@ use crate::db::datastore::SQL_BATCH_SIZE; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::lookup::LookupPath; +use crate::db::model::to_db_typed_uuid; use crate::db::model::Dataset; use crate::db::model::PhysicalDiskPolicy; use crate::db::model::Region; @@ -33,6 +34,7 @@ use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::UpdateResult; +use omicron_uuid_kinds::DatasetUuid; use slog::Logger; use std::net::SocketAddrV6; use uuid::Uuid; @@ -389,13 +391,13 @@ impl DataStore { /// Return the total occupied size for a dataset pub async fn regions_total_occupied_size( &self, - dataset_id: Uuid, + dataset_id: DatasetUuid, ) -> Result<u64, Error> { use db::schema::region::dsl as region_dsl; let total_occupied_size: Option = region_dsl::region - .filter(region_dsl::dataset_id.eq(dataset_id)) + .filter(region_dsl::dataset_id.eq(to_db_typed_uuid(dataset_id))) .select(diesel::dsl::sum( region_dsl::block_size * region_dsl::blocks_per_extent diff --git a/nexus/db-queries/src/db/datastore/region_snapshot.rs b/nexus/db-queries/src/db/datastore/region_snapshot.rs index 4a5db14bd6..0129869f4f 100644 --- a/nexus/db-queries/src/db/datastore/region_snapshot.rs +++ b/nexus/db-queries/src/db/datastore/region_snapshot.rs @@ -9,6 +9,7 @@ use crate::context::OpContext; use crate::db; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; +use crate::db::model::to_db_typed_uuid; use crate::db::model::PhysicalDiskPolicy; use crate::db::model::RegionSnapshot; use async_bb8_diesel::AsyncRunQueryDsl; @@ -17,6 +18,7 @@ use diesel::OptionalExtension; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::LookupResult; +use omicron_uuid_kinds::DatasetUuid; use uuid::Uuid; impl DataStore { @@ -37,14 +39,14 @@ impl DataStore { pub async fn region_snapshot_get( &self, - dataset_id: Uuid, + dataset_id: DatasetUuid, region_id: Uuid, snapshot_id: Uuid, ) -> LookupResult<Option<RegionSnapshot>> { use db::schema::region_snapshot::dsl; dsl::region_snapshot - .filter(dsl::dataset_id.eq(dataset_id)) + .filter(dsl::dataset_id.eq(to_db_typed_uuid(dataset_id))) .filter(dsl::region_id.eq(region_id)) .filter(dsl::snapshot_id.eq(snapshot_id)) .select(RegionSnapshot::as_select()) @@ -58,7 +60,7 @@ pub async fn region_snapshot_remove( &self, - dataset_id: Uuid, + dataset_id: DatasetUuid, region_id: Uuid, snapshot_id: Uuid, ) -> DeleteResult { @@ -67,7 +69,7 @@ impl DataStore { let conn = 
self.pool_connection_unauthorized().await?; let result = diesel::delete(dsl::region_snapshot) - .filter(dsl::dataset_id.eq(dataset_id)) + .filter(dsl::dataset_id.eq(to_db_typed_uuid(dataset_id))) .filter(dsl::region_id.eq(region_id)) .filter(dsl::snapshot_id.eq(snapshot_id)) .execute_async(&*conn) diff --git a/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs b/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs index 6498ef3855..b970bb8962 100644 --- a/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs +++ b/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs @@ -1062,6 +1062,7 @@ mod test { use crate::db::model::RegionReplacement; use crate::db::pub_test_utils::TestDatabase; use omicron_test_utils::dev; + use omicron_uuid_kinds::DatasetUuid; #[tokio::test] async fn test_one_replacement_per_volume() { @@ -1069,11 +1070,11 @@ mod test { let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); - let dataset_1_id = Uuid::new_v4(); + let dataset_1_id = DatasetUuid::new_v4(); let region_1_id = Uuid::new_v4(); let snapshot_1_id = Uuid::new_v4(); - let dataset_2_id = Uuid::new_v4(); + let dataset_2_id = DatasetUuid::new_v4(); let region_2_id = Uuid::new_v4(); let snapshot_2_id = Uuid::new_v4(); @@ -1117,7 +1118,7 @@ mod test { let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); - let dataset_1_id = Uuid::new_v4(); + let dataset_1_id = DatasetUuid::new_v4(); let region_1_id = Uuid::new_v4(); let snapshot_1_id = Uuid::new_v4(); @@ -1155,7 +1156,7 @@ mod test { let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); - let dataset_id = Uuid::new_v4(); + let dataset_id = DatasetUuid::new_v4(); let region_id = Uuid::new_v4(); let snapshot_id = Uuid::new_v4(); @@ -1411,7 +1412,7 @@ mod test { let (opctx, datastore) = (db.opctx(), db.datastore()); let mut request = RegionSnapshotReplacement::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Uuid::new_v4(), ); diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs index 4cd83fff3f..4e0d1ccac1 100644 --- a/nexus/db-queries/src/db/datastore/volume.rs +++ b/nexus/db-queries/src/db/datastore/volume.rs @@ -13,6 +13,7 @@ use crate::db::datastore::SQL_BATCH_SIZE; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Asset; +use crate::db::model::to_db_typed_uuid; use crate::db::model::Dataset; use crate::db::model::Disk; use crate::db::model::DownstairsClientStopRequestNotification; @@ -47,6 +48,7 @@ use omicron_common::api::external::ResourceType; use omicron_common::api::internal::nexus::DownstairsClientStopRequest; use omicron_common::api::internal::nexus::DownstairsClientStopped; use omicron_common::api::internal::nexus::RepairProgress; +use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::DownstairsKind; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::PropolisUuid; @@ -315,7 +317,7 @@ impl DataStore { if let Some(region_snapshot) = maybe_region_snapshot { return Ok(Some(VolumeResourceUsage::RegionSnapshot { - dataset_id: region_snapshot.dataset_id, + dataset_id: region_snapshot.dataset_id(), region_id: region_snapshot.region_id, snapshot_id: region_snapshot.snapshot_id, })); @@ -471,7 +473,10 @@ impl DataStore { .filter( dsl::usage_type.eq(VolumeResourceUsageType::RegionSnapshot), ) - 
.filter(dsl::region_snapshot_dataset_id.eq(dataset_id)) + .filter( + dsl::region_snapshot_dataset_id + .eq(to_db_typed_uuid(dataset_id)), + ) .filter(dsl::region_snapshot_region_id.eq(region_id)) .filter(dsl::region_snapshot_snapshot_id.eq(snapshot_id)) .into_boxed(), @@ -529,7 +534,10 @@ impl DataStore { dsl::usage_type .eq(VolumeResourceUsageType::RegionSnapshot), ) - .filter(dsl::region_snapshot_dataset_id.eq(dataset_id)) + .filter( + dsl::region_snapshot_dataset_id + .eq(to_db_typed_uuid(dataset_id)), + ) .filter(dsl::region_snapshot_region_id.eq(region_id)) .filter(dsl::region_snapshot_snapshot_id.eq(snapshot_id)) .filter(dsl::volume_id.eq(from_volume_id)) @@ -1508,7 +1516,7 @@ impl DataStore { ) .filter( ru_dsl::region_snapshot_dataset_id - .eq(Some(dataset_id)), + .eq(Some(to_db_typed_uuid(dataset_id))), ) .filter( ru_dsl::region_snapshot_region_id @@ -1541,7 +1549,7 @@ impl DataStore { ) .filter( ru_dsl::region_snapshot_dataset_id - .eq(Some(dataset_id)), + .eq(Some(to_db_typed_uuid(dataset_id))), ) .filter( ru_dsl::region_snapshot_region_id @@ -1559,7 +1567,10 @@ impl DataStore { // Don't forget to set `deleting`! see: omicron#4095 use db::schema::region_snapshot::dsl; let updated_rows = diesel::update(dsl::region_snapshot) - .filter(dsl::dataset_id.eq(dataset_id)) + .filter( + dsl::dataset_id + .eq(to_db_typed_uuid(dataset_id)), + ) .filter(dsl::region_id.eq(region_id)) .filter(dsl::snapshot_id.eq(snapshot_id)) .filter( @@ -1896,7 +1907,7 @@ impl DataStore { pub async fn get_dataset_rw_regions_in_volume( &self, opctx: &OpContext, - dataset_id: Uuid, + dataset_id: DatasetUuid, volume_id: Uuid, ) -> LookupResult> { let conn = self.pool_connection_authorized(opctx).await?; @@ -1905,7 +1916,7 @@ impl DataStore { use db::schema::dataset::dsl; dsl::dataset - .filter(dsl::id.eq(dataset_id)) + .filter(dsl::id.eq(to_db_typed_uuid(dataset_id))) .select(Dataset::as_select()) .first_async(&*conn) .await @@ -2299,14 +2310,14 @@ pub struct CrucibleResourcesV2 { pub snapshots_to_delete: Vec, } -#[derive(Debug, Default, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct RegionSnapshotV3 { - dataset: Uuid, + dataset: DatasetUuid, region: Uuid, snapshot: Uuid, } -#[derive(Debug, Default, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] pub struct CrucibleResourcesV3 { #[serde(deserialize_with = "null_to_empty_list")] pub regions: Vec, @@ -2426,7 +2437,10 @@ impl DataStore { for region_snapshots in &crucible_resources.region_snapshots { let maybe_tuple = dsl::region_snapshot - .filter(dsl::dataset_id.eq(region_snapshots.dataset)) + .filter( + dsl::dataset_id + .eq(to_db_typed_uuid(region_snapshots.dataset)), + ) .filter(dsl::region_id.eq(region_snapshots.region)) .filter(dsl::snapshot_id.eq(region_snapshots.snapshot)) .inner_join( @@ -4329,19 +4343,19 @@ mod tests { let region_snapshots = [ RegionSnapshot::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Uuid::new_v4(), address_1.clone(), ), RegionSnapshot::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Uuid::new_v4(), address_2.clone(), ), RegionSnapshot::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Uuid::new_v4(), address_3.clone(), @@ -4430,7 +4444,7 @@ mod tests { let usage = datastore .volume_usage_records_for_resource( VolumeResourceUsage::RegionSnapshot { - dataset_id: region_snapshot.dataset_id, + dataset_id: region_snapshot.dataset_id(), region_id: region_snapshot.region_id, snapshot_id: 
region_snapshot.snapshot_id, }, @@ -4592,7 +4606,7 @@ mod tests { let usage = datastore .volume_usage_records_for_resource( VolumeResourceUsage::RegionSnapshot { - dataset_id: region_snapshot.dataset_id, + dataset_id: region_snapshot.dataset_id(), region_id: region_snapshot.region_id, snapshot_id: region_snapshot.snapshot_id, }, @@ -4746,7 +4760,7 @@ mod tests { let usage = datastore .volume_usage_records_for_resource( VolumeResourceUsage::RegionSnapshot { - dataset_id: region_snapshot.dataset_id, + dataset_id: region_snapshot.dataset_id(), region_id: region_snapshot.region_id, snapshot_id: region_snapshot.snapshot_id, }, @@ -4793,7 +4807,7 @@ mod tests { datastore .region_snapshot_create(RegionSnapshot::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Uuid::new_v4(), address_1.clone(), @@ -4802,7 +4816,7 @@ mod tests { .unwrap(); datastore .region_snapshot_create(RegionSnapshot::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Uuid::new_v4(), address_2.clone(), @@ -4811,7 +4825,7 @@ mod tests { .unwrap(); datastore .region_snapshot_create(RegionSnapshot::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Uuid::new_v4(), address_3.clone(), diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index e013d0f412..97cacb3efe 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -141,7 +141,7 @@ pub(crate) async fn ensure_dataset_records_exist( .await .context("failed to list all datasets")? .into_iter() - .map(|dataset| (DatasetUuid::from_untyped_uuid(dataset.id()), dataset)) + .map(|dataset| (dataset.id(), dataset)) .collect::>(); let mut num_inserted = 0; @@ -551,9 +551,7 @@ mod tests { let first_dataset = &mut all_datasets[0]; let observed_dataset = observed_datasets .into_iter() - .find(|dataset| { - dataset.id() == first_dataset.id.into_untyped_uuid() - }) + .find(|dataset| dataset.id() == first_dataset.id) .expect("Couldn't find dataset we tried to update?"); let observed_dataset: DatasetConfig = observed_dataset.try_into().unwrap(); @@ -674,10 +672,10 @@ mod tests { datastore.dataset_list_all_batched(opctx, None).await.unwrap(); assert!(observed_datasets .iter() - .any(|d| d.id() == crucible_dataset_id.into_untyped_uuid())); + .any(|d| d.id() == crucible_dataset_id)); assert!(!observed_datasets .iter() - .any(|d| d.id() == non_crucible_dataset_id.into_untyped_uuid())); + .any(|d| d.id() == non_crucible_dataset_id)); } #[nexus_test] @@ -766,8 +764,6 @@ mod tests { // "blueprint". 
let observed_datasets = datastore.dataset_list_all_batched(opctx, None).await.unwrap(); - assert!(observed_datasets - .iter() - .any(|d| d.id() == dataset_id.into_untyped_uuid())); + assert!(observed_datasets.iter().any(|d| d.id() == dataset_id)); } } diff --git a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs index b56c9c0433..be16341df6 100644 --- a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs +++ b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs @@ -155,6 +155,7 @@ mod test { use omicron_common::api::external::Generation; use omicron_common::api::internal::shared::DatasetKind; use omicron_common::disk::DiskIdentity; + use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; @@ -442,7 +443,7 @@ mod test { let dataset = datastore .dataset_upsert(Dataset::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), zpool.id(), Some(std::net::SocketAddrV6::new( std::net::Ipv6Addr::LOCALHOST, diff --git a/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs b/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs index 6e49ddc7f0..2782c8799d 100644 --- a/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs +++ b/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs @@ -237,7 +237,7 @@ mod tests { let dataset = datastore .dataset_upsert(Dataset::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), zpool.id(), Some(std::net::SocketAddrV6::new( std::net::Ipv6Addr::LOCALHOST, @@ -275,7 +275,7 @@ mod tests { ( ZpoolUuid::from_untyped_uuid(zpool.id()), - DatasetUuid::from_untyped_uuid(dataset.id()), + dataset.id(), RegionUuid::from_untyped_uuid(region_id), ) } diff --git a/nexus/src/app/background/tasks/region_replacement_driver.rs b/nexus/src/app/background/tasks/region_replacement_driver.rs index 02db86eab3..e7fe0d6338 100644 --- a/nexus/src/app/background/tasks/region_replacement_driver.rs +++ b/nexus/src/app/background/tasks/region_replacement_driver.rs @@ -251,6 +251,7 @@ mod test { use nexus_db_model::UpstairsRepairNotificationType; use nexus_db_model::UpstairsRepairType; use nexus_test_utils_macros::nexus_test; + use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::DownstairsRegionKind; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::TypedUuid; @@ -337,7 +338,7 @@ mod test { // Insert some region records let old_region = { - let dataset_id = Uuid::new_v4(); + let dataset_id = DatasetUuid::new_v4(); let volume_id = Uuid::new_v4(); Region::new( dataset_id, @@ -351,7 +352,7 @@ mod test { }; let new_region = { - let dataset_id = Uuid::new_v4(); + let dataset_id = DatasetUuid::new_v4(); let volume_id = Uuid::new_v4(); Region::new( dataset_id, @@ -436,7 +437,7 @@ mod test { // Insert some region records let old_region = { - let dataset_id = Uuid::new_v4(); + let dataset_id = DatasetUuid::new_v4(); let volume_id = Uuid::new_v4(); Region::new( dataset_id, @@ -450,7 +451,7 @@ mod test { }; let new_region = { - let dataset_id = Uuid::new_v4(); + let dataset_id = DatasetUuid::new_v4(); let volume_id = Uuid::new_v4(); Region::new( dataset_id, @@ -585,7 +586,7 @@ mod test { // Insert some region records let old_region = { - let dataset_id = Uuid::new_v4(); + let dataset_id = DatasetUuid::new_v4(); let volume_id = Uuid::new_v4(); Region::new( dataset_id, @@ -599,7 +600,7 @@ mod test { }; let new_region = { - let dataset_id = Uuid::new_v4(); + let dataset_id = 
DatasetUuid::new_v4(); let volume_id = Uuid::new_v4(); Region::new( dataset_id, diff --git a/nexus/src/app/background/tasks/region_snapshot_replacement_finish.rs b/nexus/src/app/background/tasks/region_snapshot_replacement_finish.rs index caa2fa7bed..0eebd37fb7 100644 --- a/nexus/src/app/background/tasks/region_snapshot_replacement_finish.rs +++ b/nexus/src/app/background/tasks/region_snapshot_replacement_finish.rs @@ -85,7 +85,7 @@ impl RegionSnapshotReplacementFinishDetector { match self .datastore .region_snapshot_get( - request.old_dataset_id, + request.old_dataset_id.into(), request.old_region_id, request.old_snapshot_id, ) @@ -169,6 +169,7 @@ mod test { use nexus_db_model::RegionSnapshotReplacementStepState; use nexus_db_queries::db::datastore::region_snapshot_replacement; use nexus_test_utils_macros::nexus_test; + use omicron_uuid_kinds::DatasetUuid; use uuid::Uuid; type ControlPlaneTestContext = @@ -195,7 +196,7 @@ mod test { // Add a region snapshot replacement request for a fake region snapshot. - let dataset_id = Uuid::new_v4(); + let dataset_id = DatasetUuid::new_v4(); let region_id = Uuid::new_v4(); let snapshot_id = Uuid::new_v4(); diff --git a/nexus/src/app/background/tasks/region_snapshot_replacement_garbage_collect.rs b/nexus/src/app/background/tasks/region_snapshot_replacement_garbage_collect.rs index f3b1b68198..db30e1d074 100644 --- a/nexus/src/app/background/tasks/region_snapshot_replacement_garbage_collect.rs +++ b/nexus/src/app/background/tasks/region_snapshot_replacement_garbage_collect.rs @@ -154,6 +154,7 @@ mod test { use nexus_db_model::RegionSnapshotReplacement; use nexus_db_model::RegionSnapshotReplacementState; use nexus_test_utils_macros::nexus_test; + use omicron_uuid_kinds::DatasetUuid; use uuid::Uuid; type ControlPlaneTestContext = @@ -188,7 +189,7 @@ mod test { // Add two region snapshot requests that need garbage collection let mut request = RegionSnapshotReplacement::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Uuid::new_v4(), ); @@ -208,7 +209,7 @@ mod test { .unwrap(); let mut request = RegionSnapshotReplacement::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Uuid::new_v4(), ); diff --git a/nexus/src/app/background/tasks/region_snapshot_replacement_start.rs b/nexus/src/app/background/tasks/region_snapshot_replacement_start.rs index bc739ecf27..70d9073971 100644 --- a/nexus/src/app/background/tasks/region_snapshot_replacement_start.rs +++ b/nexus/src/app/background/tasks/region_snapshot_replacement_start.rs @@ -274,6 +274,7 @@ mod test { use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external; + use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use std::collections::BTreeMap; use uuid::Uuid; @@ -309,9 +310,9 @@ mod test { // Add a region snapshot replacement request for a fake region snapshot let request = RegionSnapshotReplacement::new( - Uuid::new_v4(), // dataset id - Uuid::new_v4(), // region id - Uuid::new_v4(), // snapshot id + DatasetUuid::new_v4(), // dataset id + Uuid::new_v4(), // region id + Uuid::new_v4(), // snapshot id ); let request_id = request.id; diff --git a/nexus/src/app/background/tasks/region_snapshot_replacement_step.rs b/nexus/src/app/background/tasks/region_snapshot_replacement_step.rs index 29878364e6..ac259ecba8 100644 --- a/nexus/src/app/background/tasks/region_snapshot_replacement_step.rs +++ b/nexus/src/app/background/tasks/region_snapshot_replacement_step.rs @@ -207,7 +207,7 @@ impl 
RegionSnapshotReplacementFindAffected { let region_snapshot = match self .datastore .region_snapshot_get( - request.old_dataset_id, + request.old_dataset_id.into(), request.old_region_id, request.old_snapshot_id, ) @@ -468,6 +468,7 @@ mod test { use nexus_db_model::RegionSnapshotReplacementStepState; use nexus_db_model::Volume; use nexus_test_utils_macros::nexus_test; + use omicron_uuid_kinds::DatasetUuid; use sled_agent_client::types::CrucibleOpts; use sled_agent_client::types::VolumeConstructionRequest; use uuid::Uuid; @@ -486,7 +487,7 @@ mod test { datastore .region_snapshot_create(RegionSnapshot::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Uuid::new_v4(), snapshot_addr.clone(), @@ -555,7 +556,7 @@ mod test { // Add a region snapshot replacement request for a fake region snapshot. - let dataset_id = Uuid::new_v4(); + let dataset_id = DatasetUuid::new_v4(); let region_id = Uuid::new_v4(); let snapshot_id = Uuid::new_v4(); let snapshot_addr = String::from("[fd00:1122:3344::101]:9876"); diff --git a/nexus/src/app/crucible.rs b/nexus/src/app/crucible.rs index 86de328355..1c7e9d83b0 100644 --- a/nexus/src/app/crucible.rs +++ b/nexus/src/app/crucible.rs @@ -22,6 +22,7 @@ use omicron_common::api::external::Error; use omicron_common::backoff::{self, BackoffError}; use omicron_common::progenitor_operation_retry::ProgenitorOperationRetry; use omicron_common::progenitor_operation_retry::ProgenitorOperationRetryError; +use omicron_uuid_kinds::DatasetUuid; use slog::Logger; // Arbitrary limit on concurrency, for operations issued on multiple regions @@ -90,7 +91,7 @@ impl super::Nexus { /// ProgenitorOperationRetry loop. async fn crucible_agent_gone_check( &self, - dataset_id: Uuid, + dataset_id: DatasetUuid, ) -> Result { let on_in_service_physical_disk = self .datastore() diff --git a/nexus/src/app/sagas/region_replacement_finish.rs b/nexus/src/app/sagas/region_replacement_finish.rs index 8ea77f4e97..c7efa2f03f 100644 --- a/nexus/src/app/sagas/region_replacement_finish.rs +++ b/nexus/src/app/sagas/region_replacement_finish.rs @@ -218,6 +218,7 @@ pub(crate) mod test { use nexus_db_queries::authn::saga::Serialized; use nexus_db_queries::context::OpContext; use nexus_test_utils_macros::nexus_test; + use omicron_uuid_kinds::DatasetUuid; use sled_agent_client::types::CrucibleOpts; use sled_agent_client::types::VolumeConstructionRequest; use uuid::Uuid; @@ -241,7 +242,7 @@ pub(crate) mod test { let new_volume_id = Uuid::new_v4(); let replaced_region = { - let dataset_id = Uuid::new_v4(); + let dataset_id = DatasetUuid::new_v4(); Region::new( dataset_id, old_region_volume_id, diff --git a/nexus/src/app/sagas/region_replacement_start.rs b/nexus/src/app/sagas/region_replacement_start.rs index a71a7498ac..ce063dc5be 100644 --- a/nexus/src/app/sagas/region_replacement_start.rs +++ b/nexus/src/app/sagas/region_replacement_start.rs @@ -793,6 +793,7 @@ pub(crate) mod test { use nexus_test_utils_macros::nexus_test; use nexus_types::identity::Asset; use omicron_common::api::internal::shared::DatasetKind; + use omicron_uuid_kinds::DatasetUuid; use sled_agent_client::types::VolumeConstructionRequest; use uuid::Uuid; @@ -905,25 +906,25 @@ pub(crate) mod test { let datasets = vec![ Dataset::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Some("[fd00:1122:3344:101::1]:12345".parse().unwrap()), DatasetKind::Crucible, ), Dataset::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Some("[fd00:1122:3344:102::1]:12345".parse().unwrap()), DatasetKind::Crucible, ), 
Dataset::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Some("[fd00:1122:3344:103::1]:12345".parse().unwrap()), DatasetKind::Crucible, ), Dataset::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Some("[fd00:1122:3344:104::1]:12345".parse().unwrap()), DatasetKind::Crucible, diff --git a/nexus/src/app/sagas/region_snapshot_replacement_garbage_collect.rs b/nexus/src/app/sagas/region_snapshot_replacement_garbage_collect.rs index 9ebc6f0271..762182724b 100644 --- a/nexus/src/app/sagas/region_snapshot_replacement_garbage_collect.rs +++ b/nexus/src/app/sagas/region_snapshot_replacement_garbage_collect.rs @@ -218,6 +218,7 @@ pub(crate) mod test { use nexus_db_queries::authn::saga::Serialized; use nexus_db_queries::context::OpContext; use nexus_test_utils_macros::nexus_test; + use omicron_uuid_kinds::DatasetUuid; use sled_agent_client::types::CrucibleOpts; use sled_agent_client::types::VolumeConstructionRequest; use uuid::Uuid; @@ -275,7 +276,7 @@ pub(crate) mod test { .unwrap(); let mut request = RegionSnapshotReplacement::new( - Uuid::new_v4(), + DatasetUuid::new_v4(), Uuid::new_v4(), Uuid::new_v4(), ); diff --git a/nexus/src/app/sagas/region_snapshot_replacement_start.rs b/nexus/src/app/sagas/region_snapshot_replacement_start.rs index 4e6c3e1e16..55927f7de8 100644 --- a/nexus/src/app/sagas/region_snapshot_replacement_start.rs +++ b/nexus/src/app/sagas/region_snapshot_replacement_start.rs @@ -402,7 +402,7 @@ async fn rsrss_new_region_ensure( let region_snapshot = osagactx .datastore() .region_snapshot_get( - params.request.old_dataset_id, + params.request.old_dataset_id.into(), params.request.old_region_id, params.request.old_snapshot_id, ) @@ -597,7 +597,7 @@ async fn get_replace_params( let region_snapshot = osagactx .datastore() .region_snapshot_get( - params.request.old_dataset_id, + params.request.old_dataset_id.into(), params.request.old_region_id, params.request.old_snapshot_id, ) diff --git a/nexus/src/app/sagas/region_snapshot_replacement_step.rs b/nexus/src/app/sagas/region_snapshot_replacement_step.rs index 66d9426cdd..7b1d598861 100644 --- a/nexus/src/app/sagas/region_snapshot_replacement_step.rs +++ b/nexus/src/app/sagas/region_snapshot_replacement_step.rs @@ -229,7 +229,7 @@ async fn rsrss_create_replace_params( let region_snapshot = osagactx .datastore() .region_snapshot_get( - region_snapshot_replace_request.old_dataset_id, + region_snapshot_replace_request.old_dataset_id.into(), region_snapshot_replace_request.old_region_id, region_snapshot_replace_request.old_snapshot_id, ) diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 41f97237c9..4dd958c50b 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -1467,7 +1467,7 @@ async fn ssc_start_running_snapshot( osagactx .datastore() .region_snapshot_create(db::model::RegionSnapshot { - dataset_id: dataset.id(), + dataset_id: dataset.id().into(), region_id: region.id(), snapshot_id, snapshot_addr, diff --git a/nexus/src/app/sagas/volume_delete.rs b/nexus/src/app/sagas/volume_delete.rs index 12d48cb367..a9be1f34ee 100644 --- a/nexus/src/app/sagas/volume_delete.rs +++ b/nexus/src/app/sagas/volume_delete.rs @@ -311,7 +311,7 @@ async fn svd_delete_crucible_snapshot_records( osagactx .datastore() .region_snapshot_remove( - region_snapshot.dataset_id, + region_snapshot.dataset_id.into(), region_snapshot.region_id, region_snapshot.snapshot_id, ) diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs index 
5c8b991043..ff4cbc89c5 100644 --- a/nexus/src/app/sled.rs +++ b/nexus/src/app/sled.rs @@ -23,7 +23,9 @@ use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::internal::shared::DatasetKind; -use omicron_uuid_kinds::{GenericUuid, SledUuid}; +use omicron_uuid_kinds::DatasetUuid; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; use sled_agent_client::Client as SledAgentClient; use std::net::SocketAddrV6; use std::sync::Arc; @@ -307,7 +309,7 @@ impl super::Nexus { /// Upserts a crucible dataset into the database, updating it if it already exists. pub(crate) async fn upsert_crucible_dataset( &self, - id: Uuid, + id: DatasetUuid, zpool_id: Uuid, address: SocketAddrV6, ) -> Result<(), Error> { diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index 564277ad05..1a25b2e9aa 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -42,6 +42,7 @@ use omicron_common::api::internal::shared::{ AllowedSourceIps, ExternalPortDiscovery, RackNetworkConfig, SwitchLocation, }; use omicron_common::FileKv; +use omicron_uuid_kinds::DatasetUuid; use oximeter::types::ProducerRegistry; use oximeter_producer::Server as ProducerServer; use slog::Logger; @@ -368,7 +369,7 @@ impl nexus_test_interface::NexusServer for Server { &self, physical_disk: PhysicalDiskPutRequest, zpool: ZpoolPutRequest, - dataset_id: Uuid, + dataset_id: DatasetUuid, address: SocketAddrV6, ) { let opctx = self.apictx.context.nexus.opctx_for_internal_api(); diff --git a/nexus/test-interface/Cargo.toml b/nexus/test-interface/Cargo.toml index 52298c1267..00da4cb6a3 100644 --- a/nexus/test-interface/Cargo.toml +++ b/nexus/test-interface/Cargo.toml @@ -16,3 +16,4 @@ omicron-common.workspace = true slog.workspace = true uuid.workspace = true omicron-workspace-hack.workspace = true +omicron-uuid-kinds.workspace = true diff --git a/nexus/test-interface/src/lib.rs b/nexus/test-interface/src/lib.rs index d011ceccf2..68f462551c 100644 --- a/nexus/test-interface/src/lib.rs +++ b/nexus/test-interface/src/lib.rs @@ -39,6 +39,7 @@ use nexus_types::internal_api::params::{ }; use nexus_types::inventory::Collection; use omicron_common::api::external::Error; +use omicron_uuid_kinds::DatasetUuid; use slog::Logger; use std::net::{SocketAddr, SocketAddrV6}; use uuid::Uuid; @@ -111,7 +112,7 @@ pub trait NexusServer: Send + Sync + 'static { &self, physical_disk: PhysicalDiskPutRequest, zpool: ZpoolPutRequest, - dataset_id: Uuid, + dataset_id: DatasetUuid, address: SocketAddrV6, ); diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 7c50a5e303..bf966c382c 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -66,6 +66,7 @@ use omicron_common::api::internal::shared::SwitchLocation; use omicron_common::zpool_name::ZpoolName; use omicron_sled_agent::sim; use omicron_test_utils::dev; +use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::ExternalIpUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneUuid; @@ -240,8 +241,9 @@ impl RackInitRequestBuilder { // - The internal DNS configuration for this service fn add_dataset( &mut self, + zone_id: OmicronZoneUuid, zpool_id: ZpoolUuid, - dataset_id: Uuid, + dataset_id: DatasetUuid, address: SocketAddrV6, kind: DatasetKind, service_name: ServiceName, @@ -253,11 +255,7 @@ impl RackInitRequestBuilder { }); let zone = self .internal_dns_config - .host_zone( - // TODO-cleanup use TypedUuid everywhere - 
OmicronZoneUuid::from_untyped_uuid(dataset_id), - *address.ip(), - ) + .host_zone(zone_id, *address.ip()) .expect("Failed to set up DNS for {kind}"); self.internal_dns_config .service_backend_zone(service_name, &zone, address.port()) @@ -268,8 +266,9 @@ impl RackInitRequestBuilder { // single zone. fn add_clickhouse_dataset( &mut self, + zone_id: OmicronZoneUuid, zpool_id: ZpoolUuid, - dataset_id: Uuid, + dataset_id: DatasetUuid, address: SocketAddrV6, ) { self.datasets.push(DatasetCreateRequest { @@ -281,11 +280,7 @@ impl RackInitRequestBuilder { }, }); self.internal_dns_config - .host_zone_clickhouse( - OmicronZoneUuid::from_untyped_uuid(dataset_id), - ServiceName::Clickhouse, - address, - ) + .host_zone_clickhouse(zone_id, ServiceName::Clickhouse, address) .expect("Failed to setup ClickHouse DNS"); } } @@ -443,10 +438,12 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .parse::() .expect("Failed to parse port"); + let zone_id = OmicronZoneUuid::new_v4(); let zpool_id = ZpoolUuid::new_v4(); - let dataset_id = Uuid::new_v4(); + let dataset_id = DatasetUuid::new_v4(); eprintln!("DB address: {}", address); self.rack_init_builder.add_dataset( + zone_id, zpool_id, dataset_id, address, @@ -459,7 +456,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .unwrap(); self.blueprint_zones.push(BlueprintZoneConfig { disposition: BlueprintZoneDisposition::InService, - id: OmicronZoneUuid::from_untyped_uuid(dataset_id), + id: zone_id, filesystem_pool: Some(ZpoolName::new_external(zpool_id)), zone_type: BlueprintZoneType::CockroachDb( blueprint_zone_type::CockroachDb { @@ -482,12 +479,14 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .await .unwrap(); + let zone_id = OmicronZoneUuid::new_v4(); let zpool_id = ZpoolUuid::new_v4(); - let dataset_id = Uuid::new_v4(); + let dataset_id = DatasetUuid::new_v4(); let http_address = clickhouse.http_address(); let http_port = http_address.port(); let native_address = clickhouse.native_address(); self.rack_init_builder.add_clickhouse_dataset( + zone_id, zpool_id, dataset_id, http_address, @@ -514,7 +513,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .unwrap(); self.blueprint_zones.push(BlueprintZoneConfig { disposition: BlueprintZoneDisposition::InService, - id: OmicronZoneUuid::from_untyped_uuid(dataset_id), + id: zone_id, filesystem_pool: Some(ZpoolName::new_external(zpool_id)), zone_type: BlueprintZoneType::Clickhouse( blueprint_zone_type::Clickhouse { diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 36f40be256..ce1fc87996 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -49,6 +49,7 @@ use omicron_common::disk::DiskIdentity; use omicron_sled_agent::sim::SledAgent; use omicron_test_utils::dev::poll::wait_for_condition; use omicron_test_utils::dev::poll::CondCheckError; +use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; @@ -1010,7 +1011,7 @@ pub async fn projects_list( } pub struct TestDataset { - pub id: Uuid, + pub id: DatasetUuid, } pub struct TestZpool { @@ -1172,7 +1173,7 @@ impl<'a, N: NexusServer> DiskTest<'a, N> { sled_id, Uuid::new_v4(), ZpoolUuid::new_v4(), - Uuid::new_v4(), + DatasetUuid::new_v4(), Self::DEFAULT_ZPOOL_SIZE_GIB, ) .await @@ -1205,7 +1206,7 @@ impl<'a, N: NexusServer> DiskTest<'a, N> { sled_id: SledUuid, physical_disk_id: Uuid, zpool_id: ZpoolUuid, - 
dataset_id: Uuid, + dataset_id: DatasetUuid, gibibytes: u32, ) { let cptestctx = self.cptestctx; diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index 7543a74597..accb4470fb 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -10,6 +10,7 @@ use dropshot::test_util::ClientTestContext; use http::method::Method; use http::StatusCode; use nexus_config::RegionAllocationStrategy; +use nexus_db_model::to_db_typed_uuid; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; @@ -38,6 +39,7 @@ use omicron_common::api::external::Instance; use omicron_common::api::external::InstanceCpuCount; use omicron_common::api::external::Name; use omicron_nexus::app::MIN_DISK_SIZE_BYTES; +use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::InstanceUuid; use uuid::Uuid; @@ -1217,7 +1219,7 @@ async fn test_region_snapshot_create_idempotent( let datastore = nexus.datastore(); let region_snapshot = db::model::RegionSnapshot { - dataset_id: Uuid::new_v4(), + dataset_id: to_db_typed_uuid(DatasetUuid::new_v4()), region_id: Uuid::new_v4(), snapshot_id: Uuid::new_v4(), @@ -1573,7 +1575,10 @@ async fn test_region_allocation_for_snapshot( let region_snapshots: Vec<db::model::RegionSnapshot> = dsl::region_snapshot - .filter(dsl::dataset_id.eq(region.dataset_id())) + .filter( + dsl::dataset_id + .eq(to_db_typed_uuid(region.dataset_id())), + ) .filter(dsl::snapshot_id.eq(snapshot.identity.id)) .select(db::model::RegionSnapshot::as_select()) .load_async::<db::model::RegionSnapshot>(&*conn) diff --git a/nexus/tests/integration_tests/unauthorized.rs b/nexus/tests/integration_tests/unauthorized.rs index 93e40dbc2e..6581dcb9e9 100644 --- a/nexus/tests/integration_tests/unauthorized.rs +++ b/nexus/tests/integration_tests/unauthorized.rs @@ -19,6 +19,7 @@ use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::http_testing::TestResponse; use nexus_test_utils_macros::nexus_test; +use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::ZpoolUuid; use once_cell::sync::Lazy; @@ -63,7 +64,7 @@ async fn test_unauthorized(cptestctx: &ControlPlaneTestContext) { sled_id, nexus_test_utils::PHYSICAL_DISK_UUID.parse().unwrap(), ZpoolUuid::new_v4(), - uuid::Uuid::new_v4(), + DatasetUuid::new_v4(), DiskTest::DEFAULT_ZPOOL_SIZE_GIB, ) .await; diff --git a/nexus/tests/integration_tests/volume_management.rs b/nexus/tests/integration_tests/volume_management.rs index 887afff20f..6a9ce28389 100644 --- a/nexus/tests/integration_tests/volume_management.rs +++ b/nexus/tests/integration_tests/volume_management.rs @@ -12,6 +12,7 @@ use dropshot::test_util::ClientTestContext; use http::method::Method; use http::StatusCode; use nexus_config::RegionAllocationStrategy; +use nexus_db_model::to_db_typed_uuid; use nexus_db_model::RegionSnapshotReplacement; use nexus_db_model::RegionSnapshotReplacementState; use nexus_db_model::Volume; @@ -2254,7 +2255,7 @@ async fn test_keep_your_targets_straight(cptestctx: &ControlPlaneTestContext) { &region_snapshots[i]; datastore .region_snapshot_create(nexus_db_model::RegionSnapshot { - dataset_id: *dataset_id, + dataset_id: (*dataset_id).into(), region_id: *region_id, snapshot_id: *snapshot_id, snapshot_addr: snapshot_addr.clone(), @@ -2375,7 +2376,7 @@ async fn test_keep_your_targets_straight(cptestctx: &ControlPlaneTestContext) { &region_snapshots[i]; datastore .region_snapshot_create(nexus_db_model::RegionSnapshot { - 
dataset_id: *dataset_id, + dataset_id: (*dataset_id).into(), region_id: *region_id, snapshot_id: *snapshot_id, snapshot_addr: snapshot_addr.clone(), @@ -4837,7 +4838,7 @@ async fn test_volume_remove_rop_respects_accounting( let usage = datastore .volume_usage_records_for_resource( VolumeResourceUsage::RegionSnapshot { - dataset_id: region_snapshot.dataset_id, + dataset_id: region_snapshot.dataset_id.into(), region_id: region_snapshot.region_id, snapshot_id: region_snapshot.snapshot_id, }, @@ -4897,7 +4898,7 @@ async fn test_volume_remove_rop_respects_accounting( let usage = datastore .volume_usage_records_for_resource( VolumeResourceUsage::RegionSnapshot { - dataset_id: region_snapshot.dataset_id, + dataset_id: region_snapshot.dataset_id.into(), region_id: region_snapshot.region_id, snapshot_id: region_snapshot.snapshot_id, }, @@ -5017,7 +5018,7 @@ async fn test_volume_remove_rop_respects_accounting_no_modify_others( let usage = datastore .volume_usage_records_for_resource( VolumeResourceUsage::RegionSnapshot { - dataset_id: region_snapshot.dataset_id, + dataset_id: region_snapshot.dataset_id.into(), region_id: region_snapshot.region_id, snapshot_id: region_snapshot.snapshot_id, }, @@ -5081,7 +5082,7 @@ async fn test_volume_remove_rop_respects_accounting_no_modify_others( let usage = datastore .volume_usage_records_for_resource( VolumeResourceUsage::RegionSnapshot { - dataset_id: region_snapshot.dataset_id, + dataset_id: region_snapshot.dataset_id.into(), region_id: region_snapshot.region_id, snapshot_id: region_snapshot.snapshot_id, }, @@ -5408,7 +5409,7 @@ async fn test_migrate_to_ref_count_with_records_region_snapshot_deleting( datastore .region_snapshot_create(nexus_db_model::RegionSnapshot { - dataset_id: *dataset_id, + dataset_id: to_db_typed_uuid(*dataset_id), region_id: *region_id, snapshot_id: *snapshot_id, snapshot_addr: snapshot_addr.clone(), @@ -5511,7 +5512,10 @@ async fn test_migrate_to_ref_count_with_records_region_snapshot_deleting( let region_snapshot_to_delete = &snapshots_to_delete[0].1; - assert_eq!(region_snapshot_to_delete.dataset_id, region_snapshots[0].0); + assert_eq!( + region_snapshot_to_delete.dataset_id, + to_db_typed_uuid(region_snapshots[0].0) + ); assert_eq!(region_snapshot_to_delete.region_id, region_snapshots[0].1); assert_eq!(region_snapshot_to_delete.snapshot_id, region_snapshots[0].2); assert_eq!(region_snapshot_to_delete.snapshot_addr, region_snapshots[0].3); diff --git a/nexus/types/src/internal_api/params.rs b/nexus/types/src/internal_api/params.rs index 1279bca53e..32a16788b4 100644 --- a/nexus/types/src/internal_api/params.rs +++ b/nexus/types/src/internal_api/params.rs @@ -20,6 +20,7 @@ use omicron_common::api::internal::shared::DatasetKind; use omicron_common::api::internal::shared::ExternalPortDiscovery; use omicron_common::api::internal::shared::RackNetworkConfig; use omicron_common::api::internal::shared::SourceNatConfig; +use omicron_uuid_kinds::DatasetUuid; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::fmt; @@ -157,7 +158,7 @@ impl fmt::Display for ServiceKind { #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] pub struct DatasetCreateRequest { pub zpool_id: Uuid, - pub dataset_id: Uuid, + pub dataset_id: DatasetUuid, pub request: DatasetPutRequest, } diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 118bc22e09..70b281c143 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -3102,8 +3102,7 @@ "type": "object", "properties": { "dataset_id": { - "type": 
"string", - "format": "uuid" + "$ref": "#/components/schemas/TypedUuidForDatasetKind" }, "request": { "$ref": "#/components/schemas/DatasetPutRequest" diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index 90ad65b065..05283a835e 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -834,7 +834,7 @@ impl ServiceInner { (dataset.name.pool().id(), dataset.name.dataset().clone()), NexusTypes::DatasetCreateRequest { zpool_id: dataset.name.pool().id().into_untyped_uuid(), - dataset_id: dataset.id.into_untyped_uuid(), + dataset_id: dataset.id, request: NexusTypes::DatasetPutRequest { address: None, kind: dataset.name.dataset().clone(), diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs index 57bfbac760..a7cf8bb382 100644 --- a/sled-agent/src/sim/server.rs +++ b/sled-agent/src/sim/server.rs @@ -42,6 +42,7 @@ use omicron_common::backoff::{ }; use omicron_common::disk::DiskIdentity; use omicron_common::FileKv; +use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::ZpoolUuid; @@ -197,7 +198,7 @@ impl Server { sled_agent .create_zpool(zpool_id, physical_disk_id, zpool.size) .await; - let dataset_id = Uuid::new_v4(); + let dataset_id = DatasetUuid::new_v4(); let address = sled_agent.create_crucible_dataset(zpool_id, dataset_id).await; diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index 5a482f3196..a5c094ec21 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -38,7 +38,11 @@ use omicron_common::disk::{ DatasetsConfig, DatasetsManagementResult, DiskIdentity, DiskVariant, DisksManagementResult, OmicronPhysicalDisksConfig, }; -use omicron_uuid_kinds::{GenericUuid, PropolisUuid, SledUuid, ZpoolUuid}; +use omicron_uuid_kinds::DatasetUuid; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::PropolisUuid; +use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::ZpoolUuid; use oxnet::Ipv6Net; use propolis_client::{ types::VolumeConstructionRequest, Client as PropolisClient, @@ -614,7 +618,7 @@ impl SledAgent { pub async fn get_datasets( &self, zpool_id: ZpoolUuid, - ) -> Vec<(Uuid, SocketAddr)> { + ) -> Vec<(DatasetUuid, SocketAddr)> { self.storage.lock().await.get_all_datasets(zpool_id) } @@ -636,7 +640,7 @@ impl SledAgent { pub async fn create_crucible_dataset( &self, zpool_id: ZpoolUuid, - dataset_id: Uuid, + dataset_id: DatasetUuid, ) -> SocketAddr { self.storage.lock().await.insert_dataset(zpool_id, dataset_id).await } @@ -645,7 +649,7 @@ impl SledAgent { pub async fn get_crucible_dataset( &self, zpool_id: ZpoolUuid, - dataset_id: Uuid, + dataset_id: DatasetUuid, ) -> Arc { self.storage.lock().await.get_dataset(zpool_id, dataset_id).await } @@ -929,7 +933,11 @@ impl SledAgent { *self.fake_zones.lock().await = requested_zones; } - pub async fn drop_dataset(&self, zpool_id: ZpoolUuid, dataset_id: Uuid) { + pub async fn drop_dataset( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + ) { self.storage.lock().await.drop_dataset(zpool_id, dataset_id) } diff --git a/sled-agent/src/sim/storage.rs b/sled-agent/src/sim/storage.rs index bf9ba75a48..344c3d730c 100644 --- a/sled-agent/src/sim/storage.rs +++ b/sled-agent/src/sim/storage.rs @@ -28,6 +28,7 @@ use omicron_common::disk::DiskManagementStatus; use omicron_common::disk::DiskVariant; use omicron_common::disk::DisksManagementResult; use 
diff --git a/sled-agent/src/sim/storage.rs b/sled-agent/src/sim/storage.rs
index bf9ba75a48..344c3d730c 100644
--- a/sled-agent/src/sim/storage.rs
+++ b/sled-agent/src/sim/storage.rs
@@ -28,6 +28,7 @@ use omicron_common::disk::DiskManagementStatus;
 use omicron_common::disk::DiskVariant;
 use omicron_common::disk::DisksManagementResult;
 use omicron_common::disk::OmicronPhysicalDisksConfig;
+use omicron_uuid_kinds::DatasetUuid;
 use omicron_uuid_kinds::GenericUuid;
 use omicron_uuid_kinds::OmicronZoneUuid;
 use omicron_uuid_kinds::PropolisUuid;
@@ -818,7 +819,7 @@ pub(crate) struct Zpool {
     id: ZpoolUuid,
     physical_disk_id: Uuid,
     total_size: u64,
-    datasets: HashMap,
+    datasets: HashMap,
 }
 
 impl Zpool {
@@ -829,7 +830,7 @@ impl Zpool {
     fn insert_dataset(
         &mut self,
         log: &Logger,
-        id: Uuid,
+        id: DatasetUuid,
         crucible_ip: IpAddr,
         start_port: u16,
         end_port: u16,
@@ -884,7 +885,7 @@ impl Zpool {
         regions.pop()
     }
 
-    pub fn drop_dataset(&mut self, id: Uuid) {
+    pub fn drop_dataset(&mut self, id: DatasetUuid) {
         let _ = self.datasets.remove(&id).expect("Failed to get the dataset");
     }
 }
@@ -1032,7 +1033,7 @@ impl Storage {
     pub async fn insert_dataset(
         &mut self,
         zpool_id: ZpoolUuid,
-        dataset_id: Uuid,
+        dataset_id: DatasetUuid,
     ) -> SocketAddr {
         // Update our local data
         let dataset = self
@@ -1093,7 +1094,7 @@ impl Storage {
     pub fn get_all_datasets(
         &self,
         zpool_id: ZpoolUuid,
-    ) -> Vec<(Uuid, SocketAddr)> {
+    ) -> Vec<(DatasetUuid, SocketAddr)> {
         let zpool = self.zpools.get(&zpool_id).expect("Zpool does not exist");
 
         zpool
@@ -1106,7 +1107,7 @@ impl Storage {
     pub async fn get_dataset(
         &self,
         zpool_id: ZpoolUuid,
-        dataset_id: Uuid,
+        dataset_id: DatasetUuid,
     ) -> Arc {
         self.zpools
             .get(&zpool_id)
@@ -1146,7 +1147,11 @@ impl Storage {
         regions.pop()
     }
 
-    pub fn drop_dataset(&mut self, zpool_id: ZpoolUuid, dataset_id: Uuid) {
+    pub fn drop_dataset(
+        &mut self,
+        zpool_id: ZpoolUuid,
+        dataset_id: DatasetUuid,
+    ) {
         self.zpools
             .get_mut(&zpool_id)
             .expect("Zpool does not exist")