Skip to content

Commit

Permalink
Handle region snapshot replacement volume deletes (#7046)
Browse files Browse the repository at this point in the history
Volumes can be deleted at any time, but the tasks and sagas that perform
region snapshot replacement did not account for this. This commit adds
checks in a few places for if a volume is soft-deleted or hard-deleted,
and bails out of any affected region snapshot replacement accordingly:

- if a volume that has the region snapshot being replaced is
soft-deleted, then skip making a region snapshot replacement step for it

- if a region snapshot replacement step has the volume deleted after the
step was created, transition it directly to the VolumeDeleted state

- if a region snapshot replacement step has the volume deleted during
the saga invocation, then skip notifying any Upstairs and allow the saga
to transition the request to Complete, where the associated clean up
can then proceed

An interesting race condition emerged during unit testing: the read-only
region allocated to replace a region snapshot would be swapped into the
snapshot volume, but would be susceptible to being deleted by the user,
and therefore unable to be swapped into other volumes that have that
snapshot volume as a read-only parent.

This requires an additional volume that uses that read-only region in
order to bump the reference count associated with that region, so that
the user cannot delete the region before it has been used to replace
all other uses of the region snapshot it was meant to replace.

This additional volume's lifetime lives as long as the region snapshot
replacement, and therefore needs to be deleted when the region snapshot
replacement is finished. This required a new region snapshot replacement
finish saga, which required a new "Completing" state to perform the same
type of state-based lock on the replacement request that is done for all
the other sagas.

Testing also revealed that there were scenarios where
`find_deleted_volume_regions` would return volumes for hard-deletion
prematurely. The function now returns a struct instead of a list of
tuples, and in that struct, regions freed for deletion are now distinct
from volumes freed for deletion. Volumes are now only returned for
hard-deletion when all associated read/write regions have been (or are
going to be) deleted.

Fixes #6353
  • Loading branch information
jmpesp authored Dec 19, 2024
1 parent d337333 commit 09b150f
Show file tree
Hide file tree
Showing 34 changed files with 3,540 additions and 493 deletions.
41 changes: 25 additions & 16 deletions dev-tools/omdb/src/bin/omdb/db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2591,39 +2591,48 @@ async fn cmd_db_region_used_by(
async fn cmd_db_region_find_deleted(
datastore: &DataStore,
) -> Result<(), anyhow::Error> {
let datasets_regions_volumes =
let freed_crucible_resources =
datastore.find_deleted_volume_regions().await?;

#[derive(Tabled)]
struct Row {
struct RegionRow {
dataset_id: DatasetUuid,
region_id: Uuid,
volume_id: String,
}

let rows: Vec<Row> = datasets_regions_volumes
.into_iter()
#[derive(Tabled)]
struct VolumeRow {
volume_id: Uuid,
}

let region_rows: Vec<RegionRow> = freed_crucible_resources
.datasets_and_regions
.iter()
.map(|row| {
let (dataset, region, volume) = row;
let (dataset, region) = row;

Row {
dataset_id: dataset.id(),
region_id: region.id(),
volume_id: if let Some(volume) = volume {
volume.id().to_string()
} else {
String::from("")
},
}
RegionRow { dataset_id: dataset.id(), region_id: region.id() }
})
.collect();

let table = tabled::Table::new(rows)
let table = tabled::Table::new(region_rows)
.with(tabled::settings::Style::psql())
.to_string();

println!("{}", table);

let volume_rows: Vec<VolumeRow> = freed_crucible_resources
.volumes
.iter()
.map(|volume_id| VolumeRow { volume_id: *volume_id })
.collect();

let volume_table = tabled::Table::new(volume_rows)
.with(tabled::settings::Style::psql())
.to_string();

println!("{}", volume_table);

Ok(())
}

Expand Down
23 changes: 20 additions & 3 deletions dev-tools/omdb/src/bin/omdb/nexus.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1655,6 +1655,14 @@ fn print_task_details(bgtask: &BackgroundTask, details: &serde_json::Value) {
println!(" > {line}");
}

println!(
" total requests completed ok: {}",
status.requests_completed_ok.len(),
);
for line in &status.requests_completed_ok {
println!(" > {line}");
}

println!(" errors: {}", status.errors.len());
for line in &status.errors {
println!(" > {line}");
Expand Down Expand Up @@ -1720,6 +1728,14 @@ fn print_task_details(bgtask: &BackgroundTask, details: &serde_json::Value) {
println!(" > {line}");
}

println!(
" total steps set to volume_deleted ok: {}",
status.step_set_volume_deleted_ok.len(),
);
for line in &status.step_set_volume_deleted_ok {
println!(" > {line}");
}

println!(" errors: {}", status.errors.len());
for line in &status.errors {
println!(" > {line}");
Expand Down Expand Up @@ -1831,10 +1847,11 @@ fn print_task_details(bgtask: &BackgroundTask, details: &serde_json::Value) {

Ok(status) => {
println!(
" total records transitioned to done: {}",
status.records_set_to_done.len(),
" region snapshot replacement finish sagas started \
ok: {}",
status.finish_invoked_ok.len()
);
for line in &status.records_set_to_done {
for line in &status.finish_invoked_ok {
println!(" > {line}");
}

Expand Down
8 changes: 6 additions & 2 deletions dev-tools/omdb/tests/successes.out
Original file line number Diff line number Diff line change
Expand Up @@ -627,7 +627,7 @@ task: "region_snapshot_replacement_finish"
currently executing: no
last completed activation: <REDACTED ITERATIONS>, triggered by a periodic timer firing
started at <REDACTED_TIMESTAMP> (<REDACTED DURATION>s ago) and ran for <REDACTED DURATION>ms
total records transitioned to done: 0
region snapshot replacement finish sagas started ok: 0
errors: 0

task: "region_snapshot_replacement_garbage_collection"
Expand All @@ -645,6 +645,7 @@ task: "region_snapshot_replacement_start"
started at <REDACTED_TIMESTAMP> (<REDACTED DURATION>s ago) and ran for <REDACTED DURATION>ms
total requests created ok: 0
total start saga invoked ok: 0
total requests completed ok: 0
errors: 0

task: "region_snapshot_replacement_step"
Expand All @@ -655,6 +656,7 @@ task: "region_snapshot_replacement_step"
total step records created ok: 0
total step garbage collect saga invoked ok: 0
total step saga invoked ok: 0
total steps set to volume_deleted ok: 0
errors: 0

task: "saga_recovery"
Expand Down Expand Up @@ -1070,7 +1072,7 @@ task: "region_snapshot_replacement_finish"
currently executing: no
last completed activation: <REDACTED ITERATIONS>, triggered by a periodic timer firing
started at <REDACTED_TIMESTAMP> (<REDACTED DURATION>s ago) and ran for <REDACTED DURATION>ms
total records transitioned to done: 0
region snapshot replacement finish sagas started ok: 0
errors: 0

task: "region_snapshot_replacement_garbage_collection"
Expand All @@ -1088,6 +1090,7 @@ task: "region_snapshot_replacement_start"
started at <REDACTED_TIMESTAMP> (<REDACTED DURATION>s ago) and ran for <REDACTED DURATION>ms
total requests created ok: 0
total start saga invoked ok: 0
total requests completed ok: 0
errors: 0

task: "region_snapshot_replacement_step"
Expand All @@ -1098,6 +1101,7 @@ task: "region_snapshot_replacement_step"
total step records created ok: 0
total step garbage collect saga invoked ok: 0
total step saga invoked ok: 0
total steps set to volume_deleted ok: 0
errors: 0

task: "saga_recovery"
Expand Down
20 changes: 17 additions & 3 deletions nexus/db-model/src/region_snapshot_replacement.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ impl_enum_type!(
ReplacementDone => b"replacement_done"
DeletingOldVolume => b"deleting_old_volume"
Running => b"running"
Completing => b"completing"
Complete => b"complete"
);

Expand All @@ -46,6 +47,7 @@ impl std::str::FromStr for RegionSnapshotReplacementState {
Ok(RegionSnapshotReplacementState::DeletingOldVolume)
}
"running" => Ok(RegionSnapshotReplacementState::Running),
"completing" => Ok(RegionSnapshotReplacementState::Completing),
"complete" => Ok(RegionSnapshotReplacementState::Complete),
_ => Err(format!("unrecognized value {} for enum", s)),
}
Expand Down Expand Up @@ -79,9 +81,14 @@ impl std::str::FromStr for RegionSnapshotReplacementState {
/// | |
/// v ---
/// ---
/// Running |
/// | set in region snapshot replacement
/// | | finish background task
/// Running <-- |
/// | |
/// | | |
/// v | |
/// | | responsibility of region snapshot
/// Completing -- | replacement finish saga
/// |
/// | |
/// v |
/// |
/// Complete ---
Expand Down Expand Up @@ -133,6 +140,12 @@ pub struct RegionSnapshotReplacement {
pub replacement_state: RegionSnapshotReplacementState,

pub operating_saga_id: Option<Uuid>,

/// In order for the newly created region not to be deleted inadvertently,
/// an additional reference count bump is required. This volume should live
/// as long as this request so that all necessary replacements can be
/// completed.
pub new_region_volume_id: Option<Uuid>,
}

impl RegionSnapshotReplacement {
Expand All @@ -157,6 +170,7 @@ impl RegionSnapshotReplacement {
old_snapshot_id,
old_snapshot_volume_id: None,
new_region_id: None,
new_region_volume_id: None,
replacement_state: RegionSnapshotReplacementState::Requested,
operating_saga_id: None,
}
Expand Down
1 change: 1 addition & 0 deletions nexus/db-model/src/schema.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1932,6 +1932,7 @@ table! {
new_region_id -> Nullable<Uuid>,
replacement_state -> crate::RegionSnapshotReplacementStateEnum,
operating_saga_id -> Nullable<Uuid>,
new_region_volume_id -> Nullable<Uuid>,
}
}

Expand Down
3 changes: 2 additions & 1 deletion nexus/db-model/src/schema_versions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ use std::collections::BTreeMap;
///
/// This must be updated when you change the database schema. Refer to
/// schema/crdb/README.adoc in the root of this repository for details.
pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(116, 0, 0);
pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(117, 0, 0);

/// List of all past database schema versions, in *reverse* order
///
Expand All @@ -29,6 +29,7 @@ static KNOWN_VERSIONS: Lazy<Vec<KnownVersion>> = Lazy::new(|| {
// | leaving the first copy as an example for the next person.
// v
// KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"),
KnownVersion::new(117, "add-completing-and-new-region-volume"),
KnownVersion::new(116, "bp-physical-disk-disposition"),
KnownVersion::new(115, "inv-omicron-physical-disks-generation"),
KnownVersion::new(114, "crucible-ref-count-records"),
Expand Down
11 changes: 1 addition & 10 deletions nexus/db-queries/src/db/datastore/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -125,16 +125,7 @@ pub use sled::TransitionError;
pub use switch_port::SwitchPortSettingsCombinedResult;
pub use virtual_provisioning_collection::StorageType;
pub use vmm::VmmStateUpdateResult;
pub use volume::read_only_resources_associated_with_volume;
pub use volume::CrucibleResources;
pub use volume::CrucibleTargets;
pub use volume::ExistingTarget;
pub use volume::ReplacementTarget;
pub use volume::VolumeCheckoutReason;
pub use volume::VolumeReplaceResult;
pub use volume::VolumeReplacementParams;
pub use volume::VolumeToDelete;
pub use volume::VolumeWithTarget;
pub use volume::*;

// Number of unique datasets required to back a region.
// TODO: This should likely turn into a configuration option.
Expand Down
58 changes: 55 additions & 3 deletions nexus/db-queries/src/db/datastore/region_replacement.rs
Original file line number Diff line number Diff line change
Expand Up @@ -59,19 +59,22 @@ impl DataStore {
opctx: &OpContext,
request: RegionReplacement,
) -> Result<(), Error> {
let err = OptionalError::new();
let conn = self.pool_connection_authorized(opctx).await?;

self.transaction_retry_wrapper("insert_region_replacement_request")
.transaction(&conn, |conn| {
let request = request.clone();
let err = err.clone();
async move {
use db::schema::region_replacement::dsl;

Self::volume_repair_insert_query(
Self::volume_repair_insert_in_txn(
&conn,
err,
request.volume_id,
request.id,
)
.execute_async(&conn)
.await?;

diesel::insert_into(dsl::region_replacement)
Expand All @@ -83,7 +86,13 @@ impl DataStore {
}
})
.await
.map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
.map_err(|e| {
if let Some(err) = err.take() {
err
} else {
public_error_from_diesel(e, ErrorHandler::Server)
}
})
}

pub async fn get_region_replacement_request_by_id(
Expand Down Expand Up @@ -915,6 +924,7 @@ mod test {

use crate::db::pub_test_utils::TestDatabase;
use omicron_test_utils::dev;
use sled_agent_client::types::VolumeConstructionRequest;

#[tokio::test]
async fn test_one_replacement_per_volume() {
Expand All @@ -926,6 +936,20 @@ mod test {
let region_2_id = Uuid::new_v4();
let volume_id = Uuid::new_v4();

datastore
.volume_create(nexus_db_model::Volume::new(
volume_id,
serde_json::to_string(&VolumeConstructionRequest::Volume {
id: volume_id,
block_size: 512,
sub_volumes: vec![],
read_only_parent: None,
})
.unwrap(),
))
.await
.unwrap();

let request_1 = RegionReplacement::new(region_1_id, volume_id);
let request_2 = RegionReplacement::new(region_2_id, volume_id);

Expand Down Expand Up @@ -957,6 +981,20 @@ mod test {
let region_id = Uuid::new_v4();
let volume_id = Uuid::new_v4();

datastore
.volume_create(nexus_db_model::Volume::new(
volume_id,
serde_json::to_string(&VolumeConstructionRequest::Volume {
id: volume_id,
block_size: 512,
sub_volumes: vec![],
read_only_parent: None,
})
.unwrap(),
))
.await
.unwrap();

let request = {
let mut request = RegionReplacement::new(region_id, volume_id);
request.replacement_state = RegionReplacementState::Running;
Expand Down Expand Up @@ -1049,6 +1087,20 @@ mod test {
let region_id = Uuid::new_v4();
let volume_id = Uuid::new_v4();

datastore
.volume_create(nexus_db_model::Volume::new(
volume_id,
serde_json::to_string(&VolumeConstructionRequest::Volume {
id: volume_id,
block_size: 512,
sub_volumes: vec![],
read_only_parent: None,
})
.unwrap(),
))
.await
.unwrap();

let request = {
let mut request = RegionReplacement::new(region_id, volume_id);
request.replacement_state = RegionReplacementState::ReplacementDone;
Expand Down
Loading

0 comments on commit 09b150f

Please sign in to comment.