Commit 97d1e40
PR feedback: SocketAddrV6, misc cleanup
gjcolombo committed Dec 20, 2024
1 parent cf4053f commit 97d1e40
Showing 6 changed files with 66 additions and 86 deletions.

nexus/db-queries/src/db/datastore/volume.rs: 23 changes (15 additions, 8 deletions)

@@ -2578,11 +2578,15 @@ fn region_in_vcr(

         VolumeConstructionRequest::Region { opts, .. } => {
             for target in &opts.target {
-                if let SocketAddr::V6(target) = target {
-                    if *target == *region {
+                match target {
+                    SocketAddr::V6(t) if *t == *region => {
                         region_found = true;
                         break;
                     }
+                    SocketAddr::V6(_) => {}
+                    SocketAddr::V4(_) => {
+                        bail!("region target contains an IPv4 address");
+                    }
                 }
             }
         }

@@ -2644,10 +2648,16 @@ fn read_only_target_in_vcr(
             }

             for target in &opts.target {
-                if let SocketAddr::V6(target) = target {
-                    if *target == *read_only_target && opts.read_only {
+                match target {
+                    SocketAddr::V6(t)
+                        if *t == *read_only_target && opts.read_only =>
+                    {
                         return Ok(true);
                     }
+                    SocketAddr::V6(_) => {}
+                    SocketAddr::V4(_) => {
+                        bail!("region target contains an IPv4 address");
+                    }
                 }
             }
         }
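
Both hunks above swap an `if let` that silently skipped non-IPv6 addresses for an exhaustive `match` that treats an IPv4 target as a hard error. A minimal standalone sketch of the pattern; the `target_matches` helper is illustrative, not from the diff, and assumes the `anyhow` crate:

```rust
use std::net::{SocketAddr, SocketAddrV6};

use anyhow::bail;

// Illustrative helper: return whether `wanted` appears in `targets`,
// failing loudly if any target is IPv4.
fn target_matches(
    targets: &[SocketAddr],
    wanted: &SocketAddrV6,
) -> anyhow::Result<bool> {
    for target in targets {
        match target {
            // Guard arm: an IPv6 target equal to the one we want.
            SocketAddr::V6(t) if *t == *wanted => return Ok(true),
            // Other IPv6 targets are fine; keep scanning.
            SocketAddr::V6(_) => {}
            // IPv4 should never appear in a VCR target list.
            SocketAddr::V4(_) => bail!("target list contains an IPv4 address"),
        }
    }
    Ok(false)
}
```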

@@ -4986,10 +4996,7 @@ mod tests {
             .unwrap();

         let volumes = datastore
-            .find_volumes_referencing_socket_addr(
-                &opctx,
-                address_1.into(),
-            )
+            .find_volumes_referencing_socket_addr(&opctx, address_1.into())
             .await
             .unwrap();

nexus/src/app/background/tasks/region_snapshot_replacement_step.rs: 30 changes (13 additions, 17 deletions)

@@ -568,14 +568,15 @@ mod test {
     use omicron_uuid_kinds::DatasetUuid;
     use sled_agent_client::CrucibleOpts;
     use sled_agent_client::VolumeConstructionRequest;
+    use std::net::SocketAddrV6;
     use uuid::Uuid;

     type ControlPlaneTestContext =
         nexus_test_utils::ControlPlaneTestContext<crate::Server>;

     async fn add_fake_volume_for_snapshot_addr(
         datastore: &DataStore,
-        snapshot_addr: String,
+        snapshot_addr: SocketAddrV6,
     ) -> Uuid {
         let new_volume_id = Uuid::new_v4();

@@ -587,7 +588,7 @@ mod test {
                 DatasetUuid::new_v4(),
                 Uuid::new_v4(),
                 Uuid::new_v4(),
-                snapshot_addr.clone(),
+                snapshot_addr.to_string(),
             ))
             .await
             .unwrap();

@@ -604,7 +605,7 @@ mod test {
                 gen: 0,
                 opts: CrucibleOpts {
                     id: Uuid::new_v4(),
-                    target: vec![snapshot_addr.parse().unwrap()],
+                    target: vec![snapshot_addr.into()],
                     lossy: false,
                     flush_timeout: None,
                     key: None,

@@ -656,13 +657,14 @@ mod test {
         let dataset_id = DatasetUuid::new_v4();
         let region_id = Uuid::new_v4();
         let snapshot_id = Uuid::new_v4();
-        let snapshot_addr = String::from("[fd00:1122:3344::101]:9876");
+        let snapshot_addr: SocketAddrV6 =
+            "[fd00:1122:3344::101]:9876".parse().unwrap();

         let fake_region_snapshot = RegionSnapshot::new(
             dataset_id,
             region_id,
             snapshot_id,
-            snapshot_addr.clone(),
+            snapshot_addr.to_string(),
         );

         datastore.region_snapshot_create(fake_region_snapshot).await.unwrap();

@@ -746,28 +748,22 @@ mod test {
         // Add some fake volumes that reference the region snapshot being
         // replaced

-        let new_volume_1_id = add_fake_volume_for_snapshot_addr(
-            &datastore,
-            snapshot_addr.clone(),
-        )
-        .await;
-        let new_volume_2_id = add_fake_volume_for_snapshot_addr(
-            &datastore,
-            snapshot_addr.clone(),
-        )
-        .await;
+        let new_volume_1_id =
+            add_fake_volume_for_snapshot_addr(&datastore, snapshot_addr).await;
+        let new_volume_2_id =
+            add_fake_volume_for_snapshot_addr(&datastore, snapshot_addr).await;

         // Add some fake volumes that do not

         let other_volume_1_id = add_fake_volume_for_snapshot_addr(
             &datastore,
-            String::from("[fd00:1122:3344::101]:1000"),
+            "[fd00:1122:3344::101]:1000".parse().unwrap(),
         )
         .await;

         let other_volume_2_id = add_fake_volume_for_snapshot_addr(
             &datastore,
-            String::from("[fd12:5544:3344::912]:3901"),
+            "[fd12:5544:3344::912]:3901".parse().unwrap(),
         )
         .await;
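
The test changes above lean on `SocketAddrV6` being `Copy`: the helper takes the address by value with no `.clone()`, and the `String` conversions move to the boundaries. A small sketch of why this works; the `demo` function is hypothetical, not part of the change:

```rust
use std::net::{SocketAddr, SocketAddrV6};

// Hypothetical demo: a `SocketAddrV6` is copied, not moved, and is
// converted explicitly at each boundary that needs another type.
fn demo() {
    let snapshot_addr: SocketAddrV6 =
        "[fd00:1122:3344::101]:9876".parse().unwrap();

    let for_db: String = snapshot_addr.to_string(); // DB model wants a string
    let for_vcr: SocketAddr = snapshot_addr.into(); // VCR targets want SocketAddr

    // `snapshot_addr` is still usable here because it was copied above.
    assert_eq!(for_db, "[fd00:1122:3344::101]:9876");
    assert_eq!(for_vcr, SocketAddr::V6(snapshot_addr));
}
```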
nexus/src/app/instance.rs: 46 changes (7 additions, 39 deletions)

@@ -1052,7 +1052,6 @@ impl super::Nexus {
         )
         .await?;

-        let mut found_boot_disk = None;
         let mut disk_reqs = vec![];
         for disk in &disks {
             // Disks that are attached to an instance should always have a slot

@@ -1087,12 +1086,6 @@ impl super::Nexus {
             )
             .await?;

-            if let Some(wanted_id) = &db_instance.boot_disk_id {
-                if disk.id() == *wanted_id {
-                    found_boot_disk = Some(*wanted_id);
-                }
-            }
-
             disk_reqs.push(sled_agent_client::types::InstanceDisk {
                 disk_id: disk.id(),
                 name: disk.name().to_string(),

@@ -1102,38 +1095,13 @@ impl super::Nexus {
             });
         }

-        let boot_settings = if let Some(boot_disk_id) = found_boot_disk {
-            Some(InstanceBootSettings { order: vec![boot_disk_id] })
-        } else {
-            if let Some(instance_boot_disk_id) =
-                db_instance.boot_disk_id.as_ref()
-            {
-                // This should never occur: when setting the boot disk we ensure it is
-                // attached, and when detaching a disk we ensure it is not the boot
-                // disk. If this error is seen, the instance somehow had a boot disk
-                // that was not attached anyway.
-                //
-                // When Propolis accepts an ID rather than name, and we don't need to
-                // look up a name when assembling the Propolis request, we might as well
-                // remove this check; we can just pass the ID and rely on Propolis' own
-                // check that the boot disk is attached.
-                if found_boot_disk.is_none() {
-                    error!(self.log, "instance boot disk is not attached";
-                           "boot_disk_id" => ?instance_boot_disk_id,
-                           "instance id" => %db_instance.id());
-
-                    return Err(InstanceStateChangeError::Other(
-                        Error::internal_error(&format!(
-                            "instance {} has boot disk {:?} but it is not attached",
-                            db_instance.id(),
-                            db_instance.boot_disk_id.as_ref(),
-                        )),
-                    ));
-                }
-            }
-
-            None
-        };
+        // The routines that maintain an instance's boot options are supposed to
+        // guarantee that the boot disk ID, if present, is the ID of an attached
+        // disk. If this invariant isn't upheld, Propolis will catch the failure
+        // when it processes its received VM configuration.
+        let boot_settings = db_instance
+            .boot_disk_id
+            .map(|id| InstanceBootSettings { order: vec![id] });

         let nics = self
             .db_datastore
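
With the attached-boot-disk invariant maintained elsewhere, the replacement collapses the branchy bookkeeping to a single `Option::map`. A reduced sketch of that shape; `boot_settings_for` and the local `InstanceBootSettings` struct are stand-ins, not the real Nexus types:

```rust
use uuid::Uuid;

// Local stand-in for illustration; the real type lives in the Nexus tree.
struct InstanceBootSettings {
    order: Vec<Uuid>,
}

// If a boot disk ID is present it is guaranteed (elsewhere) to be attached,
// so building the boot settings is a one-liner.
fn boot_settings_for(
    boot_disk_id: Option<Uuid>,
) -> Option<InstanceBootSettings> {
    boot_disk_id.map(|id| InstanceBootSettings { order: vec![id] })
}
```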
nexus/src/app/sagas/snapshot_create.rs: 27 changes (19 additions, 8 deletions)

@@ -125,6 +125,8 @@ use steno::ActionError;
 use steno::Node;
 use uuid::Uuid;

+type ReplaceSocketsMap = BTreeMap<SocketAddrV6, SocketAddrV6>;
+
 // snapshot create saga: input parameters

 #[derive(Debug, Deserialize, Serialize)]
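
The alias keeps the map's intent (IPv6-only replacements) in one place. A hypothetical usage sketch with made-up addresses:

```rust
use std::collections::BTreeMap;
use std::net::SocketAddrV6;

type ReplaceSocketsMap = BTreeMap<SocketAddrV6, SocketAddrV6>;

// Hypothetical: map a region's address to the read-only downstairs that
// serves its snapshot. Addresses are invented for illustration.
fn demo() {
    let region_addr: SocketAddrV6 =
        "[fd00:1122:3344::101]:19000".parse().unwrap();
    let snapshot_addr: SocketAddrV6 =
        "[fd00:1122:3344::101]:19001".parse().unwrap();

    let mut map = ReplaceSocketsMap::new();
    map.insert(region_addr, snapshot_addr);
    assert_eq!(map.get(&region_addr), Some(&snapshot_addr));
}
```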

@@ -1384,7 +1386,7 @@ async fn ssc_detach_disk_from_pantry(

 async fn ssc_start_running_snapshot(
     sagactx: NexusActionContext,
-) -> Result<BTreeMap<SocketAddr, SocketAddr>, ActionError> {
+) -> Result<ReplaceSocketsMap, ActionError> {
     let log = sagactx.user_data().log();
     let osagactx = sagactx.user_data();
     let params = sagactx.saga_params::<Params>()?;

@@ -1410,7 +1412,7 @@ async fn ssc_start_running_snapshot(
         .await
         .map_err(ActionError::action_failed)?;

-    let mut map: BTreeMap<SocketAddr, SocketAddr> = BTreeMap::new();
+    let mut map: ReplaceSocketsMap = BTreeMap::new();

     for (dataset, region) in datasets_and_regions {
         let Some(dataset_addr) = dataset.address() else {

@@ -1454,7 +1456,7 @@ async fn ssc_start_running_snapshot(
         );

         info!(log, "map {} to {}", region_addr, snapshot_addr);
-        map.insert(region_addr.into(), snapshot_addr.into());
+        map.insert(region_addr, snapshot_addr);

         // Once snapshot has been validated, and running snapshot has been
         // started, add an entry in the region_snapshot table to correspond to

@@ -1564,8 +1566,8 @@ async fn ssc_create_volume_record(
     // The volume construction request must then be modified to point to the
     // read-only crucible agent downstairs (corresponding to this snapshot)
     // launched through this saga.
-    let replace_sockets_map = sagactx
-        .lookup::<BTreeMap<SocketAddr, SocketAddr>>("replace_sockets_map")?;
+    let replace_sockets_map =
+        sagactx.lookup::<ReplaceSocketsMap>("replace_sockets_map")?;
     let snapshot_volume_construction_request: VolumeConstructionRequest =
         create_snapshot_from_disk(
             &disk_volume_construction_request,

@@ -1689,7 +1691,7 @@ async fn ssc_release_volume_lock(
 /// VolumeConstructionRequest and modifying it accordingly.
 fn create_snapshot_from_disk(
     disk: &VolumeConstructionRequest,
-    socket_map: &BTreeMap<SocketAddr, SocketAddr>,
+    socket_map: &ReplaceSocketsMap,
 ) -> anyhow::Result<VolumeConstructionRequest> {
     // When copying a disk's VolumeConstructionRequest to turn it into a
     // snapshot:

@@ -1751,6 +1753,16 @@ fn create_snapshot_from_disk(

         if work.socket_modification_required {
             for target in &mut opts.target {
+                let target = match target {
+                    SocketAddr::V6(v6) => v6,
+                    SocketAddr::V4(_) => {
+                        anyhow::bail!(
+                            "unexpected IPv4 address in VCR: {:?}",
+                            work.vcr_part
+                        )
+                    }
+                };
+
                 *target = *socket_map.get(target).ok_or_else(|| {
                     anyhow!("target {} not found in map!", target)
                 })?;
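
The new block above rebinds `target` from `&mut SocketAddr` to the inner `&mut SocketAddrV6`, so the write-through replacement only type-checks for IPv6 and an IPv4 target is rejected outright. A self-contained sketch of the same trick; the `replace_target` helper is illustrative and assumes `anyhow`:

```rust
use std::net::{SocketAddr, SocketAddrV6};

use anyhow::bail;

// Illustrative helper: shadow `target` with the inner `&mut SocketAddrV6`
// so the assignment writes through the original reference.
fn replace_target(
    target: &mut SocketAddr,
    replacement: SocketAddrV6,
) -> anyhow::Result<()> {
    let target = match target {
        SocketAddr::V6(v6) => v6,
        SocketAddr::V4(_) => bail!("unexpected IPv4 address in target"),
    };
    *target = replacement;
    Ok(())
}
```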

@@ -1869,8 +1881,7 @@ mod test {
             ],
         };

-        let mut replace_sockets: BTreeMap<SocketAddr, SocketAddr> =
-            BTreeMap::new();
+        let mut replace_sockets = ReplaceSocketsMap::new();

         // Replacements for top level Region only
         replace_sockets.insert(
nexus/tests/integration_tests/volume_management.rs: 15 changes (7 additions, 8 deletions)

@@ -3567,7 +3567,7 @@ struct TestReadOnlyRegionReferenceUsage {
     datastore: Arc<DataStore>,

     region: db::model::Region,
-    region_address: SocketAddr,
+    region_address: SocketAddrV6,

     first_volume_id: Uuid,
     second_volume_id: Uuid,

@@ -3628,9 +3628,8 @@ impl TestReadOnlyRegionReferenceUsage {
         // so fill in a random port here.
         datastore.region_set_port(region.id(), 12345).await.unwrap();

-        let region_address = SocketAddr::V6(
-            datastore.region_addr(region.id()).await.unwrap().unwrap(),
-        );
+        let region_address =
+            datastore.region_addr(region.id()).await.unwrap().unwrap();

         let region = datastore.get_region(region.id()).await.unwrap();

@@ -3661,7 +3660,7 @@ impl TestReadOnlyRegionReferenceUsage {
                 gen: 1,
                 opts: CrucibleOpts {
                     id: Uuid::new_v4(),
-                    target: vec![self.region_address],
+                    target: vec![self.region_address.into()],
                     lossy: false,
                     flush_timeout: None,
                     key: None,

@@ -3791,7 +3790,7 @@ impl TestReadOnlyRegionReferenceUsage {
                 gen: 1,
                 opts: CrucibleOpts {
                     id: Uuid::new_v4(),
-                    target: vec![self.region_address],
+                    target: vec![self.region_address.into()],
                     lossy: false,
                     flush_timeout: None,
                     key: None,

@@ -3824,7 +3823,7 @@ impl TestReadOnlyRegionReferenceUsage {
                 gen: 1,
                 opts: CrucibleOpts {
                     id: Uuid::new_v4(),
-                    target: vec![self.region_address],
+                    target: vec![self.region_address.into()],
                     lossy: false,
                     flush_timeout: None,
                     key: None,

@@ -3859,7 +3858,7 @@ impl TestReadOnlyRegionReferenceUsage {
                 gen: 1,
                 opts: CrucibleOpts {
                     id: Uuid::new_v4(),
-                    target: vec![self.region_address],
+                    target: vec![self.region_address.into()],
                     lossy: false,
                     flush_timeout: None,
                     key: None,

sled-agent/src/instance.rs: 11 changes (5 additions, 6 deletions)

@@ -869,8 +869,8 @@ impl InstanceRunner {
         // components (e.g., converting device slot numbers into concrete PCI
         // bus/device/function numbers).
         //
-        // Today's Propolis VM creation API (the one used below) differs from
-        // this scheme in two important ways:
+        // The Propolis VM creation API used below differs from this scheme in
+        // two important ways:
         //
         // 1. Callers are responsible for filling in all the details of a VM's
         //    configuration (e.g., choosing PCI BDFs for PCI devices).

@@ -974,20 +974,19 @@ impl InstanceRunner {
         let nics: Vec<NicComponents> = running_zone
             .opte_ports()
             .map(|port| -> Result<NicComponents, Error> {
-                let pos = self
+                let nic = self
                     .requested_nics
                     .iter()
                     // We expect to match NIC slots to OPTE port slots.
                     // Error out if we can't find a NIC for a port.
-                    .position(|nic| nic.slot == port.slot())
+                    .find(|nic| nic.slot == port.slot())
                     .ok_or(Error::Opte(
                         illumos_utils::opte::Error::NoNicforPort(
                             port.name().into(),
                             port.slot().into(),
                         ),
                     ))?;

-                let nic = &self.requested_nics[pos];
                 Ok(NicComponents {
                     id: nic.id,
                     device: VirtioNic {
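
The cleanup above replaces `position` plus a second indexing step with a single `find`, which returns the matching element directly. A generic illustration; the data here is made up, whereas the diff matches requested NICs against OPTE port slots:

```rust
// Hypothetical demo of `find` vs. `position` on a slice iterator.
fn demo() {
    let nic_slots = [0u8, 1, 2];
    let wanted_slot = 1u8;

    // Before: two steps, with an extra bounds-checked index.
    let pos =
        nic_slots.iter().position(|slot| *slot == wanted_slot).unwrap();
    let nic_by_index = &nic_slots[pos];

    // After: one step, yielding the element itself.
    let nic = nic_slots.iter().find(|slot| **slot == wanted_slot).unwrap();

    assert_eq!(nic_by_index, nic);
}
```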

@@ -1012,7 +1011,7 @@ impl InstanceRunner {
         // - Crucible disks: the target needs to connect to its downstairs
         //   instances with new generation numbers supplied from Nexus
         // - OPTE ports: the target needs to bind its VNICs to the correct
-        //   devices for its new host, which devices may have different names
+        //   devices for its new host; those devices may have different names
         //   than their counterparts on the source host
         //
         // If this is a request to migrate, construct a list of source component
