Skip to content

Commit

Permalink
deal with dead code in a more compiley way
Browse files Browse the repository at this point in the history
  • Loading branch information
hawkw committed May 29, 2024
1 parent 2dc69d1 commit 7b7dd98
Show file tree
Hide file tree
Showing 8 changed files with 183 additions and 213 deletions.
349 changes: 173 additions & 176 deletions nexus/src/app/instance_network.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,11 +13,9 @@ use nexus_db_model::Ipv4NatValues;
use nexus_db_model::Vni as DbVni;
use nexus_db_queries::authz;
use nexus_db_queries::context::OpContext;
use nexus_db_queries::db;
use nexus_db_queries::db::lookup::LookupPath;
use nexus_db_queries::db::DataStore;
use omicron_common::api::external::Error;
use omicron_common::api::internal::nexus;
use omicron_common::api::internal::shared::NetworkInterface;
use omicron_common::api::internal::shared::SwitchLocation;
use oxnet::Ipv4Net;
Expand Down Expand Up @@ -227,180 +225,179 @@ pub(crate) async fn boundary_switches(
Ok(boundary_switches)
}

/// Given old and new instance runtime states, determines the desired
/// networking configuration for a given instance and ensures it has been
/// propagated to all relevant sleds.
///
/// # Arguments
///
/// - `datastore`: the datastore to use for lookups and updates.
/// - `log`: the [`slog::Logger`] to log to.
/// - `resolver`: an internal DNS resolver to look up DPD service addresses.
/// - `opctx`: An operation context for this operation.
/// - `opctx_alloc`: An operational context used to list permissions for all
///   sleds. When called by methods on the [`Nexus`] type, this is the
///   `OpContext` used for instance allocation. In a background task, this may
///   be the background task's operational context; nothing stops you from
///   passing the same `OpContext` as both `opctx` and `opctx_alloc`.
/// - `authz_instance`: A resolved authorization context for the instance of
///   interest.
/// - `prev_instance_state`: The most-recently-recorded instance runtime
///   state for this instance.
/// - `new_instance_state`: The instance state that the caller of this routine
///   has observed and that should be used to set up this instance's
///   networking state.
/// - `v2p_notification_tx`: watch channel used to notify the V2P-mapping
///   background task that virtual-to-physical mappings may need to be
///   refreshed.
///
/// # Return value
///
/// `Ok(())` if this routine completed all the operations it wanted to
/// complete, or an appropriate `Err` otherwise.
#[allow(clippy::too_many_arguments)] // Yeah, I know, I know, Clippy...
#[allow(dead_code)] // TODO(eliza): this probably needs to be deleted eventually
pub(crate) async fn ensure_updated_instance_network_config(
    datastore: &DataStore,
    log: &slog::Logger,
    resolver: &internal_dns::resolver::Resolver,
    opctx: &OpContext,
    opctx_alloc: &OpContext,
    authz_instance: &authz::Instance,
    prev_instance_state: &db::model::InstanceRuntimeState,
    new_instance_state: &nexus::InstanceRuntimeState,
    v2p_notification_tx: tokio::sync::watch::Sender<()>,
) -> Result<(), Error> {
    let instance_id = authz_instance.id();

    // If this instance update is stale, do nothing, since the superseding
    // update may have allowed the instance's location to change further.
    if prev_instance_state.gen >= new_instance_state.gen.into() {
        debug!(log,
               "instance state generation already advanced, \
               won't touch network config";
               "instance_id" => %instance_id);

        return Ok(());
    }

    // If this update will retire the instance's active VMM, delete its
    // networking state. It will be re-established the next time the
    // instance starts.
    if new_instance_state.propolis_id.is_none() {
        info!(log,
              "instance cleared its Propolis ID, cleaning network config";
              "instance_id" => %instance_id,
              "propolis_id" => ?prev_instance_state.propolis_id);

        clear_instance_networking_state(
            datastore,
            log,
            resolver,
            opctx,
            opctx_alloc,
            authz_instance,
            v2p_notification_tx,
        )
        .await?;
        return Ok(());
    }

    // If the instance still has a migration in progress, don't change
    // any networking state until an update arrives that retires that
    // migration.
    //
    // This is needed to avoid the following race:
    //
    // 1. Migration from S to T completes.
    // 2. Migration source sends an update that changes the instance's
    //    active VMM but leaves the migration ID in place.
    // 3. Meanwhile, migration target sends an update that changes the
    //    instance's active VMM and clears the migration ID.
    // 4. The migration target's call updates networking state and commits
    //    the new instance record.
    // 5. The instance migrates from T to T' and Nexus applies networking
    //    configuration reflecting that the instance is on T'.
    // 6. The update in step 2 applies configuration saying the instance
    //    is on sled T.
    if new_instance_state.migration_id.is_some() {
        debug!(log,
               "instance still has a migration in progress, won't touch \
               network config";
               "instance_id" => %instance_id,
               "migration_id" => ?new_instance_state.migration_id);

        return Ok(());
    }

    // Safe to unwrap: the `is_none()` case returned early above.
    let new_propolis_id = new_instance_state.propolis_id.unwrap();

    // Updates that end live migration need to push OPTE V2P state even if
    // the instance's active sled did not change (see below).
    let migration_retired = prev_instance_state.migration_id.is_some()
        && new_instance_state.migration_id.is_none();

    if (prev_instance_state.propolis_id == new_instance_state.propolis_id)
        && !migration_retired
    {
        debug!(log, "instance didn't move, won't touch network config";
               "instance_id" => %instance_id);

        return Ok(());
    }

    // Either the instance moved from one sled to another, or it attempted
    // to migrate and failed. Ensure the correct networking configuration
    // exists for its current home.
    //
    // TODO(#3107) This is necessary even if the instance didn't move,
    // because registering a migration target on a sled creates OPTE ports
    // for its VNICs, and that creates new V2P mappings on that sled that
    // place the relevant virtual IPs on the local sled. Once OPTE stops
    // creating these mappings, this path only needs to be taken if an
    // instance has changed sleds.
    let new_sled_id = match datastore
        .vmm_fetch(&opctx, authz_instance, &new_propolis_id)
        .await
    {
        Ok(vmm) => vmm.sled_id,

        // A VMM in the active position should never be destroyed. If the
        // sled sending this message is the owner of the instance's last
        // active VMM and is destroying it, it should also have retired that
        // VMM.
        Err(Error::ObjectNotFound { .. }) => {
            error!(log, "instance's active vmm unexpectedly not found";
                   "instance_id" => %instance_id,
                   "propolis_id" => %new_propolis_id);

            return Ok(());
        }

        Err(e) => return Err(e),
    };

    // Best-effort notification: a send error only means the receiving
    // background task has gone away, so log it rather than failing the
    // whole operation.
    if let Err(e) = v2p_notification_tx.send(()) {
        error!(
            log,
            "error notifying background task of v2p change";
            "error" => ?e
        )
    };

    let (.., sled) =
        LookupPath::new(opctx, datastore).sled_id(new_sled_id).fetch().await?;

    instance_ensure_dpd_config(
        datastore,
        log,
        resolver,
        opctx,
        opctx_alloc,
        instance_id,
        &sled.address(),
        None,
    )
    .await?;

    Ok(())
}
// /// Given old and new instance runtime states, determines the desired
// /// networking configuration for a given instance and ensures it has been
// /// propagated to all relevant sleds.
// ///
// /// # Arguments
// ///
// /// - `datastore`: the datastore to use for lookups and updates.
// /// - `log`: the [`slog::Logger`] to log to.
// /// - `resolver`: an internal DNS resolver to look up DPD service addresses.
// /// - `opctx`: An operation context for this operation.
// /// - `opctx_alloc`: An operational context list permissions for all sleds. When
// /// called by methods on the [`Nexus`] type, this is the `OpContext` used for
// /// instance allocation. In a background task, this may be the background
// /// task's operational context; nothing stops you from passing the same
// /// `OpContext` as both `opctx` and `opctx_alloc`.
// /// - `authz_instance``: A resolved authorization context for the instance of
// /// interest.
// /// - `prev_instance_state``: The most-recently-recorded instance runtime
// /// state for this instance.
// /// - `new_instance_state`: The instance state that the caller of this routine
// /// has observed and that should be used to set up this instance's
// /// networking state.
// ///
// /// # Return value
// ///
// /// `Ok(())` if this routine completed all the operations it wanted to
// /// complete, or an appropriate `Err` otherwise.
// #[allow(clippy::too_many_arguments)] // Yeah, I know, I know, Clippy...
// pub(crate) async fn ensure_updated_instance_network_config(
// datastore: &DataStore,
// log: &slog::Logger,
// resolver: &internal_dns::resolver::Resolver,
// opctx: &OpContext,
// opctx_alloc: &OpContext,
// authz_instance: &authz::Instance,
// prev_instance_state: &db::model::InstanceRuntimeState,
// new_instance_state: &nexus::InstanceRuntimeState,
// v2p_notification_tx: tokio::sync::watch::Sender<()>,
// ) -> Result<(), Error> {
// let instance_id = authz_instance.id();

// // If this instance update is stale, do nothing, since the superseding
// // update may have allowed the instance's location to change further.
// if prev_instance_state.gen >= new_instance_state.gen.into() {
// debug!(log,
// "instance state generation already advanced, \
// won't touch network config";
// "instance_id" => %instance_id);

// return Ok(());
// }

// // If this update will retire the instance's active VMM, delete its
// // networking state. It will be re-established the next time the
// // instance starts.
// if new_instance_state.propolis_id.is_none() {
// info!(log,
// "instance cleared its Propolis ID, cleaning network config";
// "instance_id" => %instance_id,
// "propolis_id" => ?prev_instance_state.propolis_id);

// clear_instance_networking_state(
// datastore,
// log,
// resolver,
// opctx,
// opctx_alloc,
// authz_instance,
// v2p_notification_tx,
// )
// .await?;
// return Ok(());
// }

// // If the instance still has a migration in progress, don't change
// // any networking state until an update arrives that retires that
// // migration.
// //
// // This is needed to avoid the following race:
// //
// // 1. Migration from S to T completes.
// // 2. Migration source sends an update that changes the instance's
// // active VMM but leaves the migration ID in place.
// // 3. Meanwhile, migration target sends an update that changes the
// // instance's active VMM and clears the migration ID.
// // 4. The migration target's call updates networking state and commits
// // the new instance record.
// // 5. The instance migrates from T to T' and Nexus applies networking
// // configuration reflecting that the instance is on T'.
// // 6. The update in step 2 applies configuration saying the instance
// // is on sled T.
// if new_instance_state.migration_id.is_some() {
// debug!(log,
// "instance still has a migration in progress, won't touch \
// network config";
// "instance_id" => %instance_id,
// "migration_id" => ?new_instance_state.migration_id);

// return Ok(());
// }

// let new_propolis_id = new_instance_state.propolis_id.unwrap();

// // Updates that end live migration need to push OPTE V2P state even if
// // the instance's active sled did not change (see below).
// let migration_retired = prev_instance_state.migration_id.is_some()
// && new_instance_state.migration_id.is_none();

// if (prev_instance_state.propolis_id == new_instance_state.propolis_id)
// && !migration_retired
// {
// debug!(log, "instance didn't move, won't touch network config";
// "instance_id" => %instance_id);

// return Ok(());
// }

// // Either the instance moved from one sled to another, or it attempted
// // to migrate and failed. Ensure the correct networking configuration
// // exists for its current home.
// //
// // TODO(#3107) This is necessary even if the instance didn't move,
// // because registering a migration target on a sled creates OPTE ports
// // for its VNICs, and that creates new V2P mappings on that sled that
// // place the relevant virtual IPs on the local sled. Once OPTE stops
// // creating these mappings, this path only needs to be taken if an
// // instance has changed sleds.
// let new_sled_id = match datastore
// .vmm_fetch(&opctx, authz_instance, &new_propolis_id)
// .await
// {
// Ok(vmm) => vmm.sled_id,

// // A VMM in the active position should never be destroyed. If the
// // sled sending this message is the owner of the instance's last
// // active VMM and is destroying it, it should also have retired that
// // VMM.
// Err(Error::ObjectNotFound { .. }) => {
// error!(log, "instance's active vmm unexpectedly not found";
// "instance_id" => %instance_id,
// "propolis_id" => %new_propolis_id);

// return Ok(());
// }

// Err(e) => return Err(e),
// };

// if let Err(e) = v2p_notification_tx.send(()) {
// error!(
// log,
// "error notifying background task of v2p change";
// "error" => ?e
// )
// };

// let (.., sled) =
// LookupPath::new(opctx, datastore).sled_id(new_sled_id).fetch().await?;

// instance_ensure_dpd_config(
// datastore,
// log,
// resolver,
// opctx,
// opctx_alloc,
// instance_id,
// &sled.address(),
// None,
// )
// .await?;

// Ok(())
// }

/// Ensures that the Dendrite configuration for the supplied instance is
/// up-to-date.
Expand Down
16 changes: 7 additions & 9 deletions nexus/src/app/sagas/instance_update/destroyed.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@ use nexus_db_model::Vmm;
use nexus_db_queries::authn;
use nexus_db_queries::authz;
use nexus_db_queries::db::datastore::InstanceAndVmms;
use nexus_db_queries::db::identity::Resource;
use omicron_common::api::external;
use omicron_common::api::external::Error;
use omicron_common::api::external::InstanceState;
Expand Down Expand Up @@ -154,15 +153,14 @@ async fn siud_release_sled_resources(
async fn siud_release_virtual_provisioning(
sagactx: NexusActionContext,
) -> Result<(), ActionError> {
let Some((instance, vmm)) = get_destroyed_vmm(&sagactx)? else {
// if the update we are handling is not an active VMM destroyed update,
// bail --- there's nothing to do here.
return Ok(());
};

let osagactx = sagactx.user_data();
let Params { ref serialized_authn, ref authz_instance, vmm_id, .. } =
sagactx.saga_params::<Params>()?;
let Params {
ref serialized_authn,
ref authz_instance,
vmm_id,
instance,
..
} = sagactx.saga_params::<Params>()?;

let opctx =
crate::context::op_context_for_saga_action(&sagactx, serialized_authn);
Expand Down
Loading

0 comments on commit 7b7dd98

Please sign in to comment.