
Commit

okay, i think this is the migration part
hawkw committed Jun 21, 2024
1 parent 394b442 commit ca199fc
Showing 5 changed files with 146 additions and 244 deletions.
6 changes: 6 additions & 0 deletions nexus/db-model/src/migration.rs
@@ -84,6 +84,12 @@ impl Migration {
         }
     }
 
+    /// Returns `true` if either side reports that the migration is in a
+    /// terminal state.
+    pub fn is_terminal(&self) -> bool {
+        self.source_state.is_terminal() || self.target_state.is_terminal()
+    }
+
     /// Returns `true` if either side of the migration has failed.
     pub fn either_side_failed(&self) -> bool {
         self.source_state == MigrationState::FAILED
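
A minimal usage sketch for the new helper, assuming a hypothetical caller such as a background task walking migration records; only `Migration::is_terminal` comes from the diff above, the function and crate path are assumptions:

```rust
use nexus_db_model::Migration; // crate path assumed for this sketch

// Hypothetical helper, not part of this commit: keep only migration records
// that still need attention, i.e. where neither the source nor the target
// side has reported a terminal (completed or failed) state.
fn still_live(migrations: Vec<Migration>) -> Vec<Migration> {
    migrations.into_iter().filter(|m| !m.is_terminal()).collect()
}
```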
4 changes: 4 additions & 0 deletions nexus/db-model/src/migration_state.rs
@@ -32,6 +32,10 @@ impl MigrationState {
         MigrationState(nexus::MigrationState::Completed);
     pub const FAILED: MigrationState =
         MigrationState(nexus::MigrationState::Failed);
+    pub const PENDING: MigrationState =
+        MigrationState(nexus::MigrationState::Pending);
+    pub const IN_PROGRESS: MigrationState =
+        MigrationState(nexus::MigrationState::InProgress);
 
     /// Returns `true` if this migration state means that the migration is no
     /// longer in progress (it has either succeeded or failed).
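
The body of `MigrationState::is_terminal` falls outside this hunk; based on its doc comment and the newtype constants above, a plausible sketch is:

```rust
pub fn is_terminal(&self) -> bool {
    // Sketch only: the real body is not shown in this diff. `self.0` is the
    // wrapped `nexus::MigrationState`, as in the constants above.
    matches!(
        self.0,
        nexus::MigrationState::Completed | nexus::MigrationState::Failed
    )
}
```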
175 changes: 0 additions & 175 deletions nexus/src/app/instance_network.rs
@@ -227,181 +227,6 @@ pub(crate) async fn boundary_switches(
Ok(boundary_switches)
}

// /// Given old and new instance runtime states, determines the desired
// /// networking configuration for a given instance and ensures it has been
// /// propagated to all relevant sleds.
// ///
// /// # Arguments
// ///
// /// - `datastore`: the datastore to use for lookups and updates.
// /// - `log`: the [`slog::Logger`] to log to.
// /// - `resolver`: an internal DNS resolver to look up DPD service addresses.
// /// - `opctx`: An operation context for this operation.
// /// - `opctx_alloc`: An operational context listing permissions for all sleds. When
// /// called by methods on the [`Nexus`] type, this is the `OpContext` used for
// /// instance allocation. In a background task, this may be the background
// /// task's operational context; nothing stops you from passing the same
// /// `OpContext` as both `opctx` and `opctx_alloc`.
// /// - `authz_instance`: A resolved authorization context for the instance of
// /// interest.
// /// - `prev_instance_state`: The most-recently-recorded instance runtime
// /// state for this instance.
// /// - `new_instance_state`: The instance state that the caller of this routine
// /// has observed and that should be used to set up this instance's
// /// networking state.
// ///
// /// # Return value
// ///
// /// `Ok(())` if this routine completed all the operations it wanted to
// /// complete, or an appropriate `Err` otherwise.
// #[allow(clippy::too_many_arguments)] // Yeah, I know, I know, Clippy...
// pub(crate) async fn ensure_updated_instance_network_config(
// datastore: &DataStore,
// log: &slog::Logger,
// resolver: &internal_dns::resolver::Resolver,
// opctx: &OpContext,
// opctx_alloc: &OpContext,
// authz_instance: &authz::Instance,
// prev_instance_state: &db::model::InstanceRuntimeState,
// new_instance_state: &nexus::InstanceRuntimeState,
// v2p_notification_tx: tokio::sync::watch::Sender<()>,
// ) -> Result<(), Error> {
// let instance_id = authz_instance.id();

// // If this instance update is stale, do nothing, since the superseding
// // update may have allowed the instance's location to change further.
// if prev_instance_state.gen >= new_instance_state.gen.into() {
// debug!(log,
// "instance state generation already advanced, \
// won't touch network config";
// "instance_id" => %instance_id);

// return Ok(());
// }

// // If this update will retire the instance's active VMM, delete its
// // networking state. It will be re-established the next time the
// // instance starts.
// if new_instance_state.propolis_id.is_none() {
// info!(log,
// "instance cleared its Propolis ID, cleaning network config";
// "instance_id" => %instance_id,
// "propolis_id" => ?prev_instance_state.propolis_id);

// clear_instance_networking_state(
// datastore,
// log,
// resolver,
// opctx,
// opctx_alloc,
// authz_instance,
// v2p_notification_tx,
// )
// .await?;
// return Ok(());
// }

// // If the instance still has a migration in progress, don't change
// // any networking state until an update arrives that retires that
// // migration.
// //
// // This is needed to avoid the following race:
// //
// // 1. Migration from S to T completes.
// // 2. Migration source sends an update that changes the instance's
// // active VMM but leaves the migration ID in place.
// // 3. Meanwhile, migration target sends an update that changes the
// // instance's active VMM and clears the migration ID.
// // 4. The migration target's call updates networking state and commits
// // the new instance record.
// // 5. The instance migrates from T to T' and Nexus applies networking
// // configuration reflecting that the instance is on T'.
// // 6. The update in step 2 applies configuration saying the instance
// // is on sled T.
// if new_instance_state.migration_id.is_some() {
// debug!(log,
// "instance still has a migration in progress, won't touch \
// network config";
// "instance_id" => %instance_id,
// "migration_id" => ?new_instance_state.migration_id);

// return Ok(());
// }

// let new_propolis_id = new_instance_state.propolis_id.unwrap();

// // Updates that end live migration need to push OPTE V2P state even if
// // the instance's active sled did not change (see below).
// let migration_retired = prev_instance_state.migration_id.is_some()
// && new_instance_state.migration_id.is_none();

// if (prev_instance_state.propolis_id
// == new_instance_state.propolis_id.map(GenericUuid::into_untyped_uuid))
// && !migration_retired
// {
// debug!(log, "instance didn't move, won't touch network config";
// "instance_id" => %instance_id);

// return Ok(());
// }

// // Either the instance moved from one sled to another, or it attempted
// // to migrate and failed. Ensure the correct networking configuration
// // exists for its current home.
// //
// // TODO(#3107) This is necessary even if the instance didn't move,
// // because registering a migration target on a sled creates OPTE ports
// // for its VNICs, and that creates new V2P mappings on that sled that
// // place the relevant virtual IPs on the local sled. Once OPTE stops
// // creating these mappings, this path only needs to be taken if an
// // instance has changed sleds.
// let new_sled_id = match datastore
// .vmm_fetch(&opctx, authz_instance, &new_propolis_id)
// .await
// {
// Ok(vmm) => vmm.sled_id,

// // A VMM in the active position should never be destroyed. If the
// // sled sending this message is the owner of the instance's last
// // active VMM and is destroying it, it should also have retired that
// // VMM.
// Err(Error::ObjectNotFound { .. }) => {
// error!(log, "instance's active vmm unexpectedly not found";
// "instance_id" => %instance_id,
// "propolis_id" => %new_propolis_id);

// return Ok(());
// }

// Err(e) => return Err(e),
// };

// if let Err(e) = v2p_notification_tx.send(()) {
// error!(
// log,
// "error notifying background task of v2p change";
// "error" => ?e
// )
// };

// let (.., sled) =
// LookupPath::new(opctx, datastore).sled_id(new_sled_id).fetch().await?;

// instance_ensure_dpd_config(
// datastore,
// log,
// resolver,
// opctx,
// opctx_alloc,
// instance_id,
// &sled.address(),
// None,
// )
// .await?;

// Ok(())
// }

/// Ensures that the Dendrite configuration for the supplied instance is
/// up-to-date.
///
