From 51d779ff7b87fc7f1f45099509a9424a0054faea Mon Sep 17 00:00:00 2001 From: Rain Date: Tue, 20 Aug 2024 13:16:37 -0700 Subject: [PATCH 1/4] [spr] changes to main this commit is based on Created using spr 1.3.6-beta.1 [skip ci] --- .../execution/src/cockroachdb.rs | 22 ++-- nexus/reconfigurator/execution/src/dns.rs | 106 +++++++++--------- nexus/reconfigurator/execution/src/lib.rs | 28 +++-- .../execution/src/omicron_physical_disks.rs | 51 ++++++--- .../background/tasks/blueprint_execution.rs | 7 +- 5 files changed, 130 insertions(+), 84 deletions(-) diff --git a/nexus/reconfigurator/execution/src/cockroachdb.rs b/nexus/reconfigurator/execution/src/cockroachdb.rs index 277f5f91c4..01baebfb57 100644 --- a/nexus/reconfigurator/execution/src/cockroachdb.rs +++ b/nexus/reconfigurator/execution/src/cockroachdb.rs @@ -34,6 +34,7 @@ pub(crate) async fn ensure_settings( mod test { use super::*; use crate::overridables::Overridables; + use crate::RealizeBlueprintOutput; use nexus_db_queries::authn; use nexus_db_queries::authz; use nexus_test_utils_macros::nexus_test; @@ -97,16 +98,17 @@ mod test { .await; // Execute the initial blueprint. let overrides = Overridables::for_test(cptestctx); - crate::realize_blueprint_with_overrides( - &opctx, - datastore, - resolver, - &blueprint, - Uuid::new_v4(), - &overrides, - ) - .await - .expect("failed to execute initial blueprint"); + let _: RealizeBlueprintOutput = + crate::realize_blueprint_with_overrides( + &opctx, + datastore, + resolver, + &blueprint, + Uuid::new_v4(), + &overrides, + ) + .await + .expect("failed to execute initial blueprint"); // The CockroachDB settings should not have changed. assert_eq!( settings, diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index 4395944b25..9ca14f8e24 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -458,6 +458,7 @@ pub fn blueprint_nexus_external_ips(blueprint: &Blueprint) -> Vec { mod test { use super::*; use crate::overridables::Overridables; + use crate::RealizeBlueprintOutput; use crate::Sled; use dns_service_client::DnsDiff; use internal_dns::config::Host; @@ -1245,16 +1246,17 @@ mod test { // Now, execute the initial blueprint. let overrides = Overridables::for_test(cptestctx); - crate::realize_blueprint_with_overrides( - &opctx, - datastore, - resolver, - &blueprint, - Uuid::new_v4(), - &overrides, - ) - .await - .expect("failed to execute initial blueprint"); + let _: RealizeBlueprintOutput = + crate::realize_blueprint_with_overrides( + &opctx, + datastore, + resolver, + &blueprint, + Uuid::new_v4(), + &overrides, + ) + .await + .expect("failed to execute initial blueprint"); // DNS ought not to have changed. verify_dns_unchanged( @@ -1385,16 +1387,17 @@ mod test { .await .expect("failed to set blueprint as target"); - crate::realize_blueprint_with_overrides( - &opctx, - datastore, - resolver, - &blueprint2, - Uuid::new_v4(), - &overrides, - ) - .await - .expect("failed to execute second blueprint"); + let _: RealizeBlueprintOutput = + crate::realize_blueprint_with_overrides( + &opctx, + datastore, + resolver, + &blueprint2, + Uuid::new_v4(), + &overrides, + ) + .await + .expect("failed to execute second blueprint"); // Now fetch DNS again. Both should have changed this time. let dns_latest_internal = datastore @@ -1459,16 +1462,17 @@ mod test { } // If we execute it again, we should see no more changes. 
-        crate::realize_blueprint_with_overrides(
-            &opctx,
-            datastore,
-            resolver,
-            &blueprint2,
-            Uuid::new_v4(),
-            &overrides,
-        )
-        .await
-        .expect("failed to execute second blueprint again");
+        let _: RealizeBlueprintOutput =
+            crate::realize_blueprint_with_overrides(
+                &opctx,
+                datastore,
+                resolver,
+                &blueprint2,
+                Uuid::new_v4(),
+                &overrides,
+            )
+            .await
+            .expect("failed to execute second blueprint again");
         verify_dns_unchanged(
             &opctx,
             datastore,
@@ -1495,16 +1499,17 @@ mod test {
 
         // One more time, make sure that executing the blueprint does not do
         // anything.
-        crate::realize_blueprint_with_overrides(
-            &opctx,
-            datastore,
-            resolver,
-            &blueprint2,
-            Uuid::new_v4(),
-            &overrides,
-        )
-        .await
-        .expect("failed to execute second blueprint again");
+        let _: RealizeBlueprintOutput =
+            crate::realize_blueprint_with_overrides(
+                &opctx,
+                datastore,
+                resolver,
+                &blueprint2,
+                Uuid::new_v4(),
+                &overrides,
+            )
+            .await
+            .expect("failed to execute second blueprint again");
         verify_dns_unchanged(
             &opctx,
             datastore,
@@ -1589,16 +1594,17 @@ mod test {
         );
 
         // If we execute the blueprint, DNS should not be changed.
-        crate::realize_blueprint_with_overrides(
-            &opctx,
-            datastore,
-            resolver,
-            &blueprint,
-            Uuid::new_v4(),
-            &overrides,
-        )
-        .await
-        .expect("failed to execute blueprint");
+        let _: RealizeBlueprintOutput =
+            crate::realize_blueprint_with_overrides(
+                &opctx,
+                datastore,
+                resolver,
+                &blueprint,
+                Uuid::new_v4(),
+                &overrides,
+            )
+            .await
+            .expect("failed to execute blueprint");
         let dns_latest_internal = datastore
             .dns_config_read(&opctx, DnsGroup::Internal)
             .await
diff --git a/nexus/reconfigurator/execution/src/lib.rs b/nexus/reconfigurator/execution/src/lib.rs
index 8606187762..2c70c7acbb 100644
--- a/nexus/reconfigurator/execution/src/lib.rs
+++ b/nexus/reconfigurator/execution/src/lib.rs
@@ -70,6 +70,15 @@ impl From for Sled {
     }
 }
 
+/// The result of calling [`realize_blueprint`] or
+/// [`realize_blueprint_with_overrides`].
+#[derive(Debug)]
+#[must_use = "the output of realize_blueprint should probably be used"]
+pub struct RealizeBlueprintOutput {
+    /// Whether any sagas need to be reassigned to a new Nexus.
+    pub needs_saga_recovery: bool,
+}
+
 /// Make one attempt to realize the given blueprint, meaning to take actions to
 /// alter the real system to match the blueprint
 ///
@@ -81,7 +90,7 @@ pub async fn realize_blueprint(
     resolver: &Resolver,
     blueprint: &Blueprint,
     nexus_id: Uuid,
-) -> Result<bool, Vec<anyhow::Error>> {
+) -> Result<RealizeBlueprintOutput, Vec<anyhow::Error>> {
     realize_blueprint_with_overrides(
         opctx,
         datastore,
@@ -100,7 +109,7 @@ pub async fn realize_blueprint_with_overrides(
     blueprint: &Blueprint,
     nexus_id: Uuid,
     overrides: &Overridables,
-) -> Result<bool, Vec<anyhow::Error>> {
+) -> Result<RealizeBlueprintOutput, Vec<anyhow::Error>> {
     let opctx = opctx.child(BTreeMap::from([(
         "comment".to_string(),
         blueprint.comment.clone(),
@@ -132,7 +141,7 @@ pub async fn realize_blueprint_with_overrides(
         })
         .collect();
 
-    omicron_physical_disks::deploy_disks(
+    let deploy_disks_done = omicron_physical_disks::deploy_disks(
         &opctx,
         &sleds_by_id,
         &blueprint.blueprint_disks,
@@ -205,11 +214,12 @@ pub async fn realize_blueprint_with_overrides(
     )
     .await?;
-    // This depends on the "deploy_disks" call earlier -- disk expungement is a
-    // statement of policy, but we need to be assured that the Sled Agent has
-    // stopped using that disk before we can mark its state as decommissioned.
-    omicron_physical_disks::decommission_expunged_disks(&opctx, datastore)
-        .await?;
+    omicron_physical_disks::decommission_expunged_disks(
+        &opctx,
+        datastore,
+        deploy_disks_done,
+    )
+    .await?;
 
     // From this point on, we'll assume that any errors that we encounter do
     // *not* require stopping execution. We'll just accumulate them and return
     // them all at the end.
@@ -244,7 +254,7 @@ pub async fn realize_blueprint_with_overrides(
     }
 
     if errors.is_empty() {
-        Ok(needs_saga_recovery)
+        Ok(RealizeBlueprintOutput { needs_saga_recovery })
     } else {
         Err(errors)
     }
diff --git a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs
index 7adc41213e..af95eb8e77 100644
--- a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs
+++ b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs
@@ -25,7 +25,7 @@ pub(crate) async fn deploy_disks(
     opctx: &OpContext,
     sleds_by_id: &BTreeMap<SledUuid, Sled>,
     sled_configs: &BTreeMap<SledUuid, BlueprintPhysicalDisksConfig>,
-) -> Result<(), Vec<anyhow::Error>> {
+) -> Result<DeployDisksDone, Vec<anyhow::Error>> {
     let errors: Vec<_> = stream::iter(sled_configs)
         .filter_map(|(sled_id, config)| async move {
             let log = opctx.log.new(o!(
@@ -92,16 +92,26 @@ pub(crate) async fn deploy_disks(
         .await;
 
     if errors.is_empty() {
-        Ok(())
+        Ok(DeployDisksDone {})
     } else {
         Err(errors)
     }
 }
 
-/// Decommissions all disks which are currently expunged
+/// Typestate indicating that the deploy disks step was performed.
+#[derive(Debug)]
+#[must_use = "this should be passed into decommission_expunged_disks"]
+pub(crate) struct DeployDisksDone {}
+
+/// Decommissions all disks which are currently expunged.
 pub(crate) async fn decommission_expunged_disks(
     opctx: &OpContext,
     datastore: &DataStore,
+    // This is taken as a parameter to ensure that this depends on a
+    // "deploy_disks" call made earlier. Disk expungement is a statement of
+    // policy, but we need to be assured that the Sled Agent has stopped using
+    // that disk before we can mark its state as decommissioned.
+    _deploy_disks_done: DeployDisksDone,
 ) -> Result<(), Vec<anyhow::Error>> {
     datastore
         .physical_disk_decommission_all_expunged(&opctx)
@@ -113,6 +123,7 @@ pub(crate) async fn decommission_expunged_disks(
 #[cfg(test)]
 mod test {
     use super::deploy_disks;
+    use super::DeployDisksDone;
 
     use crate::DataStore;
     use crate::Sled;
@@ -217,9 +228,13 @@ mod test {
         // Get a success result back when the blueprint has an empty set of
         // disks.
         let (_, blueprint) = create_blueprint(BTreeMap::new());
-        deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks)
-            .await
-            .expect("failed to deploy no disks");
+        // Use an explicit type here because not doing so can cause errors to
+        // be ignored (this behavior is genuinely terrible). Instead, ensure
+        // that the type has the right result.
+        let _: DeployDisksDone =
+            deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks)
+                .await
+                .expect("failed to deploy no disks");
 
         // Disks are updated in a particular order, but each request contains
         // the full set of disks that must be running.
@@ -272,9 +287,10 @@ mod test {
         }
 
         // Execute it.
- deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) - .await - .expect("failed to deploy initial disks"); + let _: DeployDisksDone = + deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) + .await + .expect("failed to deploy initial disks"); s1.verify_and_clear(); s2.verify_and_clear(); @@ -293,9 +309,10 @@ mod test { )), ); } - deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) - .await - .expect("failed to deploy same disks"); + let _: DeployDisksDone = + deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) + .await + .expect("failed to deploy same disks"); s1.verify_and_clear(); s2.verify_and_clear(); @@ -567,7 +584,15 @@ mod test { assert_eq!(d.disk_state, PhysicalDiskState::Active); assert_eq!(d.disk_policy, PhysicalDiskPolicy::InService); - super::decommission_expunged_disks(&opctx, &datastore).await.unwrap(); + super::decommission_expunged_disks( + &opctx, + &datastore, + // This is an internal test, and we're testing decommissioning in + // isolation, so it's okay to create the typestate here. + DeployDisksDone {}, + ) + .await + .unwrap(); // After decommissioning, we see the expunged disk become // decommissioned. The other disk remains in-service. diff --git a/nexus/src/app/background/tasks/blueprint_execution.rs b/nexus/src/app/background/tasks/blueprint_execution.rs index b430270ec9..44d1e94675 100644 --- a/nexus/src/app/background/tasks/blueprint_execution.rs +++ b/nexus/src/app/background/tasks/blueprint_execution.rs @@ -101,12 +101,15 @@ impl BlueprintExecutor { // If executing the blueprint requires activating the saga recovery // background task, do that now. info!(&opctx.log, "activating saga recovery task"); - if let Ok(true) = result { - self.saga_recovery.activate(); + if let Ok(output) = &result { + if output.needs_saga_recovery { + self.saga_recovery.activate(); + } } // Return the result as a `serde_json::Value` match result { + // TODO: should we serialize the output above as a JSON object? 
Ok(_) => json!({}), Err(errors) => { let errors: Vec<_> = From 88e0da5f49f04132b5b1d53e13ec8461ac64c798 Mon Sep 17 00:00:00 2001 From: Rain Date: Mon, 26 Aug 2024 22:33:07 -0700 Subject: [PATCH 2/4] [spr] changes introduced through rebase Created using spr 1.3.6-beta.1 [skip ci] --- dev-tools/omdb/src/bin/omdb/nexus.rs | 56 ++++++++++++++++++ nexus/reconfigurator/execution/src/dns.rs | 6 +- .../background/tasks/blueprint_execution.rs | 8 ++- .../app/background/tasks/blueprint_load.rs | 9 ++- .../src/display/line_display_shared.rs | 18 +++--- update-engine/src/display/mod.rs | 3 + update-engine/src/display/utils.rs | 58 +++++++++++++++++++ wicket/src/ui/panes/update.rs | 25 +++++--- 8 files changed, 160 insertions(+), 23 deletions(-) create mode 100644 update-engine/src/display/utils.rs diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs index d45865b4a7..5af75fac8f 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus.rs @@ -1556,6 +1556,62 @@ fn print_task_details(bgtask: &BackgroundTask, details: &serde_json::Value) { } } } + } else if name == "blueprint_loader" { + #[derive(Deserialize)] + struct BlueprintLoaderStatus { + target_id: Uuid, + time_created: DateTime, + status: String, + enabled: bool, + } + + match serde_json::from_value::(details.clone()) { + Err(error) => eprintln!( + "warning: failed to interpret task details: {:?}: {:?}", + error, details + ), + Ok(status) => { + println!(" target blueprint: {}", status.target_id); + println!( + " execution: {}", + if status.enabled { "enabled" } else { "disabled" } + ); + println!( + " created at: {}", + humantime::format_rfc3339_millis( + status.time_created.into() + ) + ); + println!(" status: {}", status.status); + } + } + } else if name == "blueprint_executor" { + #[derive(Deserialize)] + struct BlueprintExecutorStatus { + target_id: Uuid, + enabled: bool, + errors: Option>, + } + + match serde_json::from_value::(details.clone()) + { + Err(error) => eprintln!( + "warning: failed to interpret task details: {:?}: {:?}", + error, details + ), + Ok(status) => { + println!(" target blueprint: {}", status.target_id); + println!( + " execution: {}", + if status.enabled { "enabled" } else { "disabled" } + ); + let errors = status.errors.as_deref().unwrap_or(&[]); + println!(" errors: {}", errors.len()); + for (i, e) in errors.iter().enumerate() { + println!(" error {}: {}", i, e); + } + } + } } else { println!( "warning: unknown background task: {:?} \ diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index 9531843259..9ab84e15ff 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -1665,7 +1665,11 @@ mod test { // If we execute it again, we should see no more changes. 
_ = realize_blueprint_and_expect( - &opctx, datastore, resolver, &blueprint, &overrides, + &opctx, + datastore, + resolver, + &blueprint2, + &overrides, ) .await; verify_dns_unchanged( diff --git a/nexus/src/app/background/tasks/blueprint_execution.rs b/nexus/src/app/background/tasks/blueprint_execution.rs index dbbfcb3b14..2b1e3eedca 100644 --- a/nexus/src/app/background/tasks/blueprint_execution.rs +++ b/nexus/src/app/background/tasks/blueprint_execution.rs @@ -83,7 +83,7 @@ impl BlueprintExecutor { "target_id" => %blueprint.id); return json!({ "target_id": blueprint.id.to_string(), - "error": "blueprint disabled" + "enabled": false, }); } @@ -111,6 +111,7 @@ impl BlueprintExecutor { json!({ "target_id": blueprint.id.to_string(), + "enabled": true, "needs_saga_recovery": needs_saga_recovery, }) } @@ -119,6 +120,7 @@ impl BlueprintExecutor { errors.into_iter().map(|e| format!("{:#}", e)).collect(); json!({ "target_id": blueprint.id.to_string(), + "enabled": true, "errors": errors }) } @@ -316,6 +318,7 @@ mod test { value, json!({ "target_id": blueprint_id, + "enabled": true, "needs_saga_recovery": false, }) ); @@ -410,6 +413,7 @@ mod test { value, json!({ "target_id": blueprint.1.id.to_string(), + "enabled": true, "needs_saga_recovery": false, }) ); @@ -427,7 +431,7 @@ mod test { assert_eq!( value, json!({ - "error": "blueprint disabled", + "enabled": false, "target_id": blueprint.1.id.to_string() }) ); diff --git a/nexus/src/app/background/tasks/blueprint_load.rs b/nexus/src/app/background/tasks/blueprint_load.rs index 31bc00441d..70fcf713bc 100644 --- a/nexus/src/app/background/tasks/blueprint_load.rs +++ b/nexus/src/app/background/tasks/blueprint_load.rs @@ -78,6 +78,7 @@ impl BackgroundTask for TargetBlueprintLoader { }; // Decide what to do with the new blueprint + let enabled = new_bp_target.enabled; let Some((old_bp_target, old_blueprint)) = self.last.as_deref() else { // We've found a target blueprint for the first time. 
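With these changes the loader and executor always report an "enabled" field in their status JSON, and the executor emits one of three shapes: disabled, success with needs_saga_recovery, or a list of errors. The following is a minimal sketch of how a consumer could fold those shapes into one summary; the struct and function names are invented for illustration (they are not taken from omdb or any other existing code) and assume the serde and serde_json crates.

use serde::Deserialize;
use serde_json::json;

#[derive(Deserialize)]
struct ExecutorStatus {
    target_id: String,
    enabled: bool,
    #[serde(default)]
    needs_saga_recovery: bool,
    #[serde(default)]
    errors: Vec<String>,
}

// Collapse the three possible payloads (disabled / success / errors) into a
// single human-readable line.
fn summarize(raw: &serde_json::Value) -> String {
    match serde_json::from_value::<ExecutorStatus>(raw.clone()) {
        Err(e) => format!("unrecognized status: {e}"),
        Ok(s) if !s.enabled => {
            format!("target {}: execution disabled", s.target_id)
        }
        Ok(s) if !s.errors.is_empty() => {
            format!("target {}: {} error(s)", s.target_id, s.errors.len())
        }
        Ok(s) => format!(
            "target {}: ok, needs_saga_recovery={}",
            s.target_id, s.needs_saga_recovery
        ),
    }
}

fn main() {
    let disabled = json!({ "target_id": "example", "enabled": false });
    println!("{}", summarize(&disabled));
}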
@@ -97,6 +98,7 @@ impl BackgroundTask for TargetBlueprintLoader { "time_created": time_created, "time_found": chrono::Utc::now(), "status": "first target blueprint", + "enabled": enabled, }); }; @@ -116,7 +118,8 @@ impl BackgroundTask for TargetBlueprintLoader { "target_id": target_id, "time_created": time_created, "time_found": chrono::Utc::now(), - "status": "target blueprint updated" + "status": "target blueprint updated", + "enabled": enabled, }) } else { // The new target id matches the old target id @@ -159,6 +162,7 @@ impl BackgroundTask for TargetBlueprintLoader { "time_created": time_created, "time_found": chrono::Utc::now(), "status": format!("target blueprint {status}"), + "enabled": enabled, }) } else { // We found a new target blueprint that exactly @@ -173,7 +177,8 @@ impl BackgroundTask for TargetBlueprintLoader { json!({ "target_id": target_id, "time_created": time_created, - "status": "target blueprint unchanged" + "status": "target blueprint unchanged", + "enabled": enabled, }) } } diff --git a/update-engine/src/display/line_display_shared.rs b/update-engine/src/display/line_display_shared.rs index 99b03b13f7..99d66bd06f 100644 --- a/update-engine/src/display/line_display_shared.rs +++ b/update-engine/src/display/line_display_shared.rs @@ -16,6 +16,7 @@ use owo_colors::OwoColorize; use swrite::{swrite, SWrite as _}; use crate::{ + display::StepIndexDisplay, events::{ ProgressCounter, ProgressEvent, ProgressEventKind, StepEvent, StepEventKind, StepInfo, StepOutcome, @@ -716,17 +717,16 @@ impl LineDisplayFormatter { ) { ld_step_info.nest_data.add_prefix(line); - // Print out "/)". Leave space such that we - // print out e.g. "1/8)" and " 3/14)". - // Add 1 to the index to make it 1-based. - let step_index = ld_step_info.step_info.index + 1; - let step_index_width = ld_step_info.total_steps.to_string().len(); + // Print out "(/)" in a padded way, so that successive + // steps are vertically aligned. swrite!( line, - "{:width$}/{:width$}) ", - step_index, - ld_step_info.total_steps, - width = step_index_width + "({}) ", + StepIndexDisplay::new( + ld_step_info.step_info.index, + ld_step_info.total_steps + ) + .padded(true), ); swrite!( diff --git a/update-engine/src/display/mod.rs b/update-engine/src/display/mod.rs index c58a4535a0..f6775dd37b 100644 --- a/update-engine/src/display/mod.rs +++ b/update-engine/src/display/mod.rs @@ -11,11 +11,14 @@ //! * [`LineDisplay`]: a line-oriented display suitable for the command line. //! * [`GroupDisplay`]: manages state and shows the results of several //! [`LineDisplay`]s at once. +//! * Some utility displayers which can be used to build custom displayers. mod group_display; mod line_display; mod line_display_shared; +mod utils; pub use group_display::GroupDisplay; pub use line_display::{LineDisplay, LineDisplayStyles}; use line_display_shared::*; +pub use utils::*; diff --git a/update-engine/src/display/utils.rs b/update-engine/src/display/utils.rs new file mode 100644 index 0000000000..0a03a09fa1 --- /dev/null +++ b/update-engine/src/display/utils.rs @@ -0,0 +1,58 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Utility displayers. + +use std::fmt; + +/// Given an index and a count of total steps, displays `{current}/{total}`. +/// +/// Here: +/// +/// * `current` is `index + 1`. 
+/// * If `padded` is `true`, `current` is right-aligned and padded with spaces +/// to the width of `total`. +/// +/// # Examples +/// +/// ``` +/// use update_engine::display::StepIndexDisplay; +/// +/// let display = StepIndexDisplay::new(0, 8); +/// assert_eq!(display.to_string(), "1/8"); +/// let display = StepIndexDisplay::new(82, 230); +/// assert_eq!(display.to_string(), "83/230"); +/// let display = display.padded(true); +/// assert_eq!(display.to_string(), " 83/230"); +/// ``` +#[derive(Debug)] +pub struct StepIndexDisplay { + index: usize, + total: usize, + padded: bool, +} + +impl StepIndexDisplay { + /// Create a new `StepIndexDisplay`. + /// + /// The index is 0-based (i.e. 1 is added to it when it is displayed). + pub fn new(index: usize, total: usize) -> Self { + Self { index, total, padded: false } + } + + pub fn padded(self, padded: bool) -> Self { + Self { padded, ..self } + } +} + +impl fmt::Display for StepIndexDisplay { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.padded { + let width = self.total.to_string().len(); + write!(f, "{:>width$}/{}", self.index + 1, self.total) + } else { + write!(f, "{}/{}", self.index + 1, self.total) + } + } +} diff --git a/wicket/src/ui/panes/update.rs b/wicket/src/ui/panes/update.rs index 3a61e25a3a..96a55667fe 100644 --- a/wicket/src/ui/panes/update.rs +++ b/wicket/src/ui/panes/update.rs @@ -29,6 +29,7 @@ use ratatui::widgets::{ use ratatui::Frame; use slog::{info, o, Logger}; use tui_tree_widget::{Tree, TreeItem, TreeState}; +use update_engine::display::StepIndexDisplay; use update_engine::{ AbortReason, CompletionReason, ExecutionStatus, FailureReason, StepKey, TerminalKind, WillNotBeRunReason, @@ -1984,9 +1985,11 @@ impl ComponentUpdateListState { )); status_text.push(Span::styled( format!( - " (step {}/{})", - step_key.index + 1, - summary.total_steps, + " (step {})", + StepIndexDisplay::new( + step_key.index, + summary.total_steps, + ) ), style::plain_text(), )); @@ -2015,9 +2018,11 @@ impl ComponentUpdateListState { )); status_text.push(Span::styled( format!( - " at step {}/{}", - info.step_key.index + 1, - summary.total_steps, + " at step {}", + StepIndexDisplay::new( + info.step_key.index, + summary.total_steps, + ) ), style::plain_text(), )); @@ -2033,9 +2038,11 @@ impl ComponentUpdateListState { )); status_text.push(Span::styled( format!( - " at step {}/{}", - info.step_key.index + 1, - summary.total_steps, + " at step {}", + StepIndexDisplay::new( + info.step_key.index, + summary.total_steps, + ) ), style::plain_text(), )); From 012cc526cdda1b7d68a7c05e8f40109b45eb7527 Mon Sep 17 00:00:00 2001 From: Rain Date: Mon, 26 Aug 2024 22:35:17 -0700 Subject: [PATCH 3/4] remove a dead file Created using spr 1.3.6-beta.1 --- update-engine/src/display/simple_display.rs | 62 --------------------- 1 file changed, 62 deletions(-) delete mode 100644 update-engine/src/display/simple_display.rs diff --git a/update-engine/src/display/simple_display.rs b/update-engine/src/display/simple_display.rs deleted file mode 100644 index 0b45ff2889..0000000000 --- a/update-engine/src/display/simple_display.rs +++ /dev/null @@ -1,62 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -use std::fmt; - -use crate::{EventBuffer, ExecutionStatus, StepSpec, TerminalKind}; - -/// Display the root execution status of an event buffer. 
-#[derive(Debug)] -pub struct RootExecutionDisplay<'a, S: StepSpec> { - buffer: &'a EventBuffer, - // TODO: implement color -} - -impl<'a, S: StepSpec> fmt::Display for RootExecutionDisplay<'a, S> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let Some(summary) = self.buffer.root_execution_summary() else { - return write!(f, "(event buffer is empty)"); - }; - let steps_width = summary.total_steps.to_string().len(); - - match summary.execution_status { - ExecutionStatus::NotStarted => write!(f, "not started"), - ExecutionStatus::Running { step_key, .. } => { - let step_data = self.buffer.get(&step_key).unwrap(); - write!( - f, - "running: {} (step {:>steps_width$}/{})", - step_data.step_info().description, - step_key.index + 1, - summary.total_steps - ) - } - ExecutionStatus::Terminal(info) => { - let step_data = - self.buffer.get(&info.step_key).expect("step exists"); - match info.kind { - TerminalKind::Completed => write!(f, "completed")?, - TerminalKind::Failed => { - write!( - f, - "failed at: {} (step {:>steps_width$}/{})", - step_data.step_info().description, - info.step_key.index + 1, - summary.total_steps - )?; - } - TerminalKind::Aborted => write!( - f, - "aborted at: {} (step {:>steps_width$}/{})", - step_data.step_info().description, - info.step_key.index + 1, - summary.total_steps - )?, - } - - Ok(()) - } - } - } -} From 931a6ef805e19949d8d2f74fbba2375aa1ad1bcc Mon Sep 17 00:00:00 2001 From: Rain Date: Fri, 6 Sep 2024 15:53:25 -0700 Subject: [PATCH 4/4] unused Created using spr 1.3.6-beta.1 --- wicketd/src/preflight_check/uplink.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/wicketd/src/preflight_check/uplink.rs b/wicketd/src/preflight_check/uplink.rs index ff1a33b0e6..da30cd0199 100644 --- a/wicketd/src/preflight_check/uplink.rs +++ b/wicketd/src/preflight_check/uplink.rs @@ -39,7 +39,6 @@ use std::sync::Mutex; use std::time::Duration; use std::time::Instant; use tokio::process::Command; -use tokio::sync::mpsc; use wicket_common::preflight_check::EventBuffer; use wicket_common::preflight_check::StepContext; use wicket_common::preflight_check::StepProgress;
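The ordering constraint in the first patch -- decommission_expunged_disks only accepts a DeployDisksDone value, and deploy_disks is the function that produces one -- is an instance of the proof-token (typestate) pattern. Below is a minimal standalone sketch of the idea, using hypothetical names (step_one, step_two, StepOneDone) rather than the functions from the patch.

#[derive(Debug)]
#[must_use = "pass this to step_two to record that step_one already ran"]
struct StepOneDone {}

fn step_one() -> StepOneDone {
    // ... the prerequisite work happens here ...
    StepOneDone {}
}

// Taking the token by value makes the dependency part of the signature: a
// caller cannot reach step_two without first obtaining a StepOneDone.
fn step_two(_done: StepOneDone) {
    // ... work that is only meaningful after step_one ...
}

fn main() {
    let done = step_one();
    step_two(done);
}

As with DeployDisksDone {} in the disk tests above, code in the same crate can still construct the token by hand when testing a step in isolation, so the pattern documents and enforces ordering for ordinary callers rather than being airtight.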