From d313f7f7f3faba5477bc009e386f8815a74f41e7 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Wed, 14 Feb 2024 21:25:57 +0000 Subject: [PATCH 01/12] Update Rust crate strum to 0.26 (#4915) --- Cargo.lock | 23 ++++++++++++----------- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 2 ++ 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index caddb6d8ea..708447b644 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4341,7 +4341,7 @@ dependencies = [ "serde_json", "sled-agent-client", "steno", - "strum 0.25.0", + "strum 0.26.1", "thiserror", "uuid", ] @@ -4413,7 +4413,7 @@ dependencies = [ "slog", "static_assertions", "steno", - "strum 0.25.0", + "strum 0.26.1", "subprocess", "swrite", "term", @@ -4476,7 +4476,7 @@ dependencies = [ "serde_json", "sled-agent-client", "slog", - "strum 0.25.0", + "strum 0.26.1", "thiserror", "tokio", "uuid", @@ -4565,7 +4565,7 @@ dependencies = [ "serde_json", "sled-agent-client", "steno", - "strum 0.25.0", + "strum 0.26.1", "thiserror", "uuid", ] @@ -4882,7 +4882,7 @@ dependencies = [ "serde_urlencoded", "serde_with", "slog", - "strum 0.25.0", + "strum 0.26.1", "test-strategy", "thiserror", "tokio", @@ -5070,7 +5070,7 @@ dependencies = [ "slog-term", "sp-sim", "steno", - "strum 0.25.0", + "strum 0.26.1", "subprocess", "tempfile", "term", @@ -5128,7 +5128,7 @@ dependencies = [ "sled-agent-client", "slog", "slog-error-chain", - "strum 0.25.0", + "strum 0.26.1", "subprocess", "tabled", "textwrap 0.16.0", @@ -5163,7 +5163,7 @@ dependencies = [ "slog-bunyan", "slog-term", "smf", - "strum 0.25.0", + "strum 0.26.1", "swrite", "tar", "thiserror", @@ -5412,6 +5412,7 @@ dependencies = [ "socket2 0.5.5", "spin 0.9.8", "string_cache", + "strum 0.25.0", "subtle", "syn 1.0.109", "syn 2.0.48", @@ -5687,7 +5688,7 @@ dependencies = [ "schemars", "serde", "serde_json", - "strum 0.25.0", + "strum 0.26.1", "thiserror", "trybuild", "uuid", @@ -5740,7 +5741,7 @@ dependencies = [ "slog-async", "slog-dtrace", "slog-term", - "strum 0.25.0", + "strum 0.26.1", "subprocess", "thiserror", "tokio", @@ -5782,7 +5783,7 @@ dependencies = [ "slog-term", "sqlformat", "sqlparser", - "strum 0.25.0", + "strum 0.26.1", "tabled", "tempfile", "thiserror", diff --git a/Cargo.toml b/Cargo.toml index d33758fc06..66fbbc018d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -370,7 +370,7 @@ static_assertions = "1.1.0" # harder than expected to make breaking changes (even if you specify a specific # SHA). Cut a new Steno release instead. See omicron#2117. 
steno = "0.4.0" -strum = { version = "0.25", features = [ "derive" ] } +strum = { version = "0.26", features = [ "derive" ] } subprocess = "0.2.9" supports-color = "2.1.0" swrite = "0.1.0" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 7038f9c038..686cc3f218 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -98,6 +98,7 @@ slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "rele socket2 = { version = "0.5.5", default-features = false, features = ["all"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } +strum = { version = "0.25.0", features = ["derive"] } subtle = { version = "2.5.0" } syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.48", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } @@ -205,6 +206,7 @@ slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "rele socket2 = { version = "0.5.5", default-features = false, features = ["all"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } +strum = { version = "0.25.0", features = ["derive"] } subtle = { version = "2.5.0" } syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.48", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } From 4fdedceec5f62525b0b907a9010695ba1cb62931 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 14 Feb 2024 17:22:39 -0600 Subject: [PATCH 02/12] `/lookup/*` route for console (#5040) Needed to make https://github.com/oxidecomputer/console/pull/1944 work. Adding one top-level route `/lookup/*` so we can freely add more routes under it in the console without adding anything here. Will hold on merging until after the release is finalized. --- nexus/src/external_api/console_api.rs | 1 + nexus/src/external_api/http_entrypoints.rs | 1 + nexus/tests/integration_tests/console_api.rs | 2 ++ 3 files changed, 4 insertions(+) diff --git a/nexus/src/external_api/console_api.rs b/nexus/src/external_api/console_api.rs index d369f57905..86a808a47b 100644 --- a/nexus/src/external_api/console_api.rs +++ b/nexus/src/external_api/console_api.rs @@ -700,6 +700,7 @@ macro_rules! 
console_page_wildcard { console_page_wildcard!(console_projects, "/projects/{path:.*}"); console_page_wildcard!(console_settings_page, "/settings/{path:.*}"); console_page_wildcard!(console_system_page, "/system/{path:.*}"); +console_page_wildcard!(console_lookup, "/lookup/{path:.*}"); console_page!(console_root, "/"); console_page!(console_projects_new, "/projects-new"); console_page!(console_silo_images, "/images"); diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 3a9e957328..fd18cb2dab 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -332,6 +332,7 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(console_api::login_saml)?; api.register(console_api::logout)?; + api.register(console_api::console_lookup)?; api.register(console_api::console_projects)?; api.register(console_api::console_projects_new)?; api.register(console_api::console_silo_images)?; diff --git a/nexus/tests/integration_tests/console_api.rs b/nexus/tests/integration_tests/console_api.rs index d7918b01aa..78b5ee21d5 100644 --- a/nexus/tests/integration_tests/console_api.rs +++ b/nexus/tests/integration_tests/console_api.rs @@ -209,6 +209,8 @@ async fn test_console_pages(cptestctx: &ControlPlaneTestContext) { "/images", "/utilization", "/access", + "/lookup/", + "/lookup/abc", ]; for path in console_paths { From b2cd53dfc93adab6c4ac44b8028421824e506f1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karen=20C=C3=A1rcamo?= Date: Thu, 15 Feb 2024 13:20:37 +1300 Subject: [PATCH 03/12] [docs] Guide to creating/editing SMF services in omicron (#5061) This is probably incomplete; we can add more stuff down the track. Just wanted to add some information while it's fresh in my mind --- docs/adding-or-modifying-smf-services.adoc | 112 +++++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 docs/adding-or-modifying-smf-services.adoc diff --git a/docs/adding-or-modifying-smf-services.adoc b/docs/adding-or-modifying-smf-services.adoc new file mode 100644 index 0000000000..0ee051a26e --- /dev/null +++ b/docs/adding-or-modifying-smf-services.adoc @@ -0,0 +1,112 @@ +:showtitle: +:numbered: +:toc: left + += Adding or modifying SMF services + +== SMF services + +Go to the `smf/` directory; there you will find all current services, or you can create a new directory if you're working on a new service. + +Each directory can contain a myriad of files depending on the service, but only a `manifest.xml` file is necessary. This file contains the complete set of properties associated with the service instance. + +== Packaging + +To build the omicron package that will contain your new or edited service, you will need to modify the `package-manifest.toml` file. + +There are a few caveats when modifying or creating a new package. + +- When creating a Rust binary as part of the service, the name of the omicron package _must_ be the same as the name of the Rust package. The service name can be something different if you wish.
+ +omdb entry in `package-manifest.toml` : +```toml +[package.omicron-omdb] +service_name = "omdb" +only_for_targets.image = "standard" +source.type = "local" +source.rust.binary_names = ["omdb"] +source.rust.release = true +output.type = "zone" +output.intermediate_only = true +``` + +omdb Cargo.toml file: +```toml +[package] +name = "omicron-omdb" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" +``` + +- If a zone is created for this service, the service name _must_ be the same as the zone name. For example, the `oximeter` service is a composite package which depends on the `oximeter-collector` service. This means that even though the `oximeter-collector` package contains the `oximeter` binary, we _must_ name the composite package `oximeter` as this is the package that will be deployed to a zone. + +oximeter entry in `package-manifest.toml` : +```toml +[package.oximeter] +service_name = "oximeter" +only_for_targets.image = "standard" +source.type = "composite" +source.packages = [ "oximeter-collector.tar.gz", "zone-network-setup.tar.gz" ] +output.type = "zone" + +[package.oximeter-collector] +service_name = "oximeter-collector" +only_for_targets.image = "standard" +source.type = "local" +source.rust.binary_names = ["oximeter", "clickhouse-schema-updater"] +source.rust.release = true +source.paths = [ + { from = "smf/oximeter", to = "/var/svc/manifest/site/oximeter" }, + { from = "oximeter/db/schema", to = "/opt/oxide/oximeter/schema" }, +] +output.type = "zone" +output.intermediate_only = true +``` + +== Sled agent services + +Service properties are values which can only be known at run-time. To make sure all of them are populated, you'll have to edit the `sled-agent/src/services.rs` file. This is where all of the omicron zones are built. + +If your service is new, it may look something like the following code. As you can see, the properties we are populating are the same as the ones on the service's manifest.xml file. + +`sled-agent/src/services.rs` file: +```rust +fn zone_network_setup_install( + info: &SledAgentInfo, + zone: &InstalledZone, + static_addr: &String, +) -> Result { + let datalink = zone.get_control_vnic_name(); + let gateway = &info.underlay_address.to_string(); + + let mut config_builder = PropertyGroupBuilder::new("config"); + config_builder = config_builder + .add_property("datalink", "astring", datalink) + .add_property("gateway", "astring", gateway) + .add_property("static_addr", "astring", static_addr); + + Ok(ServiceBuilder::new("oxide/zone-network-setup") + .add_property_group(config_builder) + .add_instance(ServiceInstanceBuilder::new("default"))) +} +``` + +`smf/zone-network-setup/manifest.xml` file: +```xml +<...> + + + + + + + + + + + +<...> +``` \ No newline at end of file From e73c334c087afbe12d45aaca6dc2bd48a336df52 Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Wed, 14 Feb 2024 19:55:35 -0500 Subject: [PATCH 04/12] Trigger inventory collection on sled add (#5066) This is based on discussion in https://github.com/oxidecomputer/omicron/issues/5058 --- nexus/src/app/rack.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index a4d559f823..7a1ad0e6a9 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -891,6 +891,11 @@ impl super::Nexus { ), })?; + // Trigger an inventory collection so that the newly added sled is known + // about. 
+ self.background_tasks + .activate(&self.background_tasks.task_inventory_collection); + Ok(()) } From 77f81eea96ff808cb1e4125bdca2e16615e548c9 Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Wed, 14 Feb 2024 19:24:14 -0800 Subject: [PATCH 05/12] Set version to 7.0.0 (#5070) --- .github/buildomat/jobs/package.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/buildomat/jobs/package.sh b/.github/buildomat/jobs/package.sh index 79590a44df..8546a67a4f 100755 --- a/.github/buildomat/jobs/package.sh +++ b/.github/buildomat/jobs/package.sh @@ -37,7 +37,7 @@ rustc --version # trampoline global zone images. # COMMIT=$(git rev-parse HEAD) -VERSION="6.0.0-0.ci+git${COMMIT:0:11}" +VERSION="7.0.0-0.ci+git${COMMIT:0:11}" echo "$VERSION" >/work/version.txt ptime -m ./tools/install_builder_prerequisites.sh -yp From 6d33855f5da8a932fee001a81237f2153926815a Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 14 Feb 2024 20:41:12 -0800 Subject: [PATCH 06/12] [sled-agent] Drop usage of locks from instances to grab fewer locks during drop (#4936) This PR refactors both `Instance` and `InstanceManager` to a message-passing structure to avoid the need for locking. We do the following: - Most of the `Instance` / `InstanceManager` internals have been moved to the `{Instance, InstanceManager}Runner` structure, which is exclusively owned by a `tokio::task`. - The `{Instance,InstanceManager}` sends requests to this task to process ongoing work. - The `InstanceTicket` has been re-worked -- rather than removing an instance from an inner `BTreeMap`, it sends a request for the `tokio::task` to do this removal. This avoids any need for locking. Fixes https://github.com/oxidecomputer/omicron/issues/4931 --- Cargo.lock | 1 + sled-agent/Cargo.toml | 1 + sled-agent/src/instance.rs | 687 ++++++++++++++++++++--------- sled-agent/src/instance_manager.rs | 646 ++++++++++++++++++++------- sled-agent/src/zone_bundle.rs | 6 + 5 files changed, 969 insertions(+), 372 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 708447b644..c3ac516f82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5270,6 +5270,7 @@ dependencies = [ "slog-term", "smf", "static_assertions", + "strum 0.26.1", "subprocess", "tar", "tempfile", diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index 5bd205b32e..a70b3b4638 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -67,6 +67,7 @@ slog-async.workspace = true slog-dtrace.workspace = true slog-term.workspace = true smf.workspace = true +strum.workspace = true tar.workspace = true thiserror.workspace = true tofino.workspace = true diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs index 79c0e92160..5b9bf54dd9 100644 --- a/sled-agent/src/instance.rs +++ b/sled-agent/src/instance.rs @@ -8,13 +8,16 @@ use crate::common::instance::{ Action as InstanceAction, InstanceStates, ObservedPropolisState, PublishedVmmState, }; -use crate::instance_manager::{InstanceManagerServices, InstanceTicket}; +use crate::instance_manager::{ + Error as ManagerError, InstanceManagerServices, InstanceTicket, +}; use crate::nexus::NexusClientWithResolver; use crate::params::ZoneBundleMetadata; use crate::params::{InstanceExternalIpBody, ZoneBundleCause}; use crate::params::{ InstanceHardware, InstanceMigrationSourceParams, - InstanceMigrationTargetParams, InstanceStateRequested, VpcFirewallRule, + InstanceMigrationTargetParams, InstancePutStateResponse, + InstanceStateRequested, InstanceUnregisterResponse, VpcFirewallRule, }; use crate::profile::*; use
crate::zone_bundle::BundleError; @@ -22,7 +25,6 @@ use crate::zone_bundle::ZoneBundler; use anyhow::anyhow; use backoff::BackoffError; use chrono::Utc; -use futures::lock::{Mutex, MutexGuard}; use illumos_utils::dladm::Etherstub; use illumos_utils::link::VnicAllocator; use illumos_utils::opte::{DhcpCfg, PortManager}; @@ -47,8 +49,12 @@ use slog::Logger; use std::net::IpAddr; use std::net::{SocketAddr, SocketAddrV6}; use std::sync::Arc; +use tokio::sync::{mpsc, oneshot}; use uuid::Uuid; +// The depth of the request queue for the instance. +const QUEUE_SIZE: usize = 32; + #[derive(thiserror::Error, Debug)] pub enum Error { #[error("Failed to wait for service: {0}")] @@ -108,6 +114,17 @@ pub enum Error { #[error("I/O error")] Io(#[from] std::io::Error), + + #[error("Failed to send request to Instance: Channel closed")] + FailedSendChannelClosed, + + #[error( + "Failed to send request from Instance Runner: Client Channel closed" + )] + FailedSendClientClosed, + + #[error("Instance dropped our request")] + RequestDropped(#[from] oneshot::error::RecvError), } // Issues read-only, idempotent HTTP requests at propolis until it responds with @@ -186,9 +203,111 @@ struct PropolisSetup { running_zone: RunningZone, } -struct InstanceInner { +// Requests that can be made of instances +#[derive(strum::Display)] +enum InstanceRequest { + RequestZoneBundle { + tx: oneshot::Sender>, + }, + CurrentState { + tx: oneshot::Sender, + }, + PutState { + state: crate::params::InstanceStateRequested, + tx: oneshot::Sender>, + }, + PutMigrationIds { + old_runtime: InstanceRuntimeState, + migration_ids: Option, + tx: oneshot::Sender>, + }, + Terminate { + tx: oneshot::Sender>, + }, + IssueSnapshotRequest { + disk_id: Uuid, + snapshot_id: Uuid, + tx: oneshot::Sender>, + }, + AddExternalIp { + ip: InstanceExternalIpBody, + tx: oneshot::Sender>, + }, + DeleteExternalIp { + ip: InstanceExternalIpBody, + tx: oneshot::Sender>, + }, +} + +// A small task which tracks the state of the instance, by constantly querying +// the state of Propolis for updates. +// +// This task communicates with the "InstanceRunner" task to report status. +struct InstanceMonitorRunner { + client: Arc, + tx_monitor: mpsc::Sender, +} + +impl InstanceMonitorRunner { + async fn run(self) -> Result<(), anyhow::Error> { + let mut gen = 0; + loop { + // State monitoring always returns the most recent state/gen pair + // known to Propolis. + let response = self + .client + .instance_state_monitor() + .body(propolis_client::types::InstanceStateMonitorRequest { + gen, + }) + .send() + .await? + .into_inner(); + let observed_gen = response.gen; + + // Now that we have the response from Propolis' HTTP server, we + // forward that to the InstanceRunner. + // + // It will decide the new state, provide that info to Nexus, + // and possibly identify if we should terminate. + let (tx, rx) = oneshot::channel(); + self.tx_monitor + .send(InstanceMonitorRequest::Update { state: response, tx }) + .await?; + + if let Reaction::Terminate = rx.await? { + return Ok(()); + } + + // Update the generation number we're asking for, to ensure the + // Propolis will only return more recent values. + gen = observed_gen + 1; + } + } +} + +enum InstanceMonitorRequest { + Update { + state: propolis_client::types::InstanceStateMonitorResponse, + tx: oneshot::Sender, + }, +} + +struct InstanceRunner { log: Logger, + // A signal the InstanceRunner should shut down. + // This is currently only activated by the runner itself. 
+ should_terminate: bool, + + // Request channel on which most instance requests are made. + rx: mpsc::Receiver, + + // Request channel on which monitor requests are made. + tx_monitor: mpsc::Sender, + rx_monitor: mpsc::Receiver, + monitor_handle: Option>, + // Properties visible to Propolis properties: propolis_client::types::InstanceProperties, @@ -237,7 +356,115 @@ struct InstanceInner { instance_ticket: InstanceTicket, } -impl InstanceInner { +impl InstanceRunner { + async fn run(mut self) { + while !self.should_terminate { + tokio::select! { + biased; + + // Handle messages from our own "Monitor the VMM" task. + request = self.rx_monitor.recv() => { + use InstanceMonitorRequest::*; + match request { + Some(Update { state, tx }) => { + let observed = ObservedPropolisState::new( + self.state.instance(), + &state, + ); + let reaction = self.observe_state(&observed).await; + self.publish_state_to_nexus().await; + + // NOTE: If we fail to send here, the + // InstanceMonitorRunner has stopped running for + // some reason. We'd presumably handle that on the + // next iteration of the loop. + if let Err(_) = tx.send(reaction) { + warn!(self.log, "InstanceRunner failed to send to InstanceMonitorRunner"); + } + }, + // NOTE: This case shouldn't really happen, as we keep a copy + // of the sender alive in "self.tx_monitor". + None => { + warn!(self.log, "Instance 'VMM monitor' channel closed; shutting down"); + self.terminate().await; + }, + } + + }, + // Handle external requests to act upon the instance. + request = self.rx.recv() => { + use InstanceRequest::*; + let request_variant = request.as_ref().map(|r| r.to_string()); + let result = match request { + Some(RequestZoneBundle { tx }) => { + tx.send(self.request_zone_bundle().await) + .map_err(|_| Error::FailedSendClientClosed) + }, + Some(CurrentState{ tx }) => { + tx.send(self.current_state().await) + .map_err(|_| Error::FailedSendClientClosed) + }, + Some(PutState{ state, tx }) => { + tx.send(self.put_state(state).await + .map(|r| InstancePutStateResponse { updated_runtime: Some(r) }) + .map_err(|e| e.into())) + .map_err(|_| Error::FailedSendClientClosed) + }, + Some(PutMigrationIds{ old_runtime, migration_ids, tx }) => { + tx.send( + self.put_migration_ids( + &old_runtime, + &migration_ids + ).await.map_err(|e| e.into()) + ) + .map_err(|_| Error::FailedSendClientClosed) + }, + Some(Terminate { tx }) => { + tx.send(Ok(InstanceUnregisterResponse { + updated_runtime: Some(self.terminate().await) + })) + .map_err(|_| Error::FailedSendClientClosed) + }, + Some(IssueSnapshotRequest { disk_id, snapshot_id, tx }) => { + tx.send( + self.issue_snapshot_request( + disk_id, + snapshot_id + ).await.map_err(|e| e.into()) + ) + .map_err(|_| Error::FailedSendClientClosed) + }, + Some(AddExternalIp { ip, tx }) => { + tx.send(self.add_external_ip(&ip).await.map_err(|e| e.into())) + .map_err(|_| Error::FailedSendClientClosed) + }, + Some(DeleteExternalIp { ip, tx }) => { + tx.send(self.delete_external_ip(&ip).await.map_err(|e| e.into())) + .map_err(|_| Error::FailedSendClientClosed) + }, + None => { + warn!(self.log, "Instance request channel closed; shutting down"); + self.terminate().await; + break; + }, + }; + + if let Err(err) = result { + warn!( + self.log, + "Error handling request"; + "request" => request_variant.unwrap(), + "err" => ?err, + + ); + } + } + + } + } + self.publish_state_to_nexus().await; + } + /// Yields this instance's ID. 
fn id(&self) -> &Uuid { &self.properties.id @@ -333,7 +560,7 @@ impl InstanceInner { async fn observe_state( &mut self, state: &ObservedPropolisState, - ) -> Result { + ) -> Reaction { info!(self.log, "Observing new propolis state: {:?}", state); // This instance may no longer have a Propolis zone if it was rudely @@ -351,7 +578,7 @@ impl InstanceInner { // Return the Terminate action so that the caller will cleanly // cease to monitor this Propolis. Note that terminating an instance // that's already terminated is a no-op. - return Ok(Reaction::Terminate); + return Reaction::Terminate; } // Update the Sled Agent's internal state machine. @@ -374,10 +601,10 @@ impl InstanceInner { info!(self.log, "terminating VMM that has exited"; "instance_id" => %self.id()); - self.terminate().await?; - Ok(Reaction::Terminate) + self.terminate().await; + Reaction::Terminate } - None => Ok(Reaction::Continue), + None => Reaction::Continue, } } @@ -406,7 +633,7 @@ impl InstanceInner { } /// Sends an instance ensure request to this instance's Propolis. - async fn propolis_ensure( + async fn propolis_ensure_inner( &self, client: &PropolisClient, running_zone: &RunningZone, @@ -466,14 +693,13 @@ impl InstanceInner { /// Panics if this routine is called more than once for a given Instance. async fn ensure_propolis_and_tasks( &mut self, - instance: Instance, setup: PropolisSetup, migrate: Option, ) -> Result<(), Error> { assert!(self.running_state.is_none()); let PropolisSetup { client, running_zone } = setup; - self.propolis_ensure(&client, &running_zone, migrate).await?; + self.propolis_ensure_inner(&client, &running_zone, migrate).await?; // Monitor propolis for state changes in the background. // @@ -481,16 +707,18 @@ impl InstanceInner { // (either because the task observed a message from Propolis saying that // it exited or because the Propolis server was terminated by other // means). - let monitor_client = client.clone(); - let _monitor_task = tokio::task::spawn(async move { - let r = instance.monitor_state_task(monitor_client).await; - let log = &instance.inner.lock().await.log; - match r { + let runner = InstanceMonitorRunner { + client: client.clone(), + tx_monitor: self.tx_monitor.clone(), + }; + let log = self.log.clone(); + let monitor_handle = tokio::task::spawn(async move { + match runner.run().await { Err(e) => warn!(log, "State monitoring task failed: {}", e), Ok(()) => info!(log, "State monitoring task complete"), } }); - + self.monitor_handle = Some(monitor_handle); self.running_state = Some(RunningState { client, running_zone }); Ok(()) @@ -501,7 +729,7 @@ impl InstanceInner { /// /// This routine is safe to call even if the instance's zone was never /// started. It is also safe to call multiple times on a single instance. - async fn terminate(&mut self) -> Result<(), Error> { + async fn terminate_inner(&mut self) { let zname = propolis_zone_name(self.propolis_id()); // First fetch the running state. @@ -518,8 +746,8 @@ impl InstanceInner { // Ensure the instance is removed from the instance manager's table // so that a new instance can take its place. - self.instance_ticket.terminate(); - return Ok(()); + self.instance_ticket.deregister(); + return; }; // Take a zone bundle whenever this instance stops. @@ -548,7 +776,7 @@ impl InstanceInner { Zones::halt_and_remove_logged(&self.log, &zname).await.unwrap(); // Remove ourselves from the instance manager's map of instances. 
- self.instance_ticket.terminate(); + self.instance_ticket.deregister(); // See if there are any runtime objects to clean up. // @@ -557,11 +785,9 @@ impl InstanceInner { // Remove any OPTE ports from the port manager. running_state.running_zone.release_opte_ports(); - - Ok(()) } - pub async fn add_external_ip( + async fn add_external_ip_inner( &mut self, ip: &InstanceExternalIpBody, ) -> Result<(), Error> { @@ -617,7 +843,7 @@ impl InstanceInner { Ok(()) } - pub async fn delete_external_ip( + async fn delete_external_ip_inner( &mut self, ip: &InstanceExternalIpBody, ) -> Result<(), Error> { @@ -667,12 +893,11 @@ impl InstanceInner { } /// A reference to a single instance running a running Propolis server. -/// -/// Cloning this object clones the reference - it does not create another -/// instance. -#[derive(Clone)] pub struct Instance { - inner: Arc>, + tx: mpsc::Sender, + + #[allow(dead_code)] + runner_handle: tokio::task::JoinHandle<()>, } #[derive(Debug)] @@ -756,8 +981,16 @@ impl Instance { } } - let instance = InstanceInner { + let (tx, rx) = mpsc::channel(QUEUE_SIZE); + let (tx_monitor, rx_monitor) = mpsc::channel(1); + + let runner = InstanceRunner { log: log.new(o!("instance_id" => id.to_string())), + should_terminate: false, + rx, + tx_monitor, + rx_monitor, + monitor_handle: None, // NOTE: Mostly lies. properties: propolis_client::types::InstanceProperties { id, @@ -796,61 +1029,170 @@ impl Instance { instance_ticket: ticket, }; - let inner = Arc::new(Mutex::new(instance)); + let runner_handle = + tokio::task::spawn(async move { runner.run().await }); - Ok(Instance { inner }) + Ok(Instance { tx, runner_handle }) } /// Create bundle from an instance zone. pub async fn request_zone_bundle( &self, + tx: oneshot::Sender>, + ) -> Result<(), BundleError> { + self.tx + .send(InstanceRequest::RequestZoneBundle { tx }) + .await + .map_err(|err| BundleError::FailedSend(anyhow!(err)))?; + Ok(()) + } + + pub async fn current_state(&self) -> Result { + let (tx, rx) = oneshot::channel(); + self.tx + .send(InstanceRequest::CurrentState { tx }) + .await + .map_err(|_| Error::FailedSendChannelClosed)?; + Ok(rx.await?) + } + + /// Attempts to update the current state of the instance by launching a + /// Propolis process for the instance (if needed) and issuing an appropriate + /// request to Propolis to change state. + /// + /// Returns the instance's state after applying any changes required by this + /// call. Note that if the instance's Propolis is in the middle of its own + /// state transition, it may publish states that supersede the state + /// published by this routine in perhaps-surprising ways. For example, if an + /// instance begins to stop when Propolis has just begun to handle a prior + /// request to reboot, the instance's state may proceed from Stopping to + /// Rebooting to Running to Stopping to Stopped. 
+ pub async fn put_state( + &self, + tx: oneshot::Sender>, + state: crate::params::InstanceStateRequested, + ) -> Result<(), Error> { + self.tx + .send(InstanceRequest::PutState { state, tx }) + .await + .map_err(|_| Error::FailedSendChannelClosed)?; + Ok(()) + } + + pub async fn put_migration_ids( + &self, + tx: oneshot::Sender>, + old_runtime: InstanceRuntimeState, + migration_ids: Option, + ) -> Result<(), Error> { + self.tx + .send(InstanceRequest::PutMigrationIds { + old_runtime, + migration_ids, + tx, + }) + .await + .map_err(|_| Error::FailedSendChannelClosed)?; + Ok(()) + } + + /// Rudely terminates this instance's Propolis (if it has one) and + /// immediately transitions the instance to the Destroyed state. + pub async fn terminate( + &self, + tx: oneshot::Sender>, + ) -> Result<(), Error> { + self.tx + .send(InstanceRequest::Terminate { tx }) + .await + .map_err(|_| Error::FailedSendChannelClosed)?; + Ok(()) + } + + pub async fn issue_snapshot_request( + &self, + tx: oneshot::Sender>, + disk_id: Uuid, + snapshot_id: Uuid, + ) -> Result<(), Error> { + self.tx + .send(InstanceRequest::IssueSnapshotRequest { + disk_id, + snapshot_id, + tx, + }) + .await + .map_err(|_| Error::FailedSendChannelClosed)?; + Ok(()) + } + + pub async fn add_external_ip( + &self, + tx: oneshot::Sender>, + ip: &InstanceExternalIpBody, + ) -> Result<(), Error> { + self.tx + .send(InstanceRequest::AddExternalIp { ip: *ip, tx }) + .await + .map_err(|_| Error::FailedSendChannelClosed)?; + Ok(()) + } + + pub async fn delete_external_ip( + &self, + tx: oneshot::Sender>, + ip: &InstanceExternalIpBody, + ) -> Result<(), Error> { + self.tx + .send(InstanceRequest::DeleteExternalIp { ip: *ip, tx }) + .await + .map_err(|_| Error::FailedSendChannelClosed)?; + Ok(()) + } +} + +// TODO: Move this implementation higher. I'm just keeping it here to make the +// incremental diff smaller. +impl InstanceRunner { + async fn request_zone_bundle( + &self, ) -> Result { - let inner = self.inner.lock().await; - let name = propolis_zone_name(inner.propolis_id()); - match &*inner { - InstanceInner { running_state: None, .. } => { - Err(BundleError::Unavailable { name }) - } - InstanceInner { - running_state: Some(RunningState { ref running_zone, .. }), - .. - } => { - inner - .zone_bundler + let name = propolis_zone_name(self.propolis_id()); + match &self.running_state { + None => Err(BundleError::Unavailable { name }), + Some(RunningState { ref running_zone, .. }) => { + self.zone_bundler .create(running_zone, ZoneBundleCause::ExplicitRequest) .await } } } - pub async fn current_state(&self) -> SledInstanceState { - let inner = self.inner.lock().await; - inner.state.sled_instance_state() + async fn current_state(&self) -> SledInstanceState { + self.state.sled_instance_state() } /// Ensures that a Propolis process exists for this instance, then sends it /// an instance ensure request. 
async fn propolis_ensure( - &self, - inner: &mut MutexGuard<'_, InstanceInner>, + &mut self, migration_params: Option, ) -> Result<(), Error> { - if let Some(running_state) = inner.running_state.as_ref() { + if let Some(running_state) = self.running_state.as_ref() { info!( - &inner.log, + &self.log, "Ensuring instance which already has a running state" ); - inner - .propolis_ensure( - &running_state.client, - &running_state.running_zone, - migration_params, - ) - .await?; + self.propolis_ensure_inner( + &running_state.client, + &running_state.running_zone, + migration_params, + ) + .await?; } else { let setup_result: Result<(), Error> = 'setup: { // Set up the Propolis zone and the objects associated with it. - let setup = match self.setup_propolis_locked(inner).await { + let setup = match self.setup_propolis_inner().await { Ok(setup) => setup, Err(e) => break 'setup Err(e), }; @@ -858,13 +1200,7 @@ impl Instance { // Direct the Propolis server to create its VM and the tasks // associated with it. On success, the zone handle moves into // this instance, preserving the zone. - inner - .ensure_propolis_and_tasks( - self.clone(), - setup, - migration_params, - ) - .await + self.ensure_propolis_and_tasks(setup, migration_params).await }; // If this instance started from scratch, and startup failed, move @@ -875,43 +1211,30 @@ impl Instance { // start a migration target simply leaves the VM running untouched // on the source. if migration_params.is_none() && setup_result.is_err() { - error!(&inner.log, "vmm setup failed: {:?}", setup_result); + error!(&self.log, "vmm setup failed: {:?}", setup_result); // This case is morally equivalent to starting Propolis and then // rudely terminating it before asking it to do anything. Update // the VMM and instance states accordingly. - inner.state.terminate_rudely(); + self.state.terminate_rudely(); } setup_result?; } Ok(()) } - /// Attempts to update the current state of the instance by launching a - /// Propolis process for the instance (if needed) and issuing an appropriate - /// request to Propolis to change state. - /// - /// Returns the instance's state after applying any changes required by this - /// call. Note that if the instance's Propolis is in the middle of its own - /// state transition, it may publish states that supersede the state - /// published by this routine in perhaps-surprising ways. For example, if an - /// instance begins to stop when Propolis has just begun to handle a prior - /// request to reboot, the instance's state may proceed from Stopping to - /// Rebooting to Running to Stopping to Stopped. - pub async fn put_state( - &self, + async fn put_state( + &mut self, state: crate::params::InstanceStateRequested, ) -> Result { use propolis_client::types::InstanceStateRequested as PropolisRequest; - let mut inner = self.inner.lock().await; let (propolis_state, next_published) = match state { InstanceStateRequested::MigrationTarget(migration_params) => { - self.propolis_ensure(&mut inner, Some(migration_params)) - .await?; + self.propolis_ensure(Some(migration_params)).await?; (None, None) } InstanceStateRequested::Running => { - self.propolis_ensure(&mut inner, None).await?; + self.propolis_ensure(None).await?; (Some(PropolisRequest::Run), None) } InstanceStateRequested::Stopped => { @@ -919,9 +1242,8 @@ impl Instance { // immediately. Since there is no Propolis to push updates when // this happens, generate an instance record bearing the // "Destroyed" state and return it to the caller. 
- if inner.running_state.is_none() { - inner.terminate().await?; - inner.state.terminate_rudely(); + if self.running_state.is_none() { + self.terminate().await; (None, None) } else { ( @@ -931,8 +1253,8 @@ impl Instance { } } InstanceStateRequested::Reboot => { - if inner.running_state.is_none() { - return Err(Error::InstanceNotRunning(*inner.id())); + if self.running_state.is_none() { + return Err(Error::InstanceNotRunning(*self.id())); } ( Some(PropolisRequest::Reboot), @@ -942,78 +1264,73 @@ impl Instance { }; if let Some(p) = propolis_state { - inner.propolis_state_put(p).await?; + self.propolis_state_put(p).await?; } if let Some(s) = next_published { - inner.state.transition_vmm(s, Utc::now()); + self.state.transition_vmm(s, Utc::now()); } - Ok(inner.state.sled_instance_state()) + Ok(self.state.sled_instance_state()) } - pub async fn put_migration_ids( - &self, + async fn put_migration_ids( + &mut self, old_runtime: &InstanceRuntimeState, migration_ids: &Option, ) -> Result { - let mut inner = self.inner.lock().await; - // Check that the instance's current generation matches the one the // caller expects to transition from. This helps Nexus ensure that if // multiple migration sagas launch at Propolis generation N, then only // one of them will successfully set the instance's migration IDs. - if inner.state.instance().gen != old_runtime.gen { + if self.state.instance().gen != old_runtime.gen { // Allow this transition for idempotency if the instance is // already in the requested goal state. - if inner.state.migration_ids_already_set(old_runtime, migration_ids) + if self.state.migration_ids_already_set(old_runtime, migration_ids) { - return Ok(inner.state.sled_instance_state()); + return Ok(self.state.sled_instance_state()); } return Err(Error::Transition( omicron_common::api::external::Error::conflict(format!( "wrong instance state generation: expected {}, got {}", - inner.state.instance().gen, + self.state.instance().gen, old_runtime.gen )), )); } - inner.state.set_migration_ids(migration_ids, Utc::now()); - Ok(inner.state.sled_instance_state()) + self.state.set_migration_ids(migration_ids, Utc::now()); + Ok(self.state.sled_instance_state()) } - async fn setup_propolis_locked( - &self, - inner: &mut MutexGuard<'_, InstanceInner>, - ) -> Result { + async fn setup_propolis_inner(&mut self) -> Result { // Create OPTE ports for the instance - let mut opte_ports = Vec::with_capacity(inner.requested_nics.len()); - for nic in inner.requested_nics.iter() { + let mut opte_ports = Vec::with_capacity(self.requested_nics.len()); + for nic in self.requested_nics.iter() { let (snat, ephemeral_ip, floating_ips) = if nic.primary { ( - Some(inner.source_nat), - inner.ephemeral_ip, - &inner.floating_ips[..], + Some(self.source_nat), + self.ephemeral_ip, + &self.floating_ips[..], ) } else { (None, None, &[][..]) }; - let port = inner.port_manager.create_port( + let port = self.port_manager.create_port( nic, snat, ephemeral_ip, floating_ips, - &inner.firewall_rules, - inner.dhcp_config.clone(), + &self.firewall_rules, + self.dhcp_config.clone(), )?; opte_ports.push(port); } // Create a zone for the propolis instance, using the previously // configured VNICs. - let zname = propolis_zone_name(inner.propolis_id()); + let zname = propolis_zone_name(self.propolis_id()); let mut rng = rand::rngs::StdRng::from_entropy(); - let root = inner + let root = self .storage .get_latest_resources() .await @@ -1021,15 +1338,15 @@ impl Instance { .choose(&mut rng) .ok_or_else(|| Error::U2NotFound)? 
.clone(); - let installed_zone = inner + let installed_zone = self .zone_builder_factory .builder() - .with_log(inner.log.clone()) - .with_underlay_vnic_allocator(&inner.vnic_allocator) + .with_log(self.log.clone()) + .with_underlay_vnic_allocator(&self.vnic_allocator) .with_zone_root_path(&root) .with_zone_image_paths(&["/opt/oxide".into()]) .with_zone_type("propolis-server") - .with_unique_name(*inner.propolis_id()) + .with_unique_name(*self.propolis_id()) .with_datasets(&[]) .with_filesystems(&[]) .with_data_links(&[]) @@ -1044,7 +1361,7 @@ impl Instance { .install() .await?; - let gateway = inner.port_manager.underlay_ip(); + let gateway = self.port_manager.underlay_ip(); // TODO: We should not be using the resolver here to lookup the Nexus IP // address. It would be preferable for Propolis, and through Propolis, @@ -1055,7 +1372,7 @@ impl Instance { // breaks. // - With a DNS resolver: the metric producer would be able to continue // sending requests to new servers as they arise. - let metric_ip = inner + let metric_ip = self .nexus_client .resolver() .lookup_ipv6(internal_dns::ServiceName::Nexus) @@ -1077,12 +1394,12 @@ impl Instance { .add_property( "listen_addr", "astring", - &inner.propolis_addr.ip().to_string(), + &self.propolis_addr.ip().to_string(), ) .add_property( "listen_port", "astring", - &inner.propolis_addr.port().to_string(), + &self.propolis_addr.port().to_string(), ) .add_property("metric_addr", "astring", &metric_addr.to_string()); @@ -1092,102 +1409,52 @@ impl Instance { .add_property_group(config), ), ); - profile.add_to_zone(&inner.log, &installed_zone).await?; + profile.add_to_zone(&self.log, &installed_zone).await?; let running_zone = RunningZone::boot(installed_zone).await?; - info!(inner.log, "Started propolis in zone: {}", zname); + info!(self.log, "Started propolis in zone: {}", zname); // This isn't strictly necessary - we wait for the HTTP server below - // but it helps distinguish "online in SMF" from "responding to HTTP // requests". let fmri = fmri_name(); - wait_for_service(Some(&zname), &fmri, inner.log.clone()) + wait_for_service(Some(&zname), &fmri, self.log.clone()) .await .map_err(|_| Error::Timeout(fmri.to_string()))?; - info!(inner.log, "Propolis SMF service is online"); + info!(self.log, "Propolis SMF service is online"); // We use a custom client builder here because the default progenitor // one has a timeout of 15s but we want to be able to wait indefinitely. let reqwest_client = reqwest::ClientBuilder::new().build().unwrap(); let client = Arc::new(PropolisClient::new_with_client( - &format!("http://{}", &inner.propolis_addr), + &format!("http://{}", &self.propolis_addr), reqwest_client, )); // Although the instance is online, the HTTP server may not be running // yet. Wait for it to respond to requests, so users of the instance // don't need to worry about initialization races. - wait_for_http_server(&inner.log, &client).await?; - info!(inner.log, "Propolis HTTP server online"); + wait_for_http_server(&self.log, &client).await?; + info!(self.log, "Propolis HTTP server online"); Ok(PropolisSetup { client, running_zone }) } - /// Rudely terminates this instance's Propolis (if it has one) and - /// immediately transitions the instance to the Destroyed state. 
- pub async fn terminate(&self) -> Result { - let mut inner = self.inner.lock().await; - inner.terminate().await?; - - // Rude termination is safe here because this routine took the lock - // before terminating the zone, which will cause any pending - // observations from the instance state monitor to be - inner.state.terminate_rudely(); - Ok(inner.state.sled_instance_state()) - } - - // Monitors propolis until explicitly told to disconnect. - // - // Intended to be spawned in a tokio task within [`Instance::start`]. - async fn monitor_state_task( - &self, - client: Arc, - ) -> Result<(), Error> { - let mut gen = 0; - loop { - // State monitoring always returns the most recent state/gen pair - // known to Propolis. - let response = client - .instance_state_monitor() - .body(propolis_client::types::InstanceStateMonitorRequest { - gen, - }) - .send() - .await? - .into_inner(); - - let reaction = { - // The observed state depends on what Propolis reported and on - // the `Instance`'s stored state. Take the instance lock to - // stabilize that state across this entire operation. - let mut inner = self.inner.lock().await; - let observed = ObservedPropolisState::new( - inner.state.instance(), - &response, - ); - let reaction = inner.observe_state(&observed).await?; - inner.publish_state_to_nexus().await; - reaction - }; + async fn terminate(&mut self) -> SledInstanceState { + self.terminate_inner().await; + self.state.terminate_rudely(); - if let Reaction::Terminate = reaction { - return Ok(()); - } - - // Update the generation number we're asking for, to ensure the - // Propolis will only return more recent values. - gen = response.gen + 1; - } + // This causes the "run" task to exit on the next iteration. + self.should_terminate = true; + self.state.sled_instance_state() } - pub async fn issue_snapshot_request( + async fn issue_snapshot_request( &self, disk_id: Uuid, snapshot_id: Uuid, ) -> Result<(), Error> { - let inner = self.inner.lock().await; - - if let Some(running_state) = &inner.running_state { + if let Some(running_state) = &self.running_state { running_state .client .instance_issue_crucible_snapshot_request() @@ -1198,55 +1465,49 @@ impl Instance { Ok(()) } else { - Err(Error::InstanceNotRunning(inner.properties.id)) + Err(Error::InstanceNotRunning(self.properties.id)) } } - pub async fn add_external_ip( - &self, + async fn add_external_ip( + &mut self, ip: &InstanceExternalIpBody, ) -> Result<(), Error> { - let mut inner = self.inner.lock().await; - // The internal call can either fail on adding the IP // to the list, or on the OPTE step. // Be cautious and reset state if either fails. // Note we don't need to re-ensure port manager/OPTE state // since that's the last call we make internally. - let old_eph = inner.ephemeral_ip; - let out = inner.add_external_ip(ip).await; + let old_eph = self.ephemeral_ip; + let out = self.add_external_ip_inner(ip).await; if out.is_err() { - inner.ephemeral_ip = old_eph; + self.ephemeral_ip = old_eph; if let InstanceExternalIpBody::Floating(ip) = ip { - inner.floating_ips.retain(|v| v != ip); + self.floating_ips.retain(|v| v != ip); } } - out } - pub async fn delete_external_ip( - &self, + async fn delete_external_ip( + &mut self, ip: &InstanceExternalIpBody, ) -> Result<(), Error> { - let mut inner = self.inner.lock().await; - // Similar logic to `add_external_ip`, except here we // need to readd the floating IP if it was removed. // OPTE doesn't care about the order of floating IPs. 
- let old_eph = inner.ephemeral_ip; - let out = inner.delete_external_ip(ip).await; + let old_eph = self.ephemeral_ip; + let out = self.delete_external_ip_inner(ip).await; if out.is_err() { - inner.ephemeral_ip = old_eph; + self.ephemeral_ip = old_eph; if let InstanceExternalIpBody::Floating(ip) = ip { - if !inner.floating_ips.contains(ip) { - inner.floating_ips.push(*ip); + if !self.floating_ips.contains(ip) { + self.floating_ips.push(*ip); } } } - out } } diff --git a/sled-agent/src/instance_manager.rs b/sled-agent/src/instance_manager.rs index b66b0400e1..80c62be234 100644 --- a/sled-agent/src/instance_manager.rs +++ b/sled-agent/src/instance_manager.rs @@ -15,6 +15,8 @@ use crate::params::{ }; use crate::zone_bundle::BundleError; use crate::zone_bundle::ZoneBundler; + +use anyhow::anyhow; use illumos_utils::dladm::Etherstub; use illumos_utils::link::VnicAllocator; use illumos_utils::opte::PortManager; @@ -29,8 +31,12 @@ use slog::Logger; use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::{Arc, Mutex}; +use tokio::sync::{mpsc, oneshot}; use uuid::Uuid; +// The depth of the request queue for the instance manager. +const QUEUE_SIZE: usize = 256; + #[derive(thiserror::Error, Debug)] pub enum Error { #[error("Instance error: {0}")] @@ -53,6 +59,17 @@ pub enum Error { #[error("Zone bundle error")] ZoneBundle(#[from] BundleError), + + #[error("Failed to send request to Instance Manager: Channel closed")] + FailedSendInstanceManagerClosed, + + #[error( + "Failed to send request from Instance Manager: Client Channel closed" + )] + FailedSendClientClosed, + + #[error("Instance Manager dropped our request")] + RequestDropped(#[from] oneshot::error::RecvError), } pub enum ReservoirMode { @@ -61,26 +78,6 @@ pub enum ReservoirMode { Percentage(u8), } -struct InstanceManagerInternal { - log: Logger, - nexus_client: NexusClientWithResolver, - - /// Last set size of the VMM reservoir (in bytes) - reservoir_size: Mutex, - - // TODO: If we held an object representing an enum of "Created OR Running" - // instance, we could avoid the methods within "instance.rs" that panic - // if the Propolis client hasn't been initialized. - /// A mapping from a Sled Agent "Instance ID" to ("Propolis ID", [Instance]). - instances: Mutex>, - - vnic_allocator: VnicAllocator, - port_manager: PortManager, - storage: StorageHandle, - zone_bundler: ZoneBundler, - zone_builder_factory: ZoneBuilderFactory, -} - pub(crate) struct InstanceManagerServices { pub nexus_client: NexusClientWithResolver, pub vnic_allocator: VnicAllocator, @@ -90,6 +87,22 @@ pub(crate) struct InstanceManagerServices { pub zone_builder_factory: ZoneBuilderFactory, } +// Describes the internals of the "InstanceManager", though most of the +// instance manager's state exists within the "InstanceManagerRunner" structure. +struct InstanceManagerInternal { + log: Logger, + tx: mpsc::Sender, + // NOTE: Arguably, this field could be "owned" by the InstanceManagerRunner. + // It was not moved there, and the reservoir functions were not converted to + // use the message-passing interface (see: "InstanceManagerRequest") because + // callers of "get/set reservoir size" are not async, and (in the case of + // getting the size) they also do not expect a "Result" type. + reservoir_size: Mutex, + + #[allow(dead_code)] + runner_handle: tokio::task::JoinHandle<()>, +} + /// All instances currently running on the sled. 
pub struct InstanceManager { inner: Arc, @@ -106,19 +119,34 @@ impl InstanceManager { zone_bundler: ZoneBundler, zone_builder_factory: ZoneBuilderFactory, ) -> Result { - Ok(InstanceManager { - inner: Arc::new(InstanceManagerInternal { - log: log.new(o!("component" => "InstanceManager")), - nexus_client, + let (tx, rx) = mpsc::channel(QUEUE_SIZE); + let (terminate_tx, terminate_rx) = mpsc::unbounded_channel(); + + let log = log.new(o!("component" => "InstanceManager")); + let runner = InstanceManagerRunner { + log: log.clone(), + rx, + terminate_tx, + terminate_rx, + nexus_client, + instances: BTreeMap::new(), + vnic_allocator: VnicAllocator::new("Instance", etherstub), + port_manager, + storage, + zone_bundler, + zone_builder_factory, + }; + + let runner_handle = + tokio::task::spawn(async move { runner.run().await }); + Ok(Self { + inner: Arc::new(InstanceManagerInternal { + log, + tx, // no reservoir size set on startup reservoir_size: Mutex::new(ByteCount::from_kibibytes_u32(0)), - instances: Mutex::new(BTreeMap::new()), - vnic_allocator: VnicAllocator::new("Instance", etherstub), - port_manager, - storage, - zone_bundler, - zone_builder_factory, + runner_handle, }), }) } @@ -199,6 +227,336 @@ impl InstanceManager { *self.inner.reservoir_size.lock().unwrap() } + pub async fn ensure_registered( + &self, + instance_id: Uuid, + propolis_id: Uuid, + hardware: InstanceHardware, + instance_runtime: InstanceRuntimeState, + vmm_runtime: VmmRuntimeState, + propolis_addr: SocketAddr, + ) -> Result { + let (tx, rx) = oneshot::channel(); + self.inner + .tx + .send(InstanceManagerRequest::EnsureRegistered { + instance_id, + propolis_id, + hardware, + instance_runtime, + vmm_runtime, + propolis_addr, + tx, + }) + .await + .map_err(|_| Error::FailedSendInstanceManagerClosed)?; + rx.await? + } + + pub async fn ensure_unregistered( + &self, + instance_id: Uuid, + ) -> Result { + let (tx, rx) = oneshot::channel(); + self.inner + .tx + .send(InstanceManagerRequest::EnsureUnregistered { + instance_id, + tx, + }) + .await + .map_err(|_| Error::FailedSendInstanceManagerClosed)?; + rx.await? + } + + pub async fn ensure_state( + &self, + instance_id: Uuid, + target: InstanceStateRequested, + ) -> Result { + let (tx, rx) = oneshot::channel(); + self.inner + .tx + .send(InstanceManagerRequest::EnsureState { + instance_id, + target, + tx, + }) + .await + .map_err(|_| Error::FailedSendInstanceManagerClosed)?; + rx.await? + } + + pub async fn put_migration_ids( + &self, + instance_id: Uuid, + old_runtime: &InstanceRuntimeState, + migration_ids: &Option, + ) -> Result { + let (tx, rx) = oneshot::channel(); + self.inner + .tx + .send(InstanceManagerRequest::PutMigrationIds { + instance_id, + old_runtime: old_runtime.clone(), + migration_ids: *migration_ids, + tx, + }) + .await + .map_err(|_| Error::FailedSendInstanceManagerClosed)?; + rx.await? + } + + pub async fn instance_issue_disk_snapshot_request( + &self, + instance_id: Uuid, + disk_id: Uuid, + snapshot_id: Uuid, + ) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + self.inner + .tx + .send(InstanceManagerRequest::InstanceIssueDiskSnapshot { + instance_id, + disk_id, + snapshot_id, + tx, + }) + .await + .map_err(|_| Error::FailedSendInstanceManagerClosed)?; + rx.await? + } + + /// Create a zone bundle from a named instance zone, if it exists. 
+ pub async fn create_zone_bundle( + &self, + name: &str, + ) -> Result { + let (tx, rx) = oneshot::channel(); + self.inner + .tx + .send(InstanceManagerRequest::CreateZoneBundle { + name: name.to_string(), + tx, + }) + .await + .map_err(|err| BundleError::FailedSend(anyhow!(err)))?; + rx.await.map_err(|err| BundleError::DroppedRequest(anyhow!(err)))? + } + + pub async fn add_external_ip( + &self, + instance_id: Uuid, + ip: &InstanceExternalIpBody, + ) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + self.inner + .tx + .send(InstanceManagerRequest::InstanceAddExternalIp { + instance_id, + ip: *ip, + tx, + }) + .await + .map_err(|_| Error::FailedSendInstanceManagerClosed)?; + rx.await? + } + + pub async fn delete_external_ip( + &self, + instance_id: Uuid, + ip: &InstanceExternalIpBody, + ) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + self.inner + .tx + .send(InstanceManagerRequest::InstanceDeleteExternalIp { + instance_id, + ip: *ip, + tx, + }) + .await + .map_err(|_| Error::FailedSendInstanceManagerClosed)?; + rx.await? + } +} + +// Most requests that can be sent to the "InstanceManagerRunner" task. +// +// These messages are sent by "InstanceManager"'s interface, and processed by +// the runner task. +// +// By convention, responses are sent on the "tx" oneshot. +#[derive(strum::Display)] +enum InstanceManagerRequest { + EnsureRegistered { + instance_id: Uuid, + propolis_id: Uuid, + hardware: InstanceHardware, + instance_runtime: InstanceRuntimeState, + vmm_runtime: VmmRuntimeState, + propolis_addr: SocketAddr, + tx: oneshot::Sender>, + }, + EnsureUnregistered { + instance_id: Uuid, + tx: oneshot::Sender>, + }, + EnsureState { + instance_id: Uuid, + target: InstanceStateRequested, + tx: oneshot::Sender>, + }, + PutMigrationIds { + instance_id: Uuid, + old_runtime: InstanceRuntimeState, + migration_ids: Option, + tx: oneshot::Sender>, + }, + InstanceIssueDiskSnapshot { + instance_id: Uuid, + disk_id: Uuid, + snapshot_id: Uuid, + tx: oneshot::Sender>, + }, + CreateZoneBundle { + name: String, + tx: oneshot::Sender>, + }, + InstanceAddExternalIp { + instance_id: Uuid, + ip: InstanceExternalIpBody, + tx: oneshot::Sender>, + }, + InstanceDeleteExternalIp { + instance_id: Uuid, + ip: InstanceExternalIpBody, + tx: oneshot::Sender>, + }, +} + +// Requests that the instance manager stop processing information about a +// particular instance. +struct InstanceDeregisterRequest { + id: Uuid, +} + +struct InstanceManagerRunner { + log: Logger, + + // Request channel on which most instance manager requests are made. + rx: mpsc::Receiver, + + // Request channel for "stop tracking instances", removing instances + // from "self.instances". + // + // Although this is "unbounded", in practice, it cannot be larger than the + // number of currently running instances, and it will be cleared before + // new instances may be requested. + // + // We hold both sizes of this channel, and we give clones of the + // sender to new instance objects that are created. + terminate_tx: mpsc::UnboundedSender, + terminate_rx: mpsc::UnboundedReceiver, + + nexus_client: NexusClientWithResolver, + + // TODO: If we held an object representing an enum of "Created OR Running" + // instance, we could avoid the methods within "instance.rs" that panic + // if the Propolis client hasn't been initialized. + /// A mapping from a Sled Agent "Instance ID" to ("Propolis ID", [Instance]). 
+ instances: BTreeMap, + + vnic_allocator: VnicAllocator, + port_manager: PortManager, + storage: StorageHandle, + zone_bundler: ZoneBundler, + zone_builder_factory: ZoneBuilderFactory, +} + +impl InstanceManagerRunner { + /// Run the main loop of the InstanceManager. + /// + /// This should be spawned in a tokio task. + pub async fn run(mut self) { + use InstanceManagerRequest::*; + loop { + tokio::select! { + biased; + + // If anyone has made a request to remove an instance from our + // state tracking, we do so before processing subsequent + // requests. This ensure that "add, remove, add" of the same + // instance always works. + request = self.terminate_rx.recv() => { + match request { + Some(request) => { + self.instances.remove(&request.id); + }, + None => { + warn!(self.log, "InstanceManager's 'instance terminate' channel closed; shutting down"); + break; + }, + } + }, + request = self.rx.recv() => { + let request_variant = request.as_ref().map(|r| r.to_string()); + let result = match request { + Some(EnsureRegistered { + instance_id, + propolis_id, + hardware, + instance_runtime, + vmm_runtime, + propolis_addr, + tx, + }) => { + tx.send(self.ensure_registered(instance_id, propolis_id, hardware, instance_runtime, vmm_runtime, propolis_addr).await).map_err(|_| Error::FailedSendClientClosed) + }, + Some(EnsureUnregistered { instance_id, tx }) => { + self.ensure_unregistered(tx, instance_id).await + }, + Some(EnsureState { instance_id, target, tx }) => { + self.ensure_state(tx, instance_id, target).await + }, + Some(PutMigrationIds { instance_id, old_runtime, migration_ids, tx }) => { + self.put_migration_ids(tx, instance_id, &old_runtime, &migration_ids).await + }, + Some(InstanceIssueDiskSnapshot { instance_id, disk_id, snapshot_id, tx }) => { + self.instance_issue_disk_snapshot_request(tx, instance_id, disk_id, snapshot_id).await + }, + Some(CreateZoneBundle { name, tx }) => { + self.create_zone_bundle(tx, &name).await.map_err(Error::from) + }, + Some(InstanceAddExternalIp { instance_id, ip, tx }) => { + self.add_external_ip(tx, instance_id, &ip).await + }, + Some(InstanceDeleteExternalIp { instance_id, ip, tx }) => { + self.delete_external_ip(tx, instance_id, &ip).await + }, + None => { + warn!(self.log, "InstanceManager's request channel closed; shutting down"); + break; + }, + }; + + if let Err(err) = result { + warn!( + self.log, + "Error handling request"; + "request" => request_variant.unwrap(), + "err" => ?err + ); + } + } + } + } + } + + fn get_instance(&self, instance_id: Uuid) -> Option<&Instance> { + self.instances.get(&instance_id).map(|(_id, v)| v) + } + /// Ensures that the instance manager contains a registered instance with /// the supplied instance ID and the Propolis ID specified in /// `initial_hardware`. @@ -216,8 +574,8 @@ impl InstanceManager { /// (instance ID, Propolis ID) pair multiple times, but will fail if the /// instance is registered with a Propolis ID different from the one the /// caller supplied. 
- pub async fn ensure_registered( - &self, + async fn ensure_registered( + &mut self, instance_id: Uuid, propolis_id: Uuid, hardware: InstanceHardware, @@ -226,7 +584,7 @@ impl InstanceManager { propolis_addr: SocketAddr, ) -> Result { info!( - &self.inner.log, + &self.log, "ensuring instance is registered"; "instance_id" => %instance_id, "propolis_id" => %propolis_id, @@ -237,12 +595,11 @@ impl InstanceManager { ); let instance = { - let mut instances = self.inner.instances.lock().unwrap(); if let Some((existing_propolis_id, existing_instance)) = - instances.get(&instance_id) + self.instances.get(&instance_id) { if propolis_id != *existing_propolis_id { - info!(&self.inner.log, + info!(&self.log, "instance already registered with another Propolis ID"; "instance_id" => %instance_id, "existing_propolis_id" => %*existing_propolis_id); @@ -253,29 +610,26 @@ impl InstanceManager { )); } else { info!( - &self.inner.log, + &self.log, "instance already registered with requested Propolis ID" ); - existing_instance.clone() + existing_instance } } else { - info!(&self.inner.log, + info!(&self.log, "registering new instance"; "instance_id" => ?instance_id); - let instance_log = self.inner.log.new(o!()); + let instance_log = self.log.new(o!()); let ticket = - InstanceTicket::new(instance_id, self.inner.clone()); + InstanceTicket::new(instance_id, self.terminate_tx.clone()); let services = InstanceManagerServices { - nexus_client: self.inner.nexus_client.clone(), - vnic_allocator: self.inner.vnic_allocator.clone(), - port_manager: self.inner.port_manager.clone(), - storage: self.inner.storage.clone(), - zone_bundler: self.inner.zone_bundler.clone(), - zone_builder_factory: self - .inner - .zone_builder_factory - .clone(), + nexus_client: self.nexus_client.clone(), + vnic_allocator: self.vnic_allocator.clone(), + port_manager: self.port_manager.clone(), + storage: self.storage.clone(), + zone_bundler: self.zone_bundler.clone(), + zone_builder_factory: self.zone_builder_factory.clone(), }; let state = crate::instance::InstanceInitialState { @@ -293,182 +647,152 @@ impl InstanceManager { state, services, )?; - let instance_clone = instance.clone(); let _old = - instances.insert(instance_id, (propolis_id, instance)); + self.instances.insert(instance_id, (propolis_id, instance)); assert!(_old.is_none()); - instance_clone + &self.instances.get(&instance_id).unwrap().1 } }; - Ok(instance.current_state().await) + Ok(instance.current_state().await?) } /// Idempotently ensures the instance is not registered with this instance /// manager. If the instance exists and has a running Propolis, that /// Propolis is rudely terminated. - pub async fn ensure_unregistered( - &self, + async fn ensure_unregistered( + &mut self, + tx: oneshot::Sender>, instance_id: Uuid, - ) -> Result { - let instance = { - let instances = self.inner.instances.lock().unwrap(); - let instance = instances.get(&instance_id); - if let Some((_, instance)) = instance { - instance.clone() - } else { - return Ok(InstanceUnregisterResponse { - updated_runtime: None, - }); - } + ) -> Result<(), Error> { + // If the instance does not exist, we response immediately. 
+ let Some(instance) = self.get_instance(instance_id) else { + tx.send(Ok(InstanceUnregisterResponse { updated_runtime: None })) + .map_err(|_| Error::FailedSendClientClosed)?; + return Ok(()); }; - Ok(InstanceUnregisterResponse { - updated_runtime: Some(instance.terminate().await?), - }) + // Otherwise, we pipeline the request, and send it to the instance, + // where it can receive an appropriate response. + instance.terminate(tx).await?; + Ok(()) } /// Idempotently attempts to drive the supplied instance into the supplied /// runtime state. - pub async fn ensure_state( - &self, + async fn ensure_state( + &mut self, + tx: oneshot::Sender>, instance_id: Uuid, target: InstanceStateRequested, - ) -> Result { - let instance = { - let instances = self.inner.instances.lock().unwrap(); - let instance = instances.get(&instance_id); - - if let Some((_, instance)) = instance { - instance.clone() - } else { - match target { - // If the instance isn't registered, then by definition it - // isn't running here. Allow requests to stop or destroy the - // instance to succeed to provide idempotency. This has to - // be handled here (that is, on the "instance not found" - // path) to handle the case where a stop request arrived, - // Propolis handled it, sled agent unregistered the - // instance, and only then did a second stop request - // arrive. - InstanceStateRequested::Stopped => { - return Ok(InstancePutStateResponse { - updated_runtime: None, - }); - } - _ => { - return Err(Error::NoSuchInstance(instance_id)); - } + ) -> Result<(), Error> { + let Some(instance) = self.get_instance(instance_id) else { + match target { + // If the instance isn't registered, then by definition it + // isn't running here. Allow requests to stop or destroy the + // instance to succeed to provide idempotency. This has to + // be handled here (that is, on the "instance not found" + // path) to handle the case where a stop request arrived, + // Propolis handled it, sled agent unregistered the + // instance, and only then did a second stop request + // arrive. + InstanceStateRequested::Stopped => { + tx.send(Ok(InstancePutStateResponse { + updated_runtime: None, + })) + .map_err(|_| Error::FailedSendClientClosed)?; + } + _ => { + tx.send(Err(Error::NoSuchInstance(instance_id))) + .map_err(|_| Error::FailedSendClientClosed)?; } } + return Ok(()); }; - - let new_state = instance.put_state(target).await?; - Ok(InstancePutStateResponse { updated_runtime: Some(new_state) }) + instance.put_state(tx, target).await?; + Ok(()) } /// Idempotently attempts to set the instance's migration IDs to the /// supplied IDs. - pub async fn put_migration_ids( - &self, + async fn put_migration_ids( + &mut self, + tx: oneshot::Sender>, instance_id: Uuid, old_runtime: &InstanceRuntimeState, migration_ids: &Option, - ) -> Result { + ) -> Result<(), Error> { let (_, instance) = self - .inner .instances - .lock() - .unwrap() .get(&instance_id) - .ok_or_else(|| Error::NoSuchInstance(instance_id))? - .clone(); - - Ok(instance.put_migration_ids(old_runtime, migration_ids).await?) 
+ .ok_or_else(|| Error::NoSuchInstance(instance_id))?; + instance + .put_migration_ids(tx, old_runtime.clone(), *migration_ids) + .await?; + Ok(()) } - pub async fn instance_issue_disk_snapshot_request( + async fn instance_issue_disk_snapshot_request( &self, + tx: oneshot::Sender>, instance_id: Uuid, disk_id: Uuid, snapshot_id: Uuid, ) -> Result<(), Error> { let instance = { - let instances = self.inner.instances.lock().unwrap(); - let (_, instance) = instances + let (_, instance) = self + .instances .get(&instance_id) .ok_or(Error::NoSuchInstance(instance_id))?; - instance.clone() + instance }; instance - .issue_snapshot_request(disk_id, snapshot_id) + .issue_snapshot_request(tx, disk_id, snapshot_id) .await .map_err(Error::from) } /// Create a zone bundle from a named instance zone, if it exists. - pub async fn create_zone_bundle( + async fn create_zone_bundle( &self, + tx: oneshot::Sender>, name: &str, - ) -> Result { - // We need to find the instance and take its lock, but: - // - // 1. The instance-map lock is sync, and - // 2. we don't want to hold the instance-map lock for the entire - // bundling duration. - // - // Instead, we cheaply clone the instance through its `Arc` around the - // `InstanceInner`, which is ultimately what we want. - let Some((_propolis_id, instance)) = self - .inner - .instances - .lock() - .unwrap() - .values() - .find(|(propolis_id, _instance)| { + ) -> Result<(), BundleError> { + let Some((_propolis_id, instance)) = + self.instances.values().find(|(propolis_id, _instance)| { name == propolis_zone_name(propolis_id) }) - .cloned() else { return Err(BundleError::NoSuchZone { name: name.to_string() }); }; - instance.request_zone_bundle().await + instance.request_zone_bundle(tx).await } - pub async fn add_external_ip( + async fn add_external_ip( &self, + tx: oneshot::Sender>, instance_id: Uuid, ip: &InstanceExternalIpBody, ) -> Result<(), Error> { - let instance = { - let instances = self.inner.instances.lock().unwrap(); - instances.get(&instance_id).map(|(_id, v)| v.clone()) - }; - - let Some(instance) = instance else { + let Some(instance) = self.get_instance(instance_id) else { return Err(Error::NoSuchInstance(instance_id)); }; - - instance.add_external_ip(ip).await?; + instance.add_external_ip(tx, ip).await?; Ok(()) } - pub async fn delete_external_ip( + async fn delete_external_ip( &self, + tx: oneshot::Sender>, instance_id: Uuid, ip: &InstanceExternalIpBody, ) -> Result<(), Error> { - let instance = { - let instances = self.inner.instances.lock().unwrap(); - instances.get(&instance_id).map(|(_id, v)| v.clone()) - }; - - let Some(instance) = instance else { + let Some(instance) = self.get_instance(instance_id) else { return Err(Error::NoSuchInstance(instance_id)); }; - instance.delete_external_ip(ip).await?; + instance.delete_external_ip(tx, ip).await?; Ok(()) } } @@ -476,28 +800,32 @@ impl InstanceManager { /// Represents membership of an instance in the [`InstanceManager`]. pub struct InstanceTicket { id: Uuid, - inner: Option>, + terminate_tx: Option>, } impl InstanceTicket { // Creates a new instance ticket for instance "id" to be removed - // from "inner" on destruction. - fn new(id: Uuid, inner: Arc) -> Self { - InstanceTicket { id, inner: Some(inner) } + // from the manger on destruction. + fn new( + id: Uuid, + terminate_tx: mpsc::UnboundedSender, + ) -> Self { + InstanceTicket { id, terminate_tx: Some(terminate_tx) } } /// Idempotently removes this instance from the tracked set of /// instances. 
This acts as an "upcall" for instances to remove /// themselves after stopping. - pub fn terminate(&mut self) { - if let Some(inner) = self.inner.take() { - inner.instances.lock().unwrap().remove(&self.id); + pub fn deregister(&mut self) { + if let Some(terminate_tx) = self.terminate_tx.take() { + let _ = + terminate_tx.send(InstanceDeregisterRequest { id: self.id }); } } } impl Drop for InstanceTicket { fn drop(&mut self) { - self.terminate(); + self.deregister(); } } diff --git a/sled-agent/src/zone_bundle.rs b/sled-agent/src/zone_bundle.rs index 70b9da7708..f9e1c7b3be 100644 --- a/sled-agent/src/zone_bundle.rs +++ b/sled-agent/src/zone_bundle.rs @@ -615,6 +615,12 @@ pub enum BundleError { #[error("Failed to join zone bundling task")] Task(#[from] tokio::task::JoinError), + #[error("Failed to send request to instance/instance manager")] + FailedSend(anyhow::Error), + + #[error("Instance/Instance Manager dropped our request")] + DroppedRequest(anyhow::Error), + #[error("Failed to create bundle: {0}")] BundleFailed(#[from] anyhow::Error), From 6003a5aae2f0a44c108f0e76556b7c215ba4c270 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 15 Feb 2024 06:08:39 +0000 Subject: [PATCH 07/12] Update taiki-e/install-action digest to 14422f8 (#5072) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`717ed1c` -> `14422f8`](https://togithub.com/taiki-e/install-action/compare/717ed1c...14422f8) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 68d816fc2d..c263eecca3 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@717ed1cb83959ef327137c2f806e1d8597bfca9f # v2 + uses: taiki-e/install-action@14422f84f07a2d547893af56258f5e59333262df # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From 97dfaf09ef8fa5202c2c78ca8561e9910dbf9e56 Mon Sep 17 00:00:00 2001 From: Rain Date: Wed, 14 Feb 2024 22:22:27 -0800 Subject: [PATCH 08/12] [nexus-db-queries] remove uses of filter_target (#5052) `filter_target` is required for partial indexes but is easy to misuse for full indexes. To work around that, define an `as_partial_index` method, switch to it, and ban general uses of `filter_target` via clippy. `as_partial_index` adds a `WHERE false` clause. See the comments included in the PR for why that works. The rest of the PR: * Makes sure clippy's `disallowed_` lints are active. `-A clippy::style` disables them. 
* Adds some missing tests (thanks @jmpesp) * Fixes a `RunnableQuery` that often did not return anything, replacing it with `RunnableQueryNoReturn`. --- clippy.toml | 13 + dev-tools/xtask/src/main.rs | 17 +- nexus/db-queries/src/db/datastore/silo.rs | 2 +- .../db-queries/src/db/datastore/silo_group.rs | 10 +- nexus/db-queries/src/db/datastore/snapshot.rs | 7 +- nexus/db-queries/src/db/mod.rs | 2 + nexus/db-queries/src/db/on_conflict_ext.rs | 329 ++++++++++++++++++ nexus/tests/integration_tests/snapshots.rs | 40 ++- 8 files changed, 404 insertions(+), 16 deletions(-) create mode 100644 clippy.toml create mode 100644 nexus/db-queries/src/db/on_conflict_ext.rs diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 0000000000..ffa3ffac70 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,13 @@ +disallowed-methods = [ + # `ON CONFLICT ... WHERE ... DO UPDATE` is easy to misuse. + # + # If you mean a conditional upsert, import + # diesel::query_dsl::methods::FilterDsl, then use + # `do_update().set(...).filter(...)`. (This is not part of the + # documentation as of Diesel 2.1.4, but it works.) + # + # If you mean to specify a bound on a partial index, use the + # `IncompleteOnConflictExt::as_partial_index` in `nexus-db-queries`. + # See the documentation of that method for more. + "diesel::upsert::DecoratableTarget::filter_target", +] diff --git a/dev-tools/xtask/src/main.rs b/dev-tools/xtask/src/main.rs index 93d91799bc..4ab42736ca 100644 --- a/dev-tools/xtask/src/main.rs +++ b/dev-tools/xtask/src/main.rs @@ -46,6 +46,9 @@ fn cmd_clippy() -> Result<()> { // Make sure we check everything. .arg("--all-targets") .arg("--") + // For a list of lints, see + // https://rust-lang.github.io/rust-clippy/master. + // // We disallow warnings by default. .arg("--deny") .arg("warnings") @@ -53,7 +56,19 @@ fn cmd_clippy() -> Result<()> { // override belongs in src/lib.rs, and it is there, but that doesn't // reliably work due to rust-lang/rust-clippy#6610. .arg("--allow") - .arg("clippy::style"); + .arg("clippy::style") + // But continue to warn on anything in the "disallowed_" namespace. + // (These will be turned into errors by `--deny warnings` above.) 
+ .arg("--warn") + .arg("clippy::disallowed_macros") + .arg("--warn") + .arg("clippy::disallowed_methods") + .arg("--warn") + .arg("clippy::disallowed_names") + .arg("--warn") + .arg("clippy::disallowed_script_idents") + .arg("--warn") + .arg("clippy::disallowed_types"); eprintln!( "running: {:?} {}", diff --git a/nexus/db-queries/src/db/datastore/silo.rs b/nexus/db-queries/src/db/datastore/silo.rs index a88a27872f..6cfda3c093 100644 --- a/nexus/db-queries/src/db/datastore/silo.rs +++ b/nexus/db-queries/src/db/datastore/silo.rs @@ -250,7 +250,7 @@ impl DataStore { .await?; if let Some(query) = silo_admin_group_ensure_query { - query.get_result_async(&conn).await?; + query.execute_async(&conn).await?; } if let Some(queries) = silo_admin_group_role_assignment_queries diff --git a/nexus/db-queries/src/db/datastore/silo_group.rs b/nexus/db-queries/src/db/datastore/silo_group.rs index 29fcb7490b..b8ef759116 100644 --- a/nexus/db-queries/src/db/datastore/silo_group.rs +++ b/nexus/db-queries/src/db/datastore/silo_group.rs @@ -8,13 +8,14 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::datastore::RunnableQuery; +use crate::db::datastore::RunnableQueryNoReturn; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::model::SiloGroup; use crate::db::model::SiloGroupMembership; use crate::db::pagination::paginated; +use crate::db::IncompleteOnConflictExt; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; @@ -34,16 +35,15 @@ impl DataStore { opctx: &OpContext, authz_silo: &authz::Silo, silo_group: SiloGroup, - ) -> Result, Error> { + ) -> Result { opctx.authorize(authz::Action::CreateChild, authz_silo).await?; use db::schema::silo_group::dsl; Ok(diesel::insert_into(dsl::silo_group) .values(silo_group) .on_conflict((dsl::silo_id, dsl::external_id)) - .filter_target(dsl::time_deleted.is_null()) - .do_nothing() - .returning(SiloGroup::as_returning())) + .as_partial_index() + .do_nothing()) } pub async fn silo_group_ensure( diff --git a/nexus/db-queries/src/db/datastore/snapshot.rs b/nexus/db-queries/src/db/datastore/snapshot.rs index 7a9eb8d2bc..7a3f84bbb2 100644 --- a/nexus/db-queries/src/db/datastore/snapshot.rs +++ b/nexus/db-queries/src/db/datastore/snapshot.rs @@ -20,6 +20,7 @@ use crate::db::model::SnapshotState; use crate::db::pagination::paginated; use crate::db::update_and_check::UpdateAndCheck; use crate::db::update_and_check::UpdateStatus; +use crate::db::IncompleteOnConflictExt; use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; @@ -69,7 +70,7 @@ impl DataStore { // As written below, // // .on_conflict((dsl::project_id, dsl::name)) - // .filter_target(dsl::time_deleted.is_null()) + // .as_partial_index() // .do_update() // .set(dsl::time_modified.eq(dsl::time_modified)) // @@ -79,7 +80,7 @@ impl DataStore { // (marked with >>): // // .on_conflict((dsl::project_id, dsl::name)) - // .filter_target(dsl::time_deleted.is_null()) + // .as_partial_index() // .do_update() // .set(dsl::time_modified.eq(dsl::time_modified)) // >> .filter(dsl::id.eq(snapshot.id())) @@ -118,7 +119,7 @@ impl DataStore { diesel::insert_into(dsl::snapshot) .values(snapshot) .on_conflict((dsl::project_id, dsl::name)) - .filter_target(dsl::time_deleted.is_null()) + .as_partial_index() .do_update() .set(dsl::time_modified.eq(dsl::time_modified)), ) diff --git a/nexus/db-queries/src/db/mod.rs 
b/nexus/db-queries/src/db/mod.rs index e21ba2e3a8..184ebe5f8a 100644 --- a/nexus/db-queries/src/db/mod.rs +++ b/nexus/db-queries/src/db/mod.rs @@ -21,6 +21,7 @@ pub(crate) mod error; mod explain; pub mod fixed_data; pub mod lookup; +mod on_conflict_ext; // Public for doctests. pub mod pagination; mod pool; @@ -44,6 +45,7 @@ pub use nexus_db_model::schema; pub use crate::db::error::TransactionError; pub use config::Config; pub use datastore::DataStore; +pub use on_conflict_ext::IncompleteOnConflictExt; pub use pool::{DbConnection, Pool}; pub use saga_recovery::{recover, CompletionTask, RecoveryTask}; pub use saga_types::SecId; diff --git a/nexus/db-queries/src/db/on_conflict_ext.rs b/nexus/db-queries/src/db/on_conflict_ext.rs new file mode 100644 index 0000000000..25e05d1b0f --- /dev/null +++ b/nexus/db-queries/src/db/on_conflict_ext.rs @@ -0,0 +1,329 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use diesel::dsl::sql; +use diesel::expression::SqlLiteral; +use diesel::prelude::*; +use diesel::sql_types::Bool; +use diesel::upsert::IncompleteOnConflict; + +/// An extension trait for incomplete `INSERT INTO ... ON CONFLICT` +/// expressions. +/// +/// In Diesel, these are represented by [`IncompleteOnConflict`]. This trait is +/// implemented for expressions of that type. +pub trait IncompleteOnConflictExt { + /// The output of [`IncompleteOnConflictExt::as_partial_index`]. + type AsPartialIndexOutput; + + /// Adds a `WHERE false` clause to the `ON CONFLICT` expression, which + /// informs the database that the set of columns is a partial index. + /// + /// This is a replacement for [`DecoratableTarget::filter_target`] which is + /// easier to use, and significantly harder to misuse. In Omicron, we have + /// implemented a general ban on `filter_target` because it's so easy to + /// silently introduce bugs with it. + /// + /// # Background + /// + /// ## 1. Partial indexes + /// + /// Most users of databases are aware of *unique indexes*: a set of columns + /// that uniquely identifies a row in a given table. For example, the + /// primary key of a table is always a unique index. + /// + /// Databases like CockroachDB also support *[partial indexes]*: a unique + /// index formed only on the set of columns that meet a given condition. In + /// Omicron, we have several partial indexes of the form (as described in + /// `dbinit.sql`): + /// + /// ```sql + /// CREATE UNIQUE INDEX IF NOT EXISTS lookup_silo_group_by_silo ON omicron.public.silo_group ( + /// silo_id, + /// external_id + /// ) WHERE + /// time_deleted IS NULL; + /// ``` + /// + /// This index means that in the `omicron.public.silo_group` table, each + /// pair of `silo_id` and `external_id` must be unique. However, the + /// uniqueness is only enforced for rows where `time_deleted` is `NULL`. + /// (Logically, this makes sense: rows that have been soft-deleted should + /// not have an influence on uniqueness constraints.) + /// + /// ## 2. `INSERT INTO ... ON CONFLICT` + /// + /// In general, inserting a new row into an SQL table looks like: + /// + /// ```sql + /// INSERT INTO table_name (column1, column2, ...) VALUES (value1, value2, ...); + /// ``` + /// + /// By default, if you try to insert a row that would violate a unique + /// constraint, the database will raise an error. However, you can + /// customize this behavior with [the `ON CONFLICT` clause]. 
+ /// + /// For example, for a table with five columns `pkey` (primary key), + /// `external_id` (a UUID), `data`, `time_created`, and `time_deleted`, you + /// can write: + /// + /// ```sql + /// INSERT INTO my_table (pkey, external_id, data, time_created, time_deleted) + /// VALUES (1, '70b10c38-36aa-4be0-8ec5-e7923438b3b8', 'foo', now(), NULL) + /// ON CONFLICT (pkey) + /// DO UPDATE SET data = 'foo'; + /// ``` + /// + /// In this case: + /// + /// * If the row doesn't exist, a new row is inserted with the data, and + /// `time_created` set to the current time. + /// * If the row does exist, the `data` column is updated to `'foo'` but + /// `time_created` is not changed. + /// + /// ## 3. Partial indexes and `ON CONFLICT` + /// + /// Now consider what happens if there's a partial index on `my_table`, + /// ensuring that `external_id` is unique if the row hasn't been + /// soft-deleted: + /// + /// ```sql + /// CREATE UNIQUE INDEX IF NOT EXISTS my_table_external_id + /// ON my_table (external_id) + /// WHERE time_deleted IS NULL; + /// ``` + /// + /// You can now try to write a query which takes into account the conflict + /// on `external_id`: + /// + /// ```sql + /// INSERT INTO my_table (pkey, external_id, data, time_created, time_deleted) + /// VALUES (1, '70b10c38-36aa-4be0-8ec5-e7923438b3b8', 'foo', now(), NULL) + /// ON CONFLICT (external_id) + /// DO UPDATE SET data = 'foo'; + /// ``` + /// + /// But this query does not work, and Cockroach errors out with: + /// + /// ```text + /// ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification + /// ``` + /// + /// The reason for that is that simply specifying the columns is not + /// enough. Because the index is _partial_, Cockroach requires that the + /// `WHERE` clause of the index be specified as well. + /// + /// ```sql + /// INSERT INTO my_table (pkey, external_id, data, time_created, time_deleted) + /// VALUES (1, '70b10c38-36aa-4be0-8ec5-e7923438b3b8', 'foo', now(), NULL) + /// ON CONFLICT (external_id) + /// WHERE time_deleted IS NULL + /// DO UPDATE SET data = 'foo'; + /// ``` + /// + /// Importantly, the `WHERE` clause of the index doesn't have to exactly + /// _match_ the condition on the partial index; it can be anything that + /// _logically implies_ the condition on the partial index. With something + /// like `time_deleted IS NULL` the value of that is not exactly clear, but + /// you can imagine a partial index on something like `col >= 10`, and + /// write `ON CONFLICT (...) WHERE col >= 20`. This is allowed because `col + /// >= 20` implies `col >= 10`. (But `WHERE col >= 5` is not allowed.) + /// + /// ## 4. A similar syntax with a different meaning + /// + /// There's a similar syntax which does something completely different: `ON + /// CONFLICT ... DO UPDATE ... WHERE`. This syntax does a conditional + /// update, whether or not the conflict is in a partial index. For example, + /// `ON CONFLICT (pkey) DO UPDATE SET data = 'foo' WHERE data IS NULL` will + /// update `data` to `'foo'` if the row already exists, but only if `data` + /// is `NULL`. + /// + /// Now, what happens if you accidentally mix the two up, and write + /// something like: `ON CONFLICT (pkey) WHERE data IS NULL DO UPDATE SET + /// data = 'foo'`? You might imagine that this query is rejected, for two + /// reasons: + /// + /// 1. `pkey` is a full index, so you might expect a blanket ban on `WHERE` + /// clauses. + /// 2. `data IS NULL` is irrelevant to `pkey`. 
+ /// + /// But in reality, this query is not only accepted---the `WHERE` clause is + /// **completely, silently ignored**. `data` will be set to `'foo'` no + /// matter if it is `NULL` or not. (This is true as of both CockroachDB + /// 23.2.0 and PostgreSQL 16.2). + /// + /// This seems pretty bad! Why does it happen? + /// + /// ## 5. A digression into logical systems + /// + /// The reason for that ties back into the idea that in `ON CONFLICT ... + /// WHERE`, the `WHERE` clause can be anything that _logically implies_ the + /// index. + /// + /// Let's take a step back and think about what logical implication (often + /// represented by `->`) means. In classical logic, `p -> q` is interpreted + /// as *[material implication]*, and is logically equivalent to `!p || q`. + /// + /// Consider what happens if `q` is a _tautology_, i.e. it is always true. + /// In that case, no matter what `p` is, `p -> q` is always true. + /// + /// Applying this to our SQL example: Since `pkey` is a full index, it is + /// equivalent to a partial index with `WHERE true`. In other words, `q` is + /// always true. This means that `p` (the `WHERE` clause) can be anything. + /// As a result, Cockroach completely ignores the `WHERE` clause. + /// + /// **This seems really bad!** Our intuitive understanding of "p implies q" + /// typically has some kind of notion of _relevance_ attached to it: we + /// expect that `p` being true has to have some bearing on q. But material + /// implication does not capture this idea at all. (Put a pin in this, + /// we're going to come back to it shortly.) + /// + /// ## 6. Towards addressing this + /// + /// To summarize where we're at: + /// + /// * `ON CONFLICT ... WHERE ... DO UPDATE` is easy to mix up with `ON + /// CONFLICT ... DO UPDATE ... WHERE`. + /// * `ON CONFLICT ... WHERE ... DO UPDATE` on a full index silently + /// accepts _anything_ in the `WHERE` clause. + /// * `ON CONFLICT ... WHERE ... DO UPDATE` is *required* for a partial + /// index. + /// + /// How can we prevent misuse while still supporting legitimate uses? + /// + /// One option is to look at [Diesel](https://diesel.rs/), our Rust ORM and + /// query builder. In Diesel, the usual way to add a `WHERE` clause is via + /// [the `filter` method]. But the special case of `ON CONFLICT ... WHERE` + /// is expressed via a separate [`filter_target` method]. + /// + /// Our first inclination might be to just ban `filter_target` entirely, + /// using Clippy's [`disallowed_methods`] lint. This would work great if + /// all we had was full indexes! But we do necessarily have partial + /// indexes, and we must do something about them. + /// + /// That's where `as_partial_index` comes in. + /// + /// ## 7. Back to logical systems + /// + /// How does `as_partial_index` address this problem? Let's go back to + /// material implication: `p -> q` is logically equivalent to `!p || q`. + /// + /// So far, we've discussed the case where `q` is a tautology. But let's + /// look at the other side of it: what if `p` is a _contradiction_? In + /// other words, what if `p` is always false? In that case, it is clear + /// that `p -> q` is always true, no matter what `q` is. + /// + /// It's worth sitting with this for a moment. What this means is that you + /// can make statements that are genuinely nonsense, like "if 2 + 2 = 5, + /// then the sky is green", but are valid in classical logic. 
+ /// + /// The fact that contradictions are "poisonous" to classical logic is a + /// well-known problem, called the [principle of explosion]. This is not + /// new---Aristotle was likely aware of it, and it was first described + /// fully by William of Soissons in the 12th century. Intuitively, **this + /// is even worse** than the earlier case where `q` is a tautology: at + /// least there, `q` is actually true so it doesn't feel too bad that `p` + /// doesn't matter. + /// + /// ## 8. Putting it all together + /// + /// The good news about the principle of explosion, though, is that this + /// gives us a path out. Remember that `ON CONFLICT ... WHERE ...` requires + /// that the bit after `WHERE` logically imply the partial index. Since a + /// contradiction implies anything, a clause like `WHERE 1 = 2`, or `WHERE + /// 'x' = 'y'`, works. + /// + /// In SQL, the simplest contradiction is simply `false`. So we use `WHERE + /// false` here. The effect of this is that it causes the database to + /// automatically infer the right partial index constraints. + /// + /// So the solution we came up in the end is to: + /// + /// * Ban `filter_target` via Clippy's `disallowed_methods` lint, to avoid + /// misuse in cases where there isn't a partial index. + /// * Define `as_partial_index` as a method that adds `WHERE false` to the + /// `ON CONFLICT` expression. + /// * For the cases where there really is a partial index, use + /// `as_partial_index`. + /// + /// `as_partial_index` is also accepted in cases where there's a full + /// index. That isn't a huge problem because a stray `WHERE false` is + /// pretty harmless. The goal is to prevent misuse of `filter_target`, and + /// that part is still upheld. + /// + /// --- + /// + /// ## Further notes + /// + /// The [paradoxes of material implication] are one of the central topics + /// in philosophical logic. Many non-classical logical systems have been + /// proposed to address this, including [paraconsistent logics] and + /// [relevance logics]. The common thread among these systems is that they + /// define implication differently from classical logic, in a way that + /// tries to match our common understanding better. + /// + /// Above, we focused on classical logic. SQL uses the non-classical + /// [three-valued logic] called **K3**. This logic defines implication in + /// the same way, and with the same issues, as material implication in + /// classical logic. + /// + /// With Cockroach 23.2.0 and earlier versions, for a conflict on a full + /// index, you can even specify non-existent columns: `ON CONFLICT ... + /// WHERE non_existent = 1`. This is rejected by Postgres, and has been + /// acknowledged as a bug in Cockroach. + /// + /// There is also an `ON CONFLICT ON CONSTRAINT` syntax which allows users + /// to specify the name of the constraint. That syntax only works for full + /// (non-partial) indexes. 
+ /// + /// Other references: + /// + /// * [Omicron issue + /// #5047](https://github.com/oxidecomputer/omicron/issues/5047) + /// * [CockroachDB issue + /// #119197](https://github.com/cockroachdb/cockroach/issues/119117) + /// + /// [partial indexes]: https://www.cockroachlabs.com/docs/stable/partial-indexes.html + /// [the `ON_CONFLICT` clause]: + /// https://www.cockroachlabs.com/docs/stable/insert#on-conflict-clause + /// [material implication]: + /// https://en.wikipedia.org/wiki/Material_conditional + /// [Diesel]: https://diesel.rs/ + /// [the `filter` method]: + /// https://docs.rs/diesel/2.1.4/diesel/query_dsl/methods/trait.FilterDsl.html#tymethod.filter + /// [`filter_target` method]: + /// https://docs.rs/diesel/2.1.4/diesel/query_dsl/trait.FilterTarget.html#tymethod.filter_targehttps://docs.rs/diesel/2.1.4/diesel/upsert/trait.DecoratableTarget.html#tymethod.filter_targett + /// [`disallowed_methods`]: + /// https://rust-lang.github.io/rust-clippy/master/index.html#disallowed_methods + /// [principle of explosion]: + /// https://en.wikipedia.org/wiki/Principle_of_explosion + /// [paradoxes of material implication]: + /// https://en.wikipedia.org/wiki/Paradoxes_of_material_implication + /// [paraconsistent logics]: + /// https://en.wikipedia.org/wiki/Paraconsistent_logic + /// [relevance logics]: https://en.wikipedia.org/wiki/Relevance_logic + /// [three-valued logic]: https://en.wikipedia.org/wiki/Three-valued_logic + fn as_partial_index(self) -> Self::AsPartialIndexOutput; +} + +impl IncompleteOnConflictExt + for IncompleteOnConflict +where + Target: DecoratableTarget>, +{ + type AsPartialIndexOutput = + IncompleteOnConflict>; + + fn as_partial_index(self) -> Self::AsPartialIndexOutput { + // Bypass this clippy warning in this one place only. + #[allow(clippy::disallowed_methods)] + { + // For why "false", see the doc comment above. + self.filter_target(sql::("false")) + } + } +} + +type DecoratedAsPartialIndex = + >>::FilterOutput; diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index 3731a80668..63ea81f13f 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -904,13 +904,13 @@ async fn test_create_snapshot_record_idempotent( let project_id = create_project_and_pool(&client).await; let disk_id = Uuid::new_v4(); + let snapshot_name = + external::Name::try_from("snapshot".to_string()).unwrap(); let snapshot = db::model::Snapshot { identity: db::model::SnapshotIdentity { id: Uuid::new_v4(), - name: external::Name::try_from("snapshot".to_string()) - .unwrap() - .into(), + name: snapshot_name.clone().into(), description: "snapshot".into(), time_created: Utc::now(), @@ -961,9 +961,7 @@ async fn test_create_snapshot_record_idempotent( let dupe_snapshot = db::model::Snapshot { identity: db::model::SnapshotIdentity { id: Uuid::new_v4(), - name: external::Name::try_from("snapshot".to_string()) - .unwrap() - .into(), + name: snapshot_name.clone().into(), description: "snapshot".into(), time_created: Utc::now(), @@ -1057,6 +1055,36 @@ async fn test_create_snapshot_record_idempotent( ) .await .unwrap(); + + // Test that a new snapshot can be added with the same name as a deleted + // one. 
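
The test being extended here exercises exactly the behavior the partial index is meant to allow: once a snapshot row is soft-deleted, its `(project_id, name)` pair is free again, so `project_ensure_snapshot` can insert a new row with the same name. For callers, the new trait keeps the query construction small; the `silo_group` and `snapshot` changes earlier in this patch show the real call sites, and the following is only a rough sketch of the same call shape against a hypothetical `widget` table (the table, columns, and the `nexus_db_queries::db` import path are assumptions, and a synchronous `PgConnection` stands in for the async connections Omicron actually uses).

```rust
use diesel::pg::PgConnection;
use diesel::prelude::*;
// The patch re-exports the extension trait from nexus/db-queries/src/db/mod.rs.
use nexus_db_queries::db::IncompleteOnConflictExt;

// Hypothetical table whose UNIQUE INDEX on (project_id, name) is partial,
// covering only rows WHERE time_deleted IS NULL.
diesel::table! {
    widget (id) {
        id -> Int4,
        project_id -> Int4,
        name -> Text,
        time_deleted -> Nullable<Timestamptz>,
    }
}

/// Insert a widget; a *live* row with the same (project_id, name) makes this
/// a no-op, while soft-deleted rows do not conflict at all, because the
/// partial index does not cover them.
fn insert_widget(
    conn: &mut PgConnection,
    id: i32,
    project_id: i32,
    name: &str,
) -> QueryResult<usize> {
    use widget::dsl;

    diesel::insert_into(dsl::widget)
        .values((dsl::id.eq(id), dsl::project_id.eq(project_id), dsl::name.eq(name)))
        .on_conflict((dsl::project_id, dsl::name))
        // Renders as `ON CONFLICT (project_id, name) WHERE false DO NOTHING`;
        // as the doc comment above explains, `WHERE false` implies any partial
        // index predicate, so the database picks the right index.
        .as_partial_index()
        .do_nothing()
        .execute(conn)
}
```

Compared with spelling out `filter_target(dsl::time_deleted.is_null())`, this form cannot be accidentally confused with the unrelated `DO UPDATE ... WHERE` conditional-update syntax, which is the misuse the clippy ban is guarding against.
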
+ + let new_snapshot = db::model::Snapshot { + identity: db::model::SnapshotIdentity { + id: Uuid::new_v4(), + name: snapshot_name.into(), + description: "snapshot".into(), + + time_created: Utc::now(), + time_modified: Utc::now(), + time_deleted: None, + }, + + project_id, + disk_id, + volume_id: Uuid::new_v4(), + destination_volume_id: Uuid::new_v4(), + + gen: db::model::Generation::new(), + state: db::model::SnapshotState::Creating, + block_size: db::model::BlockSize::Traditional, + size: external::ByteCount::try_from(1024u32).unwrap().into(), + }; + + let _ = datastore + .project_ensure_snapshot(&opctx, &authz_project, new_snapshot) + .await + .unwrap(); } #[nexus_test] From 00bc421e6c8449eda79a707eea860e8cc63339ca Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 15 Feb 2024 06:29:15 +0000 Subject: [PATCH 09/12] Update Rust crate regress to 0.8.0 (#4976) --- Cargo.lock | 40 ++++++++++++++++++++++++--------------- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 28 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c3ac516f82..0359b81dfc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -655,7 +655,7 @@ dependencies = [ "omicron-common", "omicron-workspace-hack", "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "regress", + "regress 0.8.0", "reqwest", "schemars", "serde", @@ -1946,7 +1946,7 @@ dependencies = [ "progenitor-client 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", "quote", "rand 0.8.5", - "regress", + "regress 0.8.0", "reqwest", "rustfmt-wrapper", "schemars", @@ -2862,9 +2862,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash", "allocator-api2", @@ -3368,7 +3368,7 @@ dependencies = [ "opte-ioctl", "oxide-vpc", "oxlog", - "regress", + "regress 0.8.0", "schemars", "serde", "serde_json", @@ -3410,7 +3410,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" dependencies = [ "equivalent", - "hashbrown 0.14.2", + "hashbrown 0.14.3", "serde", ] @@ -3504,7 +3504,7 @@ dependencies = [ "installinator-common", "omicron-workspace-hack", "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "regress", + "regress 0.8.0", "reqwest", "schemars", "serde", @@ -3979,7 +3979,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7" dependencies = [ - "hashbrown 0.14.2", + "hashbrown 0.14.3", ] [[package]] @@ -4302,7 +4302,7 @@ dependencies = [ "omicron-passwords", "omicron-workspace-hack", "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "regress", + "regress 0.8.0", "reqwest", "schemars", "serde", @@ -4872,7 +4872,7 @@ dependencies = [ "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", "proptest", "rand 0.8.5", - "regress", + "regress 0.8.0", "reqwest", "schemars", "semver 1.0.21", @@ -5367,7 +5367,7 @@ dependencies = [ "getrandom 0.2.10", "group", "hashbrown 0.13.2", - "hashbrown 0.14.2", + "hashbrown 0.14.3", "hex", "hmac", "hyper 0.14.27", @@ 
-5649,7 +5649,7 @@ dependencies = [ "omicron-workspace-hack", "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", "rand 0.8.5", - "regress", + "regress 0.8.0", "reqwest", "serde", "serde_json", @@ -7063,6 +7063,16 @@ dependencies = [ "memchr", ] +[[package]] +name = "regress" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f5f39ba4513916c1b2657b72af6ec671f091cd637992f58d0ede5cae4e5dea0" +dependencies = [ + "hashbrown 0.14.3", + "memchr", +] + [[package]] name = "relative-path" version = "1.9.0" @@ -8138,7 +8148,7 @@ dependencies = [ "omicron-common", "omicron-workspace-hack", "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "regress", + "regress 0.8.0", "reqwest", "schemars", "serde", @@ -9712,7 +9722,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "regress", + "regress 0.7.1", "schemars", "serde_json", "syn 2.0.48", @@ -10447,7 +10457,7 @@ dependencies = [ "ipnetwork", "omicron-workspace-hack", "progenitor 0.5.0 (git+https://github.com/oxidecomputer/progenitor?branch=main)", - "regress", + "regress 0.8.0", "reqwest", "schemars", "serde", diff --git a/Cargo.toml b/Cargo.toml index 66fbbc018d..a1307a9a20 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -321,7 +321,7 @@ rcgen = "0.12.1" reedline = "0.28.0" ref-cast = "1.0" regex = "1.10.3" -regress = "0.7.1" +regress = "0.8.0" reqwest = { version = "0.11", default-features = false } ring = "0.17.7" rpassword = "7.3.1" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 686cc3f218..5786c52b43 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -54,7 +54,7 @@ gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway- generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] } group = { version = "0.13.0", default-features = false, features = ["alloc"] } -hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14.2", features = ["raw"] } +hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14.3", features = ["raw"] } hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13.2" } hex = { version = "0.4.3", features = ["serde"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } @@ -162,7 +162,7 @@ gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway- generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] } group = { version = "0.13.0", default-features = false, features = ["alloc"] } -hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14.2", features = ["raw"] } +hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14.3", features = ["raw"] } hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13.2" } hex = { version = "0.4.3", features = ["serde"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } From fcc74ba37fd2f59c9ce6c18570cfadd6ebf691f8 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 15 Feb 2024 07:06:53 +0000 Subject: [PATCH 10/12] chore(deps): update rust crate toml to 0.8.10 (#4994) Co-authored-by: oxide-renovate[bot] 
<146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 54 +++++++++++++++++++++++---------------- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 6 ++--- 3 files changed, 35 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0359b81dfc..4cb2106f9e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -815,7 +815,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "922d6ea3081d68b9e3e09557204bff47f9b5406a4a304dc917e187f8cafd582b" dependencies = [ "serde", - "toml 0.8.9", + "toml 0.8.10", ] [[package]] @@ -1577,7 +1577,7 @@ dependencies = [ "slog", "thiserror", "tokio", - "toml 0.8.9", + "toml 0.8.10", ] [[package]] @@ -1873,7 +1873,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", - "toml 0.8.9", + "toml 0.8.10", "trust-dns-client", "trust-dns-proto", "trust-dns-resolver", @@ -1953,7 +1953,7 @@ dependencies = [ "serde", "serde_json", "slog", - "toml 0.8.9", + "toml 0.8.10", "uuid", ] @@ -1996,7 +1996,7 @@ dependencies = [ "slog-term", "tokio", "tokio-rustls 0.25.0", - "toml 0.8.9", + "toml 0.8.10", "usdt 0.3.5", "uuid", "version_check", @@ -2168,7 +2168,7 @@ dependencies = [ "serde", "serde_json", "tokio", - "toml 0.8.9", + "toml 0.8.10", "trust-dns-resolver", "uuid", ] @@ -3376,7 +3376,7 @@ dependencies = [ "smf", "thiserror", "tokio", - "toml 0.8.9", + "toml 0.8.10", "uuid", "zone", ] @@ -4108,7 +4108,7 @@ dependencies = [ "slog", "thiserror", "tokio", - "toml 0.8.9", + "toml 0.8.10", ] [[package]] @@ -4887,7 +4887,7 @@ dependencies = [ "thiserror", "tokio", "tokio-postgres", - "toml 0.8.9", + "toml 0.8.10", "uuid", ] @@ -4921,7 +4921,7 @@ dependencies = [ "subprocess", "tokio", "tokio-postgres", - "toml 0.8.9", + "toml 0.8.10", ] [[package]] @@ -4963,7 +4963,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-tungstenite", - "toml 0.8.9", + "toml 0.8.10", "uuid", ] @@ -5168,7 +5168,7 @@ dependencies = [ "tar", "thiserror", "tokio", - "toml 0.8.9", + "toml 0.8.10", "walkdir", ] @@ -5279,7 +5279,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "toml 0.8.9", + "toml 0.8.10", "usdt 0.5.0", "uuid", "zeroize", @@ -5426,7 +5426,6 @@ dependencies = [ "toml 0.7.8", "toml_datetime", "toml_edit 0.19.15", - "toml_edit 0.21.1", "tracing", "trust-dns-proto", "unicode-bidi", @@ -5746,7 +5745,7 @@ dependencies = [ "subprocess", "thiserror", "tokio", - "toml 0.8.9", + "toml 0.8.10", "uuid", ] @@ -8455,7 +8454,7 @@ dependencies = [ "sprockets-rot", "thiserror", "tokio", - "toml 0.8.9", + "toml 0.8.10", ] [[package]] @@ -9333,14 +9332,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6a4b9e8023eb94392d3dca65d717c53abc5dad49c07cb65bb8fcd87115fa325" +checksum = "9a9aad4a3066010876e8dcf5a8a06e70a558751117a145c6ce2b82c2e2054290" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.21.1", + "toml_edit 0.22.4", ] [[package]] @@ -9370,6 +9369,17 @@ name = "toml_edit" version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +dependencies = [ + "indexmap 2.2.2", + "toml_datetime", + "winnow", +] + +[[package]] +name = "toml_edit" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c9ffdf896f8daaabf9b66ba8e77ea1ed5ed0f72821b398aba62352e95062951" dependencies = [ "indexmap 2.2.2", "serde", @@ -9657,7 +9667,7 @@ dependencies = [ "slog", "tar", "tokio", 
- "toml 0.8.9", + "toml 0.8.10", "tough", "url", "zip", @@ -10327,7 +10337,7 @@ dependencies = [ "textwrap 0.16.0", "tokio", "tokio-util", - "toml 0.8.9", + "toml 0.8.10", "toml_edit 0.21.1", "tui-tree-widget", "unicode-width", @@ -10435,7 +10445,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "toml 0.8.9", + "toml 0.8.10", "tough", "trust-dns-resolver", "tufaceous", diff --git a/Cargo.toml b/Cargo.toml index a1307a9a20..a3ec148c7e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -391,7 +391,7 @@ tokio-postgres = { version = "0.7", features = [ "with-chrono-0_4", "with-uuid-1 tokio-stream = "0.1.14" tokio-tungstenite = "0.20" tokio-util = { version = "0.7.10", features = ["io", "io-util"] } -toml = "0.8.9" +toml = "0.8.10" toml_edit = "0.21.1" tough = { version = "0.16.0", features = [ "http" ] } trust-dns-client = "0.22" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 5786c52b43..c5c2a4940a 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -108,7 +108,6 @@ tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serd tokio-stream = { version = "0.1.14", features = ["net"] } tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } toml = { version = "0.7.8" } -toml_edit-647d43efb71741da = { package = "toml_edit", version = "0.21.1", features = ["serde"] } tracing = { version = "0.1.37", features = ["log"] } trust-dns-proto = { version = "0.22.0" } unicode-bidi = { version = "0.3.13" } @@ -217,7 +216,6 @@ tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serd tokio-stream = { version = "0.1.14", features = ["net"] } tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } toml = { version = "0.7.8" } -toml_edit-647d43efb71741da = { package = "toml_edit", version = "0.21.1", features = ["serde"] } tracing = { version = "0.1.37", features = ["log"] } trust-dns-proto = { version = "0.22.0" } unicode-bidi = { version = "0.3.13" } @@ -280,7 +278,7 @@ mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } -toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } +toml_edit = { version = "0.19.15", features = ["serde"] } [target.x86_64-unknown-illumos.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } @@ -290,6 +288,6 @@ mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0" } rustix = { version = "0.38.31", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } -toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } +toml_edit = { version = "0.19.15", features = ["serde"] } ### END HAKARI SECTION From f6df7325d0ccf62896e05d0782e188ab3ac0408e Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 15 Feb 2024 09:27:40 +0000 Subject: [PATCH 11/12] chore(deps): update rust crate indicatif to 0.17.8 (#5074) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4cb2106f9e..b4c65564f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3416,9 +3416,9 @@ dependencies = [ [[package]] name = 
"indicatif" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25" +checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3" dependencies = [ "console", "instant", diff --git a/Cargo.toml b/Cargo.toml index a3ec148c7e..43fe4d8d8a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -228,7 +228,7 @@ hyper-rustls = "0.26.0" hyper-staticfile = "0.9.5" illumos-utils = { path = "illumos-utils" } indexmap = "2.2.2" -indicatif = { version = "0.17.7", features = ["rayon"] } +indicatif = { version = "0.17.8", features = ["rayon"] } installinator = { path = "installinator" } installinator-artifactd = { path = "installinator-artifactd" } installinator-artifact-client = { path = "clients/installinator-artifact-client" } From ae39f2633e177bae3c53bddf9e9fc196facdcb61 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Thu, 15 Feb 2024 11:22:56 +0000 Subject: [PATCH 12/12] chore(deps): update rust crate indexmap to 2.2.3 (#5073) --- Cargo.lock | 38 +++++++++++++++++++------------------- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b4c65564f5..2f13d1e990 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1975,7 +1975,7 @@ dependencies = [ "hostname", "http 0.2.11", "hyper 0.14.27", - "indexmap 2.2.2", + "indexmap 2.2.3", "multer", "openapiv3", "paste", @@ -2778,7 +2778,7 @@ dependencies = [ "debug-ignore", "fixedbitset", "guppy-workspace-hack", - "indexmap 2.2.2", + "indexmap 2.2.3", "itertools 0.12.1", "nested", "once_cell", @@ -2810,7 +2810,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.11", - "indexmap 2.2.2", + "indexmap 2.2.3", "slab", "tokio", "tokio-util", @@ -3405,9 +3405,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" +checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -5371,7 +5371,7 @@ dependencies = [ "hex", "hmac", "hyper 0.14.27", - "indexmap 2.2.2", + "indexmap 2.2.3", "inout", "ipnetwork", "itertools 0.10.5", @@ -5495,7 +5495,7 @@ version = "0.4.0" source = "git+https://github.com/oxidecomputer/openapi-lint?branch=main#ef442ee4343e97b6d9c217d3e7533962fe7d7236" dependencies = [ "heck 0.4.1", - "indexmap 2.2.2", + "indexmap 2.2.3", "lazy_static", "openapiv3", "regex", @@ -5507,7 +5507,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc02deea53ffe807708244e5914f6b099ad7015a207ee24317c22112e17d9c5c" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.3", "serde", "serde_json", ] @@ -5764,7 +5764,7 @@ dependencies = [ "expectorate", "futures", "highway", - "indexmap 2.2.2", + "indexmap 2.2.3", "itertools 0.12.1", "omicron-common", "omicron-test-utils", @@ -6139,7 +6139,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.2.2", + "indexmap 2.2.3", "serde", "serde_derive", ] @@ -6556,7 +6556,7 @@ dependencies = [ "getopts", "heck 0.4.1", "http 0.2.11", - "indexmap 2.2.2", + "indexmap 2.2.3", "openapiv3", "proc-macro2", "quote", @@ 
-6578,7 +6578,7 @@ dependencies = [ "getopts", "heck 0.4.1", "http 0.2.11", - "indexmap 2.2.2", + "indexmap 2.2.3", "openapiv3", "proc-macro2", "quote", @@ -7951,7 +7951,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.2", + "indexmap 2.2.3", "serde", "serde_json", "serde_with_macros", @@ -7976,7 +7976,7 @@ version = "0.9.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.3", "itoa", "ryu", "serde", @@ -9357,7 +9357,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.3", "serde", "serde_spanned", "toml_datetime", @@ -9370,7 +9370,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.3", "toml_datetime", "winnow", ] @@ -9381,7 +9381,7 @@ version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c9ffdf896f8daaabf9b66ba8e77ea1ed5ed0f72821b398aba62352e95062951" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.3", "serde", "serde_spanned", "toml_datetime", @@ -9900,7 +9900,7 @@ dependencies = [ "derive-where", "either", "futures", - "indexmap 2.2.2", + "indexmap 2.2.3", "indicatif", "libsw", "linear-map", @@ -10313,7 +10313,7 @@ dependencies = [ "crossterm", "futures", "humantime", - "indexmap 2.2.2", + "indexmap 2.2.3", "indicatif", "itertools 0.12.1", "omicron-common", diff --git a/Cargo.toml b/Cargo.toml index 43fe4d8d8a..3dae9211e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -227,7 +227,7 @@ hyper = "0.14" hyper-rustls = "0.26.0" hyper-staticfile = "0.9.5" illumos-utils = { path = "illumos-utils" } -indexmap = "2.2.2" +indexmap = "2.2.3" indicatif = { version = "0.17.8", features = ["rayon"] } installinator = { path = "installinator" } installinator-artifactd = { path = "installinator-artifactd" } diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index c5c2a4940a..a52f63fec1 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -59,7 +59,7 @@ hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13.2" } hex = { version = "0.4.3", features = ["serde"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } hyper = { version = "0.14.27", features = ["full"] } -indexmap = { version = "2.2.2", features = ["serde"] } +indexmap = { version = "2.2.3", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } ipnetwork = { version = "0.20.0", features = ["schemars"] } itertools = { version = "0.10.5" } @@ -166,7 +166,7 @@ hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13.2" } hex = { version = "0.4.3", features = ["serde"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } hyper = { version = "0.14.27", features = ["full"] } -indexmap = { version = "2.2.2", features = ["serde"] } +indexmap = { version = "2.2.3", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } ipnetwork = { version = "0.20.0", features = ["schemars"] } itertools = { version = "0.10.5" }