diff --git a/Cargo.lock b/Cargo.lock index 84669a13e7..075b5697c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1929,6 +1929,7 @@ dependencies = [ "camino-tempfile", "chrono", "clap", + "dns-server-api", "dns-service-client", "dropshot", "expectorate", @@ -1958,6 +1959,17 @@ dependencies = [ "uuid", ] +[[package]] +name = "dns-server-api" +version = "0.1.0" +dependencies = [ + "chrono", + "dropshot", + "omicron-workspace-hack", + "schemars", + "serde", +] + [[package]] name = "dns-service-client" version = "0.1.0" @@ -3606,7 +3618,7 @@ dependencies = [ "hex-literal", "http 0.2.12", "illumos-utils", - "installinator-artifact-client", + "installinator-client", "installinator-common", "ipcc", "itertools 0.12.1", @@ -3638,43 +3650,35 @@ dependencies = [ ] [[package]] -name = "installinator-artifact-client" +name = "installinator-api" version = "0.1.0" dependencies = [ + "anyhow", + "dropshot", + "hyper 0.14.28", "installinator-common", + "omicron-common", "omicron-workspace-hack", - "progenitor", - "regress", - "reqwest", "schemars", "serde", - "serde_json", "slog", - "update-engine", "uuid", ] [[package]] -name = "installinator-artifactd" +name = "installinator-client" version = "0.1.0" dependencies = [ - "anyhow", - "async-trait", - "clap", - "dropshot", - "expectorate", - "hyper 0.14.28", "installinator-common", - "omicron-common", - "omicron-test-utils", "omicron-workspace-hack", - "openapi-lint", - "openapiv3", + "progenitor", + "regress", + "reqwest", "schemars", "serde", "serde_json", "slog", - "subprocess", + "update-engine", "uuid", ] @@ -6106,9 +6110,11 @@ dependencies = [ "atomicwrites", "camino", "clap", + "dns-server-api", "dropshot", "fs-err", "indent_write", + "installinator-api", "nexus-internal-api", "omicron-workspace-hack", "openapi-lint", @@ -6117,6 +6123,7 @@ dependencies = [ "serde_json", "similar", "supports-color", + "wicketd-api", ] [[package]] @@ -11107,6 +11114,8 @@ name = "wicket-common" version = "0.1.0" dependencies = [ "anyhow", + 
"dpd-client", + "dropshot", "gateway-client", "maplit", "omicron-common", @@ -11118,7 +11127,9 @@ dependencies = [ "serde_json", "sha2", "sled-hardware-types", + "slog", "thiserror", + "tokio", "toml 0.8.14", "update-engine", ] @@ -11176,8 +11187,8 @@ dependencies = [ "hyper 0.14.28", "illumos-utils", "installinator", - "installinator-artifact-client", - "installinator-artifactd", + "installinator-api", + "installinator-client", "installinator-common", "internal-dns", "itertools 0.12.1", @@ -11217,9 +11228,27 @@ dependencies = [ "uuid", "wicket", "wicket-common", + "wicketd-api", "wicketd-client", ] +[[package]] +name = "wicketd-api" +version = "0.1.0" +dependencies = [ + "bootstrap-agent-client", + "dropshot", + "gateway-client", + "omicron-common", + "omicron-passwords", + "omicron-workspace-hack", + "schemars", + "serde", + "sled-hardware-types", + "slog", + "wicket-common", +] + [[package]] name = "wicketd-client" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index e5783b39eb..cc6c3c13e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ members = [ "clients/dns-service-client", "clients/dpd-client", "clients/gateway-client", - "clients/installinator-artifact-client", + "clients/installinator-client", "clients/nexus-client", "clients/oxide-client", "clients/oximeter-client", @@ -26,12 +26,13 @@ members = [ "dev-tools/releng", "dev-tools/xtask", "dns-server", + "dns-server-api", "end-to-end-tests", "gateway-cli", "gateway-test-utils", "gateway", "illumos-utils", - "installinator-artifactd", + "installinator-api", "installinator-common", "installinator", "internal-dns-cli", @@ -86,8 +87,9 @@ members = [ "wicket-dbg", "wicket", "wicketd", + "wicketd-api", "workspace-hack", - "zone-setup", + "zone-setup", "wicketd-api", "dns-server-api", ] default-members = [ @@ -100,7 +102,7 @@ default-members = [ "clients/dns-service-client", "clients/dpd-client", "clients/gateway-client", - "clients/installinator-artifact-client", + "clients/installinator-client", 
"clients/nexus-client", "clients/oxide-client", "clients/oximeter-client", @@ -119,13 +121,14 @@ default-members = [ # hakari to not work as well and build times to be longer. # See omicron#4392. "dns-server", + "dns-server-api", # Do not include end-to-end-tests in the list of default members, as its # tests only work on a deployed control plane. "gateway-cli", "gateway-test-utils", "gateway", "illumos-utils", - "installinator-artifactd", + "installinator-api", "installinator-common", "installinator", "internal-dns-cli", @@ -180,6 +183,7 @@ default-members = [ "wicket-dbg", "wicket", "wicketd", + "wicketd-api", "workspace-hack", "zone-setup", ] @@ -279,6 +283,7 @@ derive-where = "1.2.7" diesel = { version = "2.1.6", features = ["postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] } diesel-dtrace = { git = "https://github.com/oxidecomputer/diesel-dtrace", branch = "main" } dns-server = { path = "dns-server" } +dns-server-api = { path = "dns-server-api" } dns-service-client = { path = "clients/dns-service-client" } dpd-client = { path = "clients/dpd-client" } dropshot = { git = "https://github.com/oxidecomputer/dropshot", branch = "main", features = [ "usdt-probes" ] } @@ -318,8 +323,8 @@ indent_write = "2.2.0" indexmap = "2.2.6" indicatif = { version = "0.17.8", features = ["rayon"] } installinator = { path = "installinator" } -installinator-artifactd = { path = "installinator-artifactd" } -installinator-artifact-client = { path = "clients/installinator-artifact-client" } +installinator-api = { path = "installinator-api" } +installinator-client = { path = "clients/installinator-client" } installinator-common = { path = "installinator-common" } internal-dns = { path = "internal-dns" } ipcc = { path = "ipcc" } @@ -530,6 +535,7 @@ walkdir = "2.5" whoami = "1.5" wicket = { path = "wicket" } wicket-common = { path = "wicket-common" } +wicketd-api = { path = "wicketd-api" } wicketd-client = { path = "clients/wicketd-client" } zeroize = { version = 
"1.7.0", features = ["zeroize_derive", "std"] } zip = { version = "0.6.6", default-features = false, features = ["deflate","bzip2"] } diff --git a/clients/installinator-artifact-client/Cargo.toml b/clients/installinator-client/Cargo.toml similarity index 92% rename from clients/installinator-artifact-client/Cargo.toml rename to clients/installinator-client/Cargo.toml index f1e896864f..ca2de0476a 100644 --- a/clients/installinator-artifact-client/Cargo.toml +++ b/clients/installinator-client/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "installinator-artifact-client" +name = "installinator-client" version = "0.1.0" edition = "2021" license = "MPL-2.0" diff --git a/clients/installinator-artifact-client/src/lib.rs b/clients/installinator-client/src/lib.rs similarity index 91% rename from clients/installinator-artifact-client/src/lib.rs rename to clients/installinator-client/src/lib.rs index 96806c2cab..a39ff3ff80 100644 --- a/clients/installinator-artifact-client/src/lib.rs +++ b/clients/installinator-client/src/lib.rs @@ -2,10 +2,10 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Interface for making API requests to installinator-artifactd. +//! Interface for installinator to make API requests. 
progenitor::generate_api!( - spec = "../../openapi/installinator-artifactd.json", + spec = "../../openapi/installinator.json", inner_type = slog::Logger, pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { slog::debug!(log, "client request"; diff --git a/clients/wicketd-client/src/lib.rs b/clients/wicketd-client/src/lib.rs index 6198c6cf9e..bb377de31e 100644 --- a/clients/wicketd-client/src/lib.rs +++ b/clients/wicketd-client/src/lib.rs @@ -27,21 +27,11 @@ progenitor::generate_api!( RackNetworkConfigV2 = { derives = [PartialEq, Eq, PartialOrd, Ord] }, RackOperationStatus = { derives = [PartialEq, Eq, PartialOrd, Ord] }, RackResetId = { derives = [PartialEq, Eq, PartialOrd, Ord] }, - RackV1Inventory = { derives = [PartialEq, Eq, PartialOrd, Ord]}, RotImageDetails = { derives = [PartialEq, Eq, PartialOrd, Ord]}, - RotInventory = { derives = [PartialEq, Eq, PartialOrd, Ord]}, - RotSlot = { derives = [PartialEq, Eq, PartialOrd, Ord]}, - RotState = { derives = [PartialEq, Eq, PartialOrd, Ord]}, - SpComponentCaboose = { derives = [PartialEq, Eq, PartialOrd, Ord] }, - SpComponentInfo = { derives = [PartialEq, Eq, PartialOrd, Ord]}, - SpIgnition = { derives = [PartialEq, Eq, PartialOrd, Ord]}, - SpIgnitionSystemType= { derives = [PartialEq, Eq, PartialOrd, Ord]}, - SpInventory = { derives = [PartialEq, Eq, PartialOrd, Ord]}, - SpState = { derives = [PartialEq, Eq, PartialOrd, Ord] }, - StartUpdateOptions = { derives = [Default]}, UplinkConfig = { derives = [PartialEq, Eq, PartialOrd, Ord] }, }, replace = { + AbortUpdateOptions = wicket_common::rack_update::AbortUpdateOptions, AllowedSourceIps = omicron_common::api::internal::shared::AllowedSourceIps, Baseboard = sled_hardware_types::Baseboard, BgpAuthKey = wicket_common::rack_setup::BgpAuthKey, @@ -52,9 +42,11 @@ progenitor::generate_api!( BgpPeerAuthKind = wicket_common::rack_setup::BgpPeerAuthKind, BgpPeerConfig = omicron_common::api::internal::shared::BgpPeerConfig, BootstrapSledDescription = 
wicket_common::rack_setup::BootstrapSledDescription, + ClearUpdateStateOptions = wicket_common::rack_update::ClearUpdateStateOptions, ClearUpdateStateResponse = wicket_common::rack_update::ClearUpdateStateResponse, CurrentRssUserConfigInsensitive = wicket_common::rack_setup::CurrentRssUserConfigInsensitive, Duration = std::time::Duration, + EventReportForUplinkPreflightCheckSpec = wicket_common::preflight_check::EventReport, EventReportForWicketdEngineSpec = wicket_common::update_events::EventReport, GetBgpAuthKeyInfoResponse = wicket_common::rack_setup::GetBgpAuthKeyInfoResponse, ImportExportPolicy = omicron_common::api::internal::shared::ImportExportPolicy, @@ -67,15 +59,31 @@ progenitor::generate_api!( PortSpeed = omicron_common::api::internal::shared::PortSpeed, ProgressEventForGenericSpec = update_engine::events::ProgressEvent, ProgressEventForInstallinatorSpec = installinator_common::ProgressEvent, + ProgressEventForUplinkPreflightSpec = wicket_common::preflight_check::ProgressEvent, ProgressEventForWicketdEngineSpec = wicket_common::update_events::ProgressEvent, PutRssUserConfigInsensitive = wicket_common::rack_setup::PutRssUserConfigInsensitive, + RackV1Inventory = wicket_common::inventory::RackV1Inventory, + RotInventory = wicket_common::inventory::RotInventory, + RotSlot = wicket_common::inventory::RotSlot, + RotState = wicket_common::inventory::RotState, RouteConfig = omicron_common::api::internal::shared::RouteConfig, - SpIdentifier = wicket_common::rack_update::SpIdentifier, - SpType = wicket_common::rack_update::SpType, + SpComponentCaboose = wicket_common::inventory::SpComponentCaboose, + SpComponentInfo = wicket_common::inventory::SpComponentInfo, + SpIdentifier = wicket_common::inventory::SpIdentifier, + SpIgnition = wicket_common::inventory::SpIgnition, + SpIgnitionSystemType = wicket_common::inventory::SpIgnitionSystemType, + SpInventory = wicket_common::inventory::SpInventory, + SpState = wicket_common::inventory::SpState, + SpType = 
wicket_common::inventory::SpType, + StartUpdateOptions = wicket_common::rack_update::StartUpdateOptions, StepEventForGenericSpec = update_engine::events::StepEvent, + StepEventForUplinkPreflightSpec = wicket_common::preflight_check::StepEvent, StepEventForInstallinatorSpec = installinator_common::StepEvent, StepEventForWicketdEngineSpec = wicket_common::update_events::StepEvent, SwitchLocation = omicron_common::api::internal::shared::SwitchLocation, + UpdateSimulatedResult = wicket_common::rack_update::UpdateSimulatedResult, + UpdateTestError = wicket_common::rack_update::UpdateTestError, + UplinkPreflightStepId = wicket_common::preflight_check::UplinkPreflightStepId, UserSpecifiedBgpPeerConfig = wicket_common::rack_setup::UserSpecifiedBgpPeerConfig, UserSpecifiedImportExportPolicy = wicket_common::rack_setup::UserSpecifiedImportExportPolicy, UserSpecifiedPortConfig = wicket_common::rack_setup::UserSpecifiedPortConfig, diff --git a/dev-tools/openapi-manager/Cargo.toml b/dev-tools/openapi-manager/Cargo.toml index b50aeec69f..aa0cfacfd5 100644 --- a/dev-tools/openapi-manager/Cargo.toml +++ b/dev-tools/openapi-manager/Cargo.toml @@ -12,9 +12,11 @@ anyhow.workspace = true atomicwrites.workspace = true camino.workspace = true clap.workspace = true +dns-server-api.workspace = true dropshot.workspace = true fs-err.workspace = true indent_write.workspace = true +installinator-api.workspace = true nexus-internal-api.workspace = true omicron-workspace-hack.workspace = true openapiv3.workspace = true @@ -23,3 +25,4 @@ owo-colors.workspace = true serde_json.workspace = true similar.workspace = true supports-color.workspace = true +wicketd-api.workspace = true diff --git a/dev-tools/openapi-manager/src/spec.rs b/dev-tools/openapi-manager/src/spec.rs index 37330d6922..7099b82a4e 100644 --- a/dev-tools/openapi-manager/src/spec.rs +++ b/dev-tools/openapi-manager/src/spec.rs @@ -14,6 +14,28 @@ use openapiv3::OpenAPI; /// All APIs managed by openapi-manager. 
pub fn all_apis() -> Vec { vec![ + ApiSpec { + title: "Internal DNS".to_string(), + version: "0.0.1".to_string(), + description: "API for the internal DNS server".to_string(), + boundary: ApiBoundary::Internal, + api_description: + dns_server_api::dns_server_api::stub_api_description, + filename: "dns-server.json".to_string(), + extra_validation: None, + }, + ApiSpec { + title: "Installinator API".to_string(), + version: "0.0.1".to_string(), + description: "API for installinator to fetch artifacts \ + and report progress" + .to_string(), + boundary: ApiBoundary::Internal, + api_description: + installinator_api::installinator_api::stub_api_description, + filename: "installinator.json".to_string(), + extra_validation: None, + }, ApiSpec { title: "Nexus internal API".to_string(), version: "0.0.1".to_string(), @@ -24,6 +46,16 @@ pub fn all_apis() -> Vec { filename: "nexus-internal.json".to_string(), extra_validation: None, }, + ApiSpec { + title: "Oxide Technician Port Control Service".to_string(), + version: "0.0.1".to_string(), + description: "API for use by the technician port TUI: wicket" + .to_string(), + boundary: ApiBoundary::Internal, + api_description: wicketd_api::wicketd_api_mod::stub_api_description, + filename: "wicketd.json".to_string(), + extra_validation: None, + }, // Add your APIs here! Please keep this list sorted by filename. 
] } diff --git a/dns-server-api/Cargo.toml b/dns-server-api/Cargo.toml new file mode 100644 index 0000000000..c87af14e0d --- /dev/null +++ b/dns-server-api/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "dns-server-api" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[lints] +workspace = true + +[dependencies] +chrono.workspace = true +dropshot.workspace = true +omicron-workspace-hack.workspace = true +schemars.workspace = true +serde.workspace = true diff --git a/dns-server-api/src/lib.rs b/dns-server-api/src/lib.rs new file mode 100644 index 0000000000..2c59caf0c5 --- /dev/null +++ b/dns-server-api/src/lib.rs @@ -0,0 +1,160 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Dropshot API for configuring DNS namespace. +//! +//! ## Shape of the API +//! +//! The DNS configuration API has just two endpoints: PUT and GET of the entire +//! DNS configuration. This is pretty anti-REST. But it's important to think +//! about how this server fits into the rest of the system. When changes are +//! made to DNS data, they're grouped together and assigned a monotonically +//! increasing generation number. The DNS data is first stored into CockroachDB +//! and then propagated from a distributed fleet of Nexus instances to a +//! distributed fleet of these DNS servers. If we accepted individual updates to +//! DNS names, then propagating a particular change would be non-atomic, and +//! Nexus would have to do a lot more work to ensure (1) that all changes were +//! propagated (even if it crashes) and (2) that they were propagated in the +//! correct order (even if two Nexus instances concurrently propagate separate +//! changes). +//! +//! This DNS server supports hosting multiple zones. We could imagine supporting +//! separate endpoints to update the DNS data for a particular zone. That feels +//! 
nicer (although it's not clear what it would buy us). But as with updates to +//! multiple names, Nexus's job is potentially much easier if the entire state +//! for all zones is updated at once. (Otherwise, imagine how Nexus would +//! implement _renaming_ one zone to another without loss of service. With +//! a combined endpoint and generation number for all zones, all that's necessary +//! is to configure a new zone with all the same names, and then remove the old +//! zone later in another update. That can be managed by the same mechanism in +//! Nexus that manages regular name updates. On the other hand, if there were +//! separate endpoints with separate generation numbers, then Nexus has more to +//! keep track of in order to do the rename safely.) +//! +//! See RFD 367 for more on DNS propagation. +//! +//! ## ETags and Conditional Requests +//! +//! It's idiomatic in HTTP to use ETags and conditional requests to provide +//! synchronization. We could define an ETag to be just the current generation +//! number of the server and honor standard `if-match` headers to fail requests +//! where the generation number doesn't match what the client expects. This +//! would be fine, but it's rather annoying: +//! +//! 1. When the client wants to propagate generation X, the client would have +//! to make an extra request just to fetch the current ETag, just so it can put +//! it into the conditional request. +//! +//! 2. If some other client changes the configuration in the meantime, the +//! conditional request would fail and the client would have to take another +//! lap (fetching the current config and potentially making another +//! conditional PUT). +//! +//! 3. This approach would make synchronization opt-in. If a client (or just +//! one errant code path) neglected to set the if-match header, we could do +//! the wrong thing and cause the system to come to rest with the wrong DNS +//! data. +//! 
Since the semantics here are so simple (we only ever want to move the +//! generation number forward), we don't bother with ETags or conditional +//! requests. Instead we have the server implement the behavior we want, which +//! is that when a request comes in to update DNS data to generation X, the +//! server replies with one of: +//! +//! (1) the update has been applied and the server is now running generation X +//! (client treats this as success) +//! +//! (2) the update was not applied because the server is already at generation X +//! (client treats this as success) +//! +//! (3) the update was not applied because the server is already at a newer +//! generation +//! (client probably starts the whole propagation process over because its +//! current view of the world is out of date) +//! +//! This way, the DNS data can never move backwards and the client only ever has +//! to make one request. +//! +//! ## Concurrent updates +//! +//! Given that we've got just one API to update all the DNS zones, and given +//! that it might therefore take a minute for a large zone, and also that there may +//! be multiple Nexus instances trying to do it at the same time, we need to +//! think a bit about what should happen if two Nexus do try to do it at the same +//! time. Spoiler: we immediately fail any request to update the DNS data if +//! there's already an update in progress. +//! +//! What else could we do? We could queue the incoming request behind the +//! in-progress one. How large do we allow that queue to grow? At some point +//! we'll need to stop queueing them. So why bother at all?
+ +use std::{ + collections::HashMap, + net::{Ipv4Addr, Ipv6Addr}, +}; + +use dropshot::{HttpError, HttpResponseOk, RequestContext}; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[dropshot::api_description] +pub trait DnsServerApi { + type Context; + + #[endpoint( + method = GET, + path = "/config", + )] + async fn dns_config_get( + rqctx: RequestContext, + ) -> Result, HttpError>; + + #[endpoint( + method = PUT, + path = "/config", + )] + async fn dns_config_put( + rqctx: RequestContext, + rq: dropshot::TypedBody, + ) -> Result; +} + +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] +pub struct DnsConfigParams { + pub generation: u64, + pub time_created: chrono::DateTime, + pub zones: Vec, +} + +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] +pub struct DnsConfig { + pub generation: u64, + pub time_created: chrono::DateTime, + pub time_applied: chrono::DateTime, + pub zones: Vec, +} + +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] +pub struct DnsConfigZone { + pub zone_name: String, + pub records: HashMap>, +} + +#[allow(clippy::upper_case_acronyms)] +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] +#[serde(tag = "type", content = "data")] +pub enum DnsRecord { + A(Ipv4Addr), + AAAA(Ipv6Addr), + SRV(SRV), +} + +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] +#[serde(rename = "Srv")] +pub struct SRV { + pub prio: u16, + pub weight: u16, + pub port: u16, + pub target: String, +} diff --git a/dns-server/Cargo.toml b/dns-server/Cargo.toml index 237d2a2fbb..d11dabaf85 100644 --- a/dns-server/Cargo.toml +++ b/dns-server/Cargo.toml @@ -12,6 +12,7 @@ anyhow.workspace = true camino.workspace = true chrono.workspace = true clap.workspace = true +dns-server-api.workspace = true dns-service-client.workspace = true dropshot.workspace = true http.workspace = true diff --git a/dns-server/src/bin/apigen.rs b/dns-server/src/bin/apigen.rs deleted file mode 100644 index 
e130ee0211..0000000000 --- a/dns-server/src/bin/apigen.rs +++ /dev/null @@ -1,29 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Generate the OpenAPI spec for the DNS server - -use anyhow::{bail, Result}; -use dns_server::http_server::api; -use std::fs::File; -use std::io; - -fn usage(args: &[String]) -> String { - format!("{} [output path]", args[0]) -} - -fn main() -> Result<()> { - let args: Vec = std::env::args().collect(); - - let mut out = match args.len() { - 1 => Box::new(io::stdout()) as Box, - 2 => Box::new(File::create(args[1].clone())?) as Box, - _ => bail!(usage(&args)), - }; - - let api = api(); - let openapi = api.openapi("Internal DNS", "v0.1.0"); - openapi.write(&mut out)?; - Ok(()) -} diff --git a/dns-server/src/dns_server.rs b/dns-server/src/dns_server.rs index 01a8430b62..5c761f2aa3 100644 --- a/dns-server/src/dns_server.rs +++ b/dns-server/src/dns_server.rs @@ -7,12 +7,12 @@ //! The facilities here handle binding a UDP socket, receiving DNS messages on //! that socket, and replying to them. 
-use crate::dns_types::DnsRecord; use crate::storage; use crate::storage::QueryError; use crate::storage::Store; use anyhow::anyhow; use anyhow::Context; +use dns_server_api::DnsRecord; use pretty_hex::*; use serde::Deserialize; use slog::{debug, error, info, o, trace, Logger}; @@ -234,12 +234,7 @@ fn dns_record_to_record( Ok(aaaa) } - DnsRecord::SRV(crate::dns_types::SRV { - prio, - weight, - port, - target, - }) => { + DnsRecord::SRV(dns_server_api::SRV { prio, weight, port, target }) => { let tgt = Name::from_str(&target).map_err(|error| { RequestError::ServFail(anyhow!( "serialization failed due to bad SRV target {:?}: {:#}", diff --git a/dns-server/src/dns_types.rs b/dns-server/src/dns_types.rs deleted file mode 100644 index 941124feb6..0000000000 --- a/dns-server/src/dns_types.rs +++ /dev/null @@ -1,50 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! 
types describing DNS records and configuration - -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::net::Ipv4Addr; -use std::net::Ipv6Addr; - -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] -pub struct DnsConfigParams { - pub generation: u64, - pub time_created: chrono::DateTime, - pub zones: Vec, -} - -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] -pub struct DnsConfig { - pub generation: u64, - pub time_created: chrono::DateTime, - pub time_applied: chrono::DateTime, - pub zones: Vec, -} - -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] -pub struct DnsConfigZone { - pub zone_name: String, - pub records: HashMap>, -} - -#[allow(clippy::upper_case_acronyms)] -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] -#[serde(tag = "type", content = "data")] -pub enum DnsRecord { - A(Ipv4Addr), - AAAA(Ipv6Addr), - SRV(SRV), -} - -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] -#[serde(rename = "Srv")] -pub struct SRV { - pub prio: u16, - pub weight: u16, - pub port: u16, - pub target: String, -} diff --git a/dns-server/src/http_server.rs b/dns-server/src/http_server.rs index e50346d828..84ffbc90e9 100644 --- a/dns-server/src/http_server.rs +++ b/dns-server/src/http_server.rs @@ -4,102 +4,12 @@ //! Dropshot server for configuring DNS namespace -// Shape of the API -// ------------------------------ -// -// The DNS configuration API has just two endpoints: PUT and GET of the entire -// DNS configuration. This is pretty anti-REST. But it's important to think -// about how this server fits into the rest of the system. When changes are -// made to DNS data, they're grouped together and assigned a monotonically -// increasing generation number. The DNS data is first stored into CockroachDB -// and then propagated from a distributed fleet of Nexus instances to a -// distributed fleet of these DNS servers. 
If we accepted individual updates to -// DNS names, then propagating a particular change would be non-atomic, and -// Nexus would have to do a lot more work to ensure (1) that all changes were -// propagated (even if it crashes) and (2) that they were propagated in the -// correct order (even if two Nexus instances concurrently propagate separate -// changes). -// -// This DNS server supports hosting multiple zones. We could imagine supporting -// separate endpoints to update the DNS data for a particular zone. That feels -// nicer (although it's not clear what it would buy us). But as with updates to -// multiple names, Nexus's job is potentially much easier if the entire state -// for all zones is updated at once. (Otherwise, imagine how Nexus would -// implement _renaming_ one zone to another without loss of service. With -// a combined endpoint and generation number for all zones, all that's necessary -// is to configure a new zone with all the same names, and then remove the old -// zone later in another update. That can be managed by the same mechanism in -// Nexus that manages regular name updates. On the other hand, if there were -// separate endpoints with separate generation numbers, then Nexus has more to -// keep track of in order to do the rename safely.) -// -// See RFD 367 for more on DNS propagation. -// -// -// ETags and Conditional Requests -// ------------------------------ -// -// It's idiomatic in HTTP use ETags and conditional requests to provide -// synchronization. We could define an ETag to be just the current generation -// number of the server and honor standard `if-match` headers to fail requests -// where the generation number doesn't match what the client expects. This -// would be fine, but it's rather annoying: -// -// (1) When the client wants to propagate generation X, the client would have -// make an extra request just to fetch the current ETag, just so it can put -// it into the conditional request. 
-// -// (2) If some other client changes the configuration in the meantime, the -// conditional request would fail and the client would have to take another -// lap (fetching the current config and potentially making another -// conditional PUT). -// -// (3) This approach would make synchronization opt-in. If a client (or just -// one errant code path) neglected to set the if-match header, we could do -// the wrong thing and cause the system to come to rest with the wrong DNS -// data. -// -// Since the semantics here are so simple (we only ever want to move the -// generation number forward), we don't bother with ETags or conditional -// requests. Instead we have the server implement the behavior we want, which -// is that when a request comes in to update DNS data to generation X, the -// server replies with one of: -// -// (1) the update has been applied and the server is now running generation X -// (client treats this as success) -// -// (2) the update was not applied because the server is already at generation X -// (client treats this as success) -// -// (3) the update was not applied because the server is already at a newer -// generation -// (client probably starts the whole propagation process over because its -// current view of the world is out of date) -// -// This way, the DNS data can never move backwards and the client only ever has -// to make one request. -// -// -// Concurrent updates -// ------------------ -// -// Given that we've got just one API to update the all DNS zones, and given -// that might therefore take a minute for a large zone, and also that there may -// be multiple Nexus instances trying to do it at the same time, we need to -// think a bit about what should happen if two Nexus do try to do it at the same -// time. Spoiler: we immediately fail any request to update the DNS data if -// there's already an update in progress. -// -// What else could we do? We could queue the incoming request behind the -// in-progress one. 
How large do we allow that queue to grow? At some point -// we'll need to stop queueing them. So why bother at all? - -use crate::dns_types::{DnsConfig, DnsConfigParams}; use crate::storage::{self, UpdateError}; +use dns_server_api::{DnsConfig, DnsConfigParams, DnsServerApi}; use dns_service_client::{ ERROR_CODE_BAD_UPDATE_GENERATION, ERROR_CODE_UPDATE_IN_PROGRESS, }; -use dropshot::{endpoint, RequestContext}; +use dropshot::RequestContext; pub struct Context { store: storage::Store, @@ -112,41 +22,40 @@ impl Context { } pub fn api() -> dropshot::ApiDescription { - let mut api = dropshot::ApiDescription::new(); - - api.register(dns_config_get).expect("register dns_config_get"); - api.register(dns_config_put).expect("register dns_config_update"); - api + dns_server_api::dns_server_api::api_description::() + .expect("registered DNS server entrypoints") } -#[endpoint( - method = GET, - path = "/config", -)] -async fn dns_config_get( - rqctx: RequestContext, -) -> Result, dropshot::HttpError> { - let apictx = rqctx.context(); - let config = apictx.store.dns_config().await.map_err(|e| { - dropshot::HttpError::for_internal_error(format!( - "internal error: {:?}", - e - )) - })?; - Ok(dropshot::HttpResponseOk(config)) -} +enum DnsServerApiImpl {} + +impl DnsServerApi for DnsServerApiImpl { + type Context = Context; -#[endpoint( - method = PUT, - path = "/config", -)] -async fn dns_config_put( - rqctx: RequestContext, - rq: dropshot::TypedBody, -) -> Result { - let apictx = rqctx.context(); - apictx.store.dns_config_update(&rq.into_inner(), &rqctx.request_id).await?; - Ok(dropshot::HttpResponseUpdatedNoContent()) + async fn dns_config_get( + rqctx: RequestContext, + ) -> Result, dropshot::HttpError> { + let apictx = rqctx.context(); + let config = apictx.store.dns_config().await.map_err(|e| { + dropshot::HttpError::for_internal_error(format!( + "internal error: {:?}", + e + )) + })?; + Ok(dropshot::HttpResponseOk(config)) + } + + async fn dns_config_put( + rqctx: 
RequestContext, + rq: dropshot::TypedBody, + ) -> Result + { + let apictx = rqctx.context(); + apictx + .store + .dns_config_update(&rq.into_inner(), &rqctx.request_id) + .await?; + Ok(dropshot::HttpResponseUpdatedNoContent()) + } } impl From for dropshot::HttpError { diff --git a/dns-server/src/lib.rs b/dns-server/src/lib.rs index ea8625a667..a2b1fda0d7 100644 --- a/dns-server/src/lib.rs +++ b/dns-server/src/lib.rs @@ -43,7 +43,6 @@ //! the persistent DNS data pub mod dns_server; -pub mod dns_types; pub mod http_server; pub mod storage; diff --git a/dns-server/src/storage.rs b/dns-server/src/storage.rs index 21fb9ebdc6..85b2e79b8b 100644 --- a/dns-server/src/storage.rs +++ b/dns-server/src/storage.rs @@ -92,9 +92,9 @@ // backwards-compatible way (but obviously one wouldn't get the scaling benefits // while continuing to use the old API). -use crate::dns_types::{DnsConfig, DnsConfigParams, DnsConfigZone, DnsRecord}; use anyhow::{anyhow, Context}; use camino::Utf8PathBuf; +use dns_server_api::{DnsConfig, DnsConfigParams, DnsConfigZone, DnsRecord}; use serde::{Deserialize, Serialize}; use sled::transaction::ConflictableTransactionError; use slog::{debug, error, info, o, warn}; @@ -777,13 +777,13 @@ impl<'a, 'b> Drop for UpdateGuard<'a, 'b> { #[cfg(test)] mod test { use super::{Config, Store, UpdateError}; - use crate::dns_types::DnsConfigParams; - use crate::dns_types::DnsConfigZone; - use crate::dns_types::DnsRecord; use crate::storage::QueryError; use anyhow::Context; use camino::Utf8PathBuf; use camino_tempfile::Utf8TempDir; + use dns_server_api::DnsConfigParams; + use dns_server_api::DnsConfigZone; + use dns_server_api::DnsRecord; use omicron_test_utils::dev::test_setup_log; use std::collections::BTreeSet; use std::collections::HashMap; diff --git a/dns-server/tests/openapi_test.rs b/dns-server/tests/openapi_test.rs deleted file mode 100644 index 490680eda4..0000000000 --- a/dns-server/tests/openapi_test.rs +++ /dev/null @@ -1,27 +0,0 @@ -// This Source Code Form 
is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -use expectorate::assert_contents; -use omicron_test_utils::dev::test_cmds::assert_exit_code; -use omicron_test_utils::dev::test_cmds::path_to_executable; -use omicron_test_utils::dev::test_cmds::run_command; -use omicron_test_utils::dev::test_cmds::EXIT_SUCCESS; -use openapiv3::OpenAPI; -use subprocess::Exec; - -const CMD_API_GEN: &str = env!("CARGO_BIN_EXE_apigen"); - -#[test] -fn test_dns_server_openapi() { - let exec = Exec::cmd(path_to_executable(CMD_API_GEN)); - let (exit_status, stdout, stderr) = run_command(exec); - assert_exit_code(exit_status, EXIT_SUCCESS, &stderr); - - let spec: OpenAPI = - serde_json::from_str(&stdout).expect("stdout was not valid OpenAPI"); - let errors = openapi_lint::validate(&spec); - assert!(errors.is_empty(), "{}", errors.join("\n\n")); - - assert_contents("../openapi/dns-server.json", &stdout); -} diff --git a/installinator-artifactd/Cargo.toml b/installinator-api/Cargo.toml similarity index 55% rename from installinator-artifactd/Cargo.toml rename to installinator-api/Cargo.toml index 236ea7a51c..52db4362c6 100644 --- a/installinator-artifactd/Cargo.toml +++ b/installinator-api/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "installinator-artifactd" +name = "installinator-api" version = "0.1.0" edition = "2021" license = "MPL-2.0" @@ -9,24 +9,12 @@ workspace = true [dependencies] anyhow.workspace = true -async-trait.workspace = true -clap.workspace = true dropshot.workspace = true hyper.workspace = true +installinator-common.workspace = true +omicron-common.workspace = true +omicron-workspace-hack.workspace = true schemars.workspace = true serde.workspace = true -serde_json.workspace = true slog.workspace = true uuid.workspace = true - -installinator-common.workspace = true -omicron-common.workspace = true -omicron-workspace-hack.workspace = true - 
-[dev-dependencies] -expectorate.workspace = true -omicron-test-utils.workspace = true -openapiv3.workspace = true -openapi-lint.workspace = true -serde_json.workspace = true -subprocess.workspace = true diff --git a/installinator-api/src/lib.rs b/installinator-api/src/lib.rs new file mode 100644 index 0000000000..cd87643a66 --- /dev/null +++ b/installinator-api/src/lib.rs @@ -0,0 +1,167 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! The REST API that installinator is a client of. +//! +//! Note that most of our APIs are named by their server. This one is instead +//! named by the client, since it is expected that multiple services will +//! implement it. + +use anyhow::{anyhow, Result}; +use dropshot::{ + ConfigDropshot, FreeformBody, HandlerTaskMode, HttpError, + HttpResponseHeaders, HttpResponseOk, HttpResponseUpdatedNoContent, + HttpServerStarter, Path, RequestContext, TypedBody, +}; +use hyper::{header, Body, StatusCode}; +use installinator_common::EventReport; +use omicron_common::update::ArtifactHashId; +use schemars::JsonSchema; +use serde::Deserialize; +use uuid::Uuid; + +#[derive(Debug, Deserialize, JsonSchema)] +pub struct ReportQuery { + /// A unique identifier for the update. + pub update_id: Uuid, +} + +#[dropshot::api_description] +pub trait InstallinatorApi { + type Context; + + /// Fetch an artifact by hash. + #[endpoint { + method = GET, + path = "/artifacts/by-hash/{kind}/{hash}", + }] + async fn get_artifact_by_hash( + rqctx: RequestContext, + path: Path, + ) -> Result>, HttpError>; + + /// Report progress and completion to the server. + /// + /// This method requires an `update_id` path parameter. This update ID is + /// matched against the server currently performing an update. 
If the + /// server is unaware of the update ID, it will return an HTTP 422 + /// Unprocessable Entity code. + #[endpoint { + method = POST, + path = "/report-progress/{update_id}", + }] + async fn report_progress( + rqctx: RequestContext, + path: Path, + report: TypedBody, + ) -> Result; +} + +/// Add a content length header to a response. +/// +/// Intended to be called by `get_artifact_by_hash` implementations. +pub fn body_to_artifact_response( + size: u64, + body: Body, +) -> HttpResponseHeaders> { + let mut response = + HttpResponseHeaders::new_unnamed(HttpResponseOk(body.into())); + let headers = response.headers_mut(); + headers.append(header::CONTENT_LENGTH, size.into()); + response +} + +/// The result of processing an installinator event report. +#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)] +#[must_use] +pub enum EventReportStatus { + /// This report was processed by the server. + Processed, + + /// The update ID was not recognized by the server. + UnrecognizedUpdateId, + + /// The progress receiver is closed. + ReceiverClosed, +} + +impl EventReportStatus { + /// Convert this status to an HTTP result. + /// + /// Intended to be called by `report_progress` implementations. + pub fn to_http_result( + self, + update_id: Uuid, + ) -> Result { + match self { + EventReportStatus::Processed => Ok(HttpResponseUpdatedNoContent()), + EventReportStatus::UnrecognizedUpdateId => { + Err(HttpError::for_client_error( + None, + StatusCode::UNPROCESSABLE_ENTITY, + format!( + "update ID {update_id} unrecognized by this server" + ), + )) + } + EventReportStatus::ReceiverClosed => { + Err(HttpError::for_client_error( + None, + StatusCode::GONE, + format!("update ID {update_id}: receiver closed"), + )) + } + } + } +} + +/// Creates a default `ConfigDropshot` for the installinator API. 
+pub fn default_config(bind_address: std::net::SocketAddr) -> ConfigDropshot { + ConfigDropshot { + bind_address, + // Even though the installinator sets an upper bound on the number of + // items in a progress report, they can get pretty large if they + // haven't gone through for a bit. Ensure that hitting the max request + // size won't cause a failure by setting a generous upper bound for the + // request size. + // + // TODO: replace with an endpoint-specific option once + // https://github.com/oxidecomputer/dropshot/pull/618 lands and is + // available in omicron. + request_body_max_bytes: 4 * 1024 * 1024, + default_handler_task_mode: HandlerTaskMode::Detached, + } +} + +/// Make an `HttpServerStarter` for the installinator API with default settings. +pub fn make_server_starter( + context: T::Context, + bind_address: std::net::SocketAddr, + log: &slog::Logger, +) -> Result> { + let dropshot_config = dropshot::ConfigDropshot { + bind_address, + // Even though the installinator sets an upper bound on the number + // of items in a progress report, they can get pretty large if they + // haven't gone through for a bit. Ensure that hitting the max + // request size won't cause a failure by setting a generous upper + // bound for the request size. + // + // TODO: replace with an endpoint-specific option once + // https://github.com/oxidecomputer/dropshot/pull/618 lands and is + // available in omicron. 
+ request_body_max_bytes: 4 * 1024 * 1024, + default_handler_task_mode: HandlerTaskMode::Detached, + }; + + let api = crate::installinator_api::api_description::()?; + let server = + dropshot::HttpServerStarter::new(&dropshot_config, api, context, &log) + .map_err(|error| { + anyhow!(error) + .context("failed to create installinator artifact server") + })?; + + Ok(server) +} diff --git a/installinator-artifactd/src/bin/installinator-artifactd.rs b/installinator-artifactd/src/bin/installinator-artifactd.rs deleted file mode 100644 index abe63bbe31..0000000000 --- a/installinator-artifactd/src/bin/installinator-artifactd.rs +++ /dev/null @@ -1,38 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Executable that generates OpenAPI definitions for the installinator artifact server. - -use anyhow::Result; -use clap::Parser; -use omicron_common::cmd::CmdError; - -#[derive(Debug, Parser)] -#[clap(name = "installinator-artifactd")] -enum Args { - /// Print the external OpenAPI Spec document and exit - Openapi, - // NOTE: this server is not intended to be run as a standalone service. Instead, it should be - // embedded as part of other servers (e.g. wicketd). 
-} - -fn main() { - if let Err(cmd_error) = do_run() { - omicron_common::cmd::fatal(cmd_error); - } -} - -fn do_run() -> Result<(), CmdError> { - let args = Args::parse(); - - match args { - Args::Openapi => { - installinator_artifactd::run_openapi().map_err(|error| { - CmdError::Failure( - error.context("failed to generate OpenAPI spec"), - ) - }) - } - } -} diff --git a/installinator-artifactd/src/context.rs b/installinator-artifactd/src/context.rs deleted file mode 100644 index beea2593aa..0000000000 --- a/installinator-artifactd/src/context.rs +++ /dev/null @@ -1,13 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// Copyright 2023 Oxide Computer Company - -//! User provided dropshot server context - -use crate::store::ArtifactStore; - -pub struct ServerContext { - pub(crate) artifact_store: ArtifactStore, -} diff --git a/installinator-artifactd/src/http_entrypoints.rs b/installinator-artifactd/src/http_entrypoints.rs deleted file mode 100644 index 13163e007b..0000000000 --- a/installinator-artifactd/src/http_entrypoints.rs +++ /dev/null @@ -1,115 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
- -// Copyright 2022 Oxide Computer Company - -use dropshot::{ - endpoint, ApiDescription, ApiDescriptionRegisterError, FreeformBody, - HttpError, HttpResponseHeaders, HttpResponseOk, - HttpResponseUpdatedNoContent, Path, RequestContext, TypedBody, -}; -use hyper::{header, Body, StatusCode}; -use installinator_common::EventReport; -use omicron_common::update::ArtifactHashId; -use schemars::JsonSchema; -use serde::Deserialize; -use uuid::Uuid; - -use crate::{context::ServerContext, EventReportStatus}; - -type ArtifactServerApiDesc = ApiDescription; - -/// Return a description of the artifact server api for use in generating an OpenAPI spec -pub fn api() -> ArtifactServerApiDesc { - fn register_endpoints( - api: &mut ArtifactServerApiDesc, - ) -> Result<(), ApiDescriptionRegisterError> { - api.register(get_artifact_by_hash)?; - api.register(report_progress)?; - Ok(()) - } - - let mut api = ArtifactServerApiDesc::new(); - if let Err(err) = register_endpoints(&mut api) { - panic!("failed to register entrypoints: {}", err); - } - api -} - -/// Fetch an artifact by hash. -#[endpoint { - method = GET, - path = "/artifacts/by-hash/{kind}/{hash}", -}] -async fn get_artifact_by_hash( - rqctx: RequestContext, - path: Path, -) -> Result>, HttpError> { - match rqctx - .context() - .artifact_store - .get_artifact_by_hash(&path.into_inner()) - .await - { - Some((size, body)) => Ok(body_to_artifact_response(size, body)), - None => { - Err(HttpError::for_not_found(None, "Artifact not found".into())) - } - } -} - -#[derive(Debug, Deserialize, JsonSchema)] -pub(crate) struct ReportQuery { - /// A unique identifier for the update. - pub(crate) update_id: Uuid, -} - -/// Report progress and completion to the server. -/// -/// This method requires an `update_id` path parameter. This update ID is -/// matched against the server currently performing an update. If the server -/// is unaware of the update ID, it will return an HTTP 422 Unprocessable Entity -/// code. 
-#[endpoint { - method = POST, - path = "/report-progress/{update_id}", -}] -async fn report_progress( - rqctx: RequestContext, - path: Path, - report: TypedBody, -) -> Result { - let update_id = path.into_inner().update_id; - match rqctx - .context() - .artifact_store - .report_progress(update_id, report.into_inner()) - .await? - { - EventReportStatus::Processed => Ok(HttpResponseUpdatedNoContent()), - EventReportStatus::UnrecognizedUpdateId => { - Err(HttpError::for_client_error( - None, - StatusCode::UNPROCESSABLE_ENTITY, - format!("update ID {update_id} unrecognized by this server"), - )) - } - EventReportStatus::ReceiverClosed => Err(HttpError::for_client_error( - None, - StatusCode::GONE, - format!("update ID {update_id}: receiver closed"), - )), - } -} - -fn body_to_artifact_response( - size: u64, - body: Body, -) -> HttpResponseHeaders> { - let mut response = - HttpResponseHeaders::new_unnamed(HttpResponseOk(body.into())); - let headers = response.headers_mut(); - headers.append(header::CONTENT_LENGTH, size.into()); - response -} diff --git a/installinator-artifactd/src/lib.rs b/installinator-artifactd/src/lib.rs deleted file mode 100644 index c54ed78a97..0000000000 --- a/installinator-artifactd/src/lib.rs +++ /dev/null @@ -1,29 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// Copyright 2023 Oxide Computer Company - -mod context; -mod http_entrypoints; -mod server; -mod store; - -pub use context::ServerContext; -pub use server::ArtifactServer; -pub use store::{ArtifactGetter, EventReportStatus}; - -use anyhow::Result; - -/// Run the OpenAPI generator for the API; which emits the OpenAPI spec -/// to stdout. 
-pub fn run_openapi() -> Result<()> { - http_entrypoints::api() - .openapi("Oxide Installinator Artifact Server", "0.0.1") - .description("API for use by the installinator to retrieve artifacts") - .contact_url("https://oxide.computer") - .contact_email("api@oxide.computer") - .write(&mut std::io::stdout())?; - - Ok(()) -} diff --git a/installinator-artifactd/src/server.rs b/installinator-artifactd/src/server.rs deleted file mode 100644 index 88b622b756..0000000000 --- a/installinator-artifactd/src/server.rs +++ /dev/null @@ -1,74 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// Copyright 2023 Oxide Computer Company - -//! The installinator artifact server. - -use std::net::SocketAddrV6; - -use anyhow::{anyhow, Result}; -use dropshot::{HandlerTaskMode, HttpServer}; - -use crate::{ - context::ServerContext, - store::{ArtifactGetter, ArtifactStore}, -}; - -/// The installinator artifact server. -#[derive(Debug)] -pub struct ArtifactServer { - address: SocketAddrV6, - log: slog::Logger, - store: ArtifactStore, -} - -impl ArtifactServer { - /// Creates a new artifact server with the given address. - pub fn new( - getter: Getter, - address: SocketAddrV6, - log: &slog::Logger, - ) -> Self { - let log = log.new(slog::o!("component" => "installinator artifactd")); - let store = ArtifactStore::new(getter, &log); - Self { address, log, store } - } - - /// Starts the artifact server. - /// - /// This returns an `HttpServer`, which can be awaited to completion. 
- pub fn start(self) -> Result> { - let context = ServerContext { artifact_store: self.store }; - - let dropshot_config = dropshot::ConfigDropshot { - bind_address: std::net::SocketAddr::V6(self.address), - // Even though the installinator sets an upper bound on the number - // of items in a progress report, they can get pretty large if they - // haven't gone through for a bit. Ensure that hitting the max - // request size won't cause a failure by setting a generous upper - // bound for the request size. - // - // TODO: replace with an endpoint-specific option once - // https://github.com/oxidecomputer/dropshot/pull/618 lands and is - // available in omicron. - request_body_max_bytes: 4 * 1024 * 1024, - default_handler_task_mode: HandlerTaskMode::Detached, - }; - - let api = crate::http_entrypoints::api(); - let server = dropshot::HttpServerStarter::new( - &dropshot_config, - api, - context, - &self.log, - ) - .map_err(|error| { - anyhow!(error) - .context("failed to create installinator artifact server") - })?; - - Ok(server.start()) - } -} diff --git a/installinator-artifactd/src/store.rs b/installinator-artifactd/src/store.rs deleted file mode 100644 index 12e2880893..0000000000 --- a/installinator-artifactd/src/store.rs +++ /dev/null @@ -1,79 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// Copyright 2023 Oxide Computer Company - -use std::fmt; - -use async_trait::async_trait; -use dropshot::HttpError; -use hyper::Body; -use installinator_common::EventReport; -use omicron_common::update::ArtifactHashId; -use slog::Logger; -use uuid::Uuid; - -/// Represents a way to fetch artifacts. -#[async_trait] -pub trait ArtifactGetter: fmt::Debug + Send + Sync + 'static { - /// Gets an artifact by hash, returning it as a [`Body`]. 
- async fn get_by_hash(&self, id: &ArtifactHashId) -> Option<(u64, Body)>; - - /// Reports update progress events from the installinator. - async fn report_progress( - &self, - update_id: Uuid, - report: EventReport, - ) -> Result; -} - -/// The status returned by [`ArtifactGetter::report_progress`]. -#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)] -#[must_use] -pub enum EventReportStatus { - /// This report was processed by the server. - Processed, - - /// The update ID was not recognized by the server. - UnrecognizedUpdateId, - - /// The progress receiver is closed. - ReceiverClosed, -} - -/// The artifact store -- a simple wrapper around a dynamic [`ArtifactGetter`] that does some basic -/// logging. -#[derive(Debug)] -pub(crate) struct ArtifactStore { - log: Logger, - getter: Box, - // TODO: implement this -} - -impl ArtifactStore { - pub(crate) fn new( - getter: Getter, - log: &Logger, - ) -> Self { - let log = log.new(slog::o!("component" => "artifact store")); - Self { log, getter: Box::new(getter) } - } - - pub(crate) async fn get_artifact_by_hash( - &self, - id: &ArtifactHashId, - ) -> Option<(u64, Body)> { - slog::debug!(self.log, "Artifact requested by hash: {:?}", id); - self.getter.get_by_hash(id).await - } - - pub(crate) async fn report_progress( - &self, - update_id: Uuid, - report: EventReport, - ) -> Result { - slog::debug!(self.log, "Report for {update_id}: {report:?}"); - self.getter.report_progress(update_id, report).await - } -} diff --git a/installinator-artifactd/tests/integration_tests/mod.rs b/installinator-artifactd/tests/integration_tests/mod.rs deleted file mode 100644 index ebb67c3880..0000000000 --- a/installinator-artifactd/tests/integration_tests/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
- -mod openapi; diff --git a/installinator-artifactd/tests/integration_tests/openapi.rs b/installinator-artifactd/tests/integration_tests/openapi.rs deleted file mode 100644 index 09441731d0..0000000000 --- a/installinator-artifactd/tests/integration_tests/openapi.rs +++ /dev/null @@ -1,39 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -use std::path::PathBuf; - -use expectorate::assert_contents; -use omicron_test_utils::dev::test_cmds::{ - assert_exit_code, path_to_executable, run_command, EXIT_SUCCESS, -}; -use openapiv3::OpenAPI; -use subprocess::Exec; - -// name of executable -const CMD_SERVER: &str = env!("CARGO_BIN_EXE_installinator-artifactd"); - -fn path_to_server() -> PathBuf { - path_to_executable(CMD_SERVER) -} - -#[test] -fn test_server_openapi() { - let exec = Exec::cmd(path_to_server()).arg("openapi"); - let (exit_status, stdout_text, stderr_text) = run_command(exec); - assert_exit_code(exit_status, EXIT_SUCCESS, &stderr_text); - assert_contents("tests/output/cmd-server-openapi-stderr", &stderr_text); - - let spec: OpenAPI = serde_json::from_str(&stdout_text) - .expect("stdout was not valid OpenAPI"); - - // Check for lint errors. - let errors = openapi_lint::validate(&spec); - assert!(errors.is_empty(), "{}", errors.join("\n\n")); - - // Confirm that the output hasn't changed. It's expected that we'll change - // this file as the API evolves, but pay attention to the diffs to ensure - // that the changes match your expectations. 
- assert_contents("../openapi/installinator-artifactd.json", &stdout_text); -} diff --git a/installinator-artifactd/tests/mod.rs b/installinator-artifactd/tests/mod.rs deleted file mode 100644 index 66fee5d99c..0000000000 --- a/installinator-artifactd/tests/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Integration tests for the installinator artifact server. -//! -//! Why use this weird layer of indirection, you might ask? Cargo chooses to -//! compile *each file* within the "tests/" subdirectory as a separate crate. -//! This means that doing "file-granularity" conditional compilation is -//! difficult, since a file like "test_for_illumos_only.rs" would get compiled -//! and tested regardless of the contents of "mod.rs". -//! -//! However, by lumping all tests into a submodule, all integration tests are -//! joined into a single crate, which itself can filter individual files -//! by (for example) choice of target OS. 
- -mod integration_tests; diff --git a/installinator-artifactd/tests/output/cmd-server-openapi-stderr b/installinator-artifactd/tests/output/cmd-server-openapi-stderr deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/installinator/Cargo.toml b/installinator/Cargo.toml index c21c3f2ee2..00dfb6440b 100644 --- a/installinator/Cargo.toml +++ b/installinator/Cargo.toml @@ -20,7 +20,7 @@ futures.workspace = true hex.workspace = true http.workspace = true illumos-utils.workspace = true -installinator-artifact-client.workspace = true +installinator-client.workspace = true installinator-common.workspace = true ipcc.workspace = true itertools.workspace = true diff --git a/installinator/src/artifact.rs b/installinator/src/artifact.rs index 734759a2c2..12e85e0938 100644 --- a/installinator/src/artifact.rs +++ b/installinator/src/artifact.rs @@ -7,7 +7,7 @@ use std::net::SocketAddr; use anyhow::{Context, Result}; use clap::Args; use futures::StreamExt; -use installinator_artifact_client::ClientError; +use installinator_client::ClientError; use installinator_common::EventReport; use ipcc::{InstallinatorImageId, Ipcc}; use omicron_common::update::{ArtifactHash, ArtifactHashId}; @@ -63,7 +63,7 @@ impl ArtifactIdOpts { #[derive(Debug)] pub(crate) struct ArtifactClient { log: slog::Logger, - client: installinator_artifact_client::Client, + client: installinator_client::Client, } impl ArtifactClient { @@ -81,8 +81,7 @@ impl ArtifactClient { let log = log.new( slog::o!("component" => "ArtifactClient", "peer" => addr.to_string()), ); - let client = - installinator_artifact_client::Client::new(&endpoint, log.clone()); + let client = installinator_client::Client::new(&endpoint, log.clone()); Self { log, client } } diff --git a/installinator/src/errors.rs b/installinator/src/errors.rs index 1349cf7d89..577d0d6f4d 100644 --- a/installinator/src/errors.rs +++ b/installinator/src/errors.rs @@ -4,7 +4,7 @@ use std::{net::SocketAddr, time::Duration}; -use 
installinator_artifact_client::ClientError; +use installinator_client::ClientError; use thiserror::Error; #[derive(Debug, Error)] diff --git a/installinator/src/mock_peers.rs b/installinator/src/mock_peers.rs index 434276649f..ccb35a2f06 100644 --- a/installinator/src/mock_peers.rs +++ b/installinator/src/mock_peers.rs @@ -16,7 +16,7 @@ use std::{ use anyhow::{bail, Result}; use async_trait::async_trait; use bytes::Bytes; -use installinator_artifact_client::{ClientError, ResponseValue}; +use installinator_client::{ClientError, ResponseValue}; use installinator_common::EventReport; use omicron_common::update::ArtifactHashId; use proptest::prelude::*; @@ -342,7 +342,7 @@ impl MockPeer { tokio::time::sleep(after).await; _ = sender .send(Err(ClientError::ErrorResponse(ResponseValue::new( - installinator_artifact_client::types::Error { + installinator_client::types::Error { error_code: None, message: format!("not-found error after {after:?}"), request_id: "mock-request-id".to_owned(), @@ -356,7 +356,7 @@ impl MockPeer { tokio::time::sleep(after).await; _ = sender .send(Err(ClientError::ErrorResponse(ResponseValue::new( - installinator_artifact_client::types::Error { + installinator_client::types::Error { error_code: None, message: format!("forbidden error after {after:?}"), request_id: "mock-request-id".to_owned(), @@ -526,7 +526,7 @@ impl PeersImpl for MockReportPeers { Ok(()) } else if peer == Self::invalid_peer() { Err(ClientError::ErrorResponse(ResponseValue::new( - installinator_artifact_client::types::Error { + installinator_client::types::Error { error_code: None, message: "invalid peer => HTTP 422".to_owned(), request_id: "mock-request-id".to_owned(), diff --git a/installinator/src/peers.rs b/installinator/src/peers.rs index 644507da4b..3d2e05077d 100644 --- a/installinator/src/peers.rs +++ b/installinator/src/peers.rs @@ -16,7 +16,7 @@ use buf_list::BufList; use bytes::Bytes; use display_error_chain::DisplayErrorChain; use futures::{Stream, StreamExt}; -use 
installinator_artifact_client::ClientError; +use installinator_client::ClientError; use installinator_common::{ EventReport, InstallinatorProgressMetadata, StepContext, StepProgress, }; diff --git a/openapi/dns-server.json b/openapi/dns-server.json index 1b02199b76..0252c1538a 100644 --- a/openapi/dns-server.json +++ b/openapi/dns-server.json @@ -2,7 +2,12 @@ "openapi": "3.0.3", "info": { "title": "Internal DNS", - "version": "v0.1.0" + "description": "API for the internal DNS server", + "contact": { + "url": "https://oxide.computer", + "email": "api@oxide.computer" + }, + "version": "0.0.1" }, "paths": { "/config": { diff --git a/openapi/installinator-artifactd.json b/openapi/installinator.json similarity index 99% rename from openapi/installinator-artifactd.json rename to openapi/installinator.json index 61f555e10d..0631344b25 100644 --- a/openapi/installinator-artifactd.json +++ b/openapi/installinator.json @@ -1,8 +1,8 @@ { "openapi": "3.0.3", "info": { - "title": "Oxide Installinator Artifact Server", - "description": "API for use by the installinator to retrieve artifacts", + "title": "Installinator API", + "description": "API for installinator to fetch artifacts and report progress", "contact": { "url": "https://oxide.computer", "email": "api@oxide.computer" diff --git a/openapi/wicketd.json b/openapi/wicketd.json index 7d50a38268..2098f0bca4 100644 --- a/openapi/wicketd.json +++ b/openapi/wicketd.json @@ -85,7 +85,7 @@ }, "/baseboard": { "get": { - "summary": "Report the configured baseboard details", + "summary": "Report the configured baseboard details.", "operationId": "get_baseboard", "responses": { "200": { @@ -214,8 +214,8 @@ }, "/inventory": { "get": { - "summary": "A status endpoint used to report high level information known to wicketd.", - "description": "This endpoint can be polled to see if there have been state changes in the system that are useful to report to wicket.\nWicket, and possibly other callers, will retrieve the changed information, 
with follow up calls.", + "summary": "A status endpoint used to report high level information known to", + "description": "wicketd.\nThis endpoint can be polled to see if there have been state changes in the system that are useful to report to wicket.\nWicket, and possibly other callers, will retrieve the changed information, with follow up calls.", "operationId": "get_inventory", "requestBody": { "content": { @@ -274,8 +274,8 @@ }, "/preflight/uplink": { "get": { - "summary": "An endpoint to get the report for the most recent (or still running)", - "description": "preflight uplink check.", + "summary": "Get the report for the most recent (or still running) preflight uplink", + "description": "check.", "operationId": "get_preflight_uplink_report", "responses": { "200": { @@ -283,7 +283,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/EventReportForGenericSpec" + "$ref": "#/components/schemas/EventReportForUplinkPreflightCheckSpec" } } } @@ -297,7 +297,7 @@ } }, "post": { - "summary": "An endpoint to start a preflight check for uplink configuration.", + "summary": "Start a preflight check for uplink configuration.", "operationId": "post_start_preflight_uplink_check", "requestBody": { "content": { @@ -637,7 +637,7 @@ }, "/reload-config": { "post": { - "summary": "An endpoint instructing wicketd to reload its SMF config properties.", + "summary": "Instruct wicketd to reload its SMF config properties.", "description": "The only expected client of this endpoint is `curl` from wicketd's SMF `refresh` method, but other clients hitting it is harmless.", "operationId": "post_reload_config", "responses": { @@ -1390,7 +1390,7 @@ "request_id" ] }, - "EventReportForGenericSpec": { + "EventReportForUplinkPreflightCheckSpec": { "description": "A report produced from an [`EventBuffer`](crate::EventBuffer).\n\nRemote reports can be passed into a [`StepContext`](crate::StepContext), in which case they show up as nested events.", "type": "object", 
"properties": { @@ -1405,7 +1405,7 @@ "description": "A list of progress events, or whether we're currently waiting for a progress event.\n\nCurrently, this produces one progress event for each top-level and nested event in progress.", "type": "array", "items": { - "$ref": "#/components/schemas/ProgressEventForGenericSpec" + "$ref": "#/components/schemas/ProgressEventForUplinkPreflightCheckSpec" } }, "root_execution_id": { @@ -1418,7 +1418,7 @@ "description": "A list of step events.\n\nStep events include success and failure events.", "type": "array", "items": { - "$ref": "#/components/schemas/StepEventForGenericSpec" + "$ref": "#/components/schemas/StepEventForUplinkPreflightCheckSpec" } } }, @@ -1547,7 +1547,7 @@ "type": "object", "properties": { "force_refresh": { - "description": "If true, refresh the state of these SPs from MGS prior to returning (instead of returning cached data).", + "description": "Refresh the state of these SPs from MGS prior to returning (instead of returning cached data).", "type": "array", "items": { "$ref": "#/components/schemas/SpIdentifier" @@ -1892,6 +1892,42 @@ "total_elapsed" ] }, + "ProgressEventForUplinkPreflightCheckSpec": { + "type": "object", + "properties": { + "data": { + "description": "The kind of event this is.", + "allOf": [ + { + "$ref": "#/components/schemas/ProgressEventKindForUplinkPreflightCheckSpec" + } + ] + }, + "execution_id": { + "description": "The execution ID.", + "type": "string", + "format": "uuid" + }, + "spec": { + "description": "The specification that this event belongs to.\n\nThis is typically the name of the type `S` for which `StepSpec` is implemented.\n\nThis can be used with `Self::from_generic` to deserialize generic metadata.", + "type": "string" + }, + "total_elapsed": { + "description": "Total time elapsed since the start of execution.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + } + }, + "required": [ + "data", + "execution_id", + "spec", + "total_elapsed" + ] + }, 
"ProgressEventForWicketdEngineSpec": { "type": "object", "properties": { @@ -2114,6 +2150,193 @@ } ] }, + "ProgressEventKindForUplinkPreflightCheckSpec": { + "oneOf": [ + { + "description": "The update engine is waiting for a progress message.\n\nThe update engine sends this message immediately after a [`StepEvent`] corresponding to a new step.", + "type": "object", + "properties": { + "attempt": { + "description": "The attempt number currently being executed.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "attempt_elapsed": { + "description": "Total time elapsed since the start of the attempt.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "waiting_for_progress" + ] + }, + "step": { + "description": "Information about the step.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoWithMetadataForUplinkPreflightCheckSpec" + } + ] + }, + "step_elapsed": { + "description": "Total time elapsed since the start of the step. 
Includes prior attempts.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + } + }, + "required": [ + "attempt", + "attempt_elapsed", + "kind", + "step", + "step_elapsed" + ] + }, + { + "type": "object", + "properties": { + "attempt": { + "description": "The attempt number currently being executed.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "attempt_elapsed": { + "description": "Total time elapsed since the start of the attempt.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "progress" + ] + }, + "metadata": { + "description": "Metadata that was returned with progress.", + "type": "string" + }, + "progress": { + "nullable": true, + "description": "Current progress.", + "allOf": [ + { + "$ref": "#/components/schemas/ProgressCounter" + } + ] + }, + "step": { + "description": "Information about the step.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoWithMetadataForUplinkPreflightCheckSpec" + } + ] + }, + "step_elapsed": { + "description": "Total time elapsed since the start of the step. 
Includes prior attempts.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + } + }, + "required": [ + "attempt", + "attempt_elapsed", + "kind", + "metadata", + "step", + "step_elapsed" + ] + }, + { + "type": "object", + "properties": { + "attempt": { + "description": "The attempt number currently being executed.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "attempt_elapsed": { + "description": "The time it took for this attempt to complete.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "event": { + "description": "The event that occurred.", + "allOf": [ + { + "$ref": "#/components/schemas/ProgressEventForGenericSpec" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "nested" + ] + }, + "step": { + "description": "Information about the step.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoWithMetadataForUplinkPreflightCheckSpec" + } + ] + }, + "step_elapsed": { + "description": "Total time elapsed since the start of the step. 
Includes prior attempts.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + } + }, + "required": [ + "attempt", + "attempt_elapsed", + "event", + "kind", + "step", + "step_elapsed" + ] + }, + { + "description": "Future variants that might be unknown.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "unknown" + ] + } + }, + "required": [ + "kind" + ] + } + ] + }, "ProgressEventKindForWicketdEngineSpec": { "oneOf": [ { @@ -3382,6 +3605,25 @@ "total_component_steps" ] }, + "StepComponentSummaryForUplinkPreflightCheckSpec": { + "type": "object", + "properties": { + "component": { + "description": "The component.", + "type": "string" + }, + "total_component_steps": { + "description": "The number of steps present in this component.", + "type": "integer", + "format": "uint", + "minimum": 0 + } + }, + "required": [ + "component", + "total_component_steps" + ] + }, "StepComponentSummaryForWicketdEngineSpec": { "type": "object", "properties": { @@ -3448,14 +3690,14 @@ "total_elapsed" ] }, - "StepEventForWicketdEngineSpec": { + "StepEventForUplinkPreflightCheckSpec": { "type": "object", "properties": { "data": { "description": "The kind of event this is.", "allOf": [ { - "$ref": "#/components/schemas/StepEventKindForWicketdEngineSpec" + "$ref": "#/components/schemas/StepEventKindForUplinkPreflightCheckSpec" } ] }, @@ -3491,19 +3733,62 @@ "total_elapsed" ] }, - "StepEventKindForGenericSpec": { - "oneOf": [ - { - "description": "No steps were defined, and the executor exited without doing anything.\n\nThis is a terminal event: it is guaranteed that no more events will be seen after this one.", - "type": "object", - "properties": { - "kind": { - "type": "string", - "enum": [ - "no_steps_defined" - ] + "StepEventForWicketdEngineSpec": { + "type": "object", + "properties": { + "data": { + "description": "The kind of event this is.", + "allOf": [ + { + "$ref": "#/components/schemas/StepEventKindForWicketdEngineSpec" } - }, + 
] + }, + "event_index": { + "description": "A monotonically increasing index for this `StepEvent`.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "execution_id": { + "description": "The execution ID.", + "type": "string", + "format": "uuid" + }, + "spec": { + "description": "The specification that this event belongs to.\n\nThis is typically the name of the type `S` for which `StepSpec` is implemented.\n\nThis can be used along with `Self::from_generic` to identify which specification to deserialize generic metadata against. For example:\n\n```rust,ignore if event.spec == \"MySpec\" { // event is likely generated from a MySpec engine. let event = Event::::from_generic(event)?; // ... } ```", + "type": "string" + }, + "total_elapsed": { + "description": "Total time elapsed since the start of execution.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + } + }, + "required": [ + "data", + "event_index", + "execution_id", + "spec", + "total_elapsed" + ] + }, + "StepEventKindForGenericSpec": { + "oneOf": [ + { + "description": "No steps were defined, and the executor exited without doing anything.\n\nThis is a terminal event: it is guaranteed that no more events will be seen after this one.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "no_steps_defined" + ] + } + }, "required": [ "kind" ] @@ -3976,7 +4261,7 @@ } ] }, - "StepEventKindForWicketdEngineSpec": { + "StepEventKindForUplinkPreflightCheckSpec": { "oneOf": [ { "description": "No steps were defined, and the executor exited without doing anything.\n\nThis is a terminal event: it is guaranteed that no more events will be seen after this one.", @@ -4001,14 +4286,14 @@ "description": "A list of components, along with the number of items each component has.", "type": "array", "items": { - "$ref": "#/components/schemas/StepComponentSummaryForWicketdEngineSpec" + "$ref": "#/components/schemas/StepComponentSummaryForUplinkPreflightCheckSpec" } 
}, "first_step": { "description": "Information about the first step.", "allOf": [ { - "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + "$ref": "#/components/schemas/StepInfoWithMetadataForUplinkPreflightCheckSpec" } ] }, @@ -4022,7 +4307,7 @@ "description": "The list of steps that will be executed.", "type": "array", "items": { - "$ref": "#/components/schemas/StepInfoForWicketdEngineSpec" + "$ref": "#/components/schemas/StepInfoForUplinkPreflightCheckSpec" } } }, @@ -4062,13 +4347,14 @@ "type": "string" }, "metadata": { - "description": "Progress-related metadata associated with this attempt." + "description": "Progress-related metadata associated with this attempt.", + "type": "string" }, "step": { "description": "Information about the step.", "allOf": [ { - "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + "$ref": "#/components/schemas/StepInfoWithMetadataForUplinkPreflightCheckSpec" } ] }, @@ -4123,7 +4409,7 @@ "description": "Information about the step.", "allOf": [ { - "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + "$ref": "#/components/schemas/StepInfoWithMetadataForUplinkPreflightCheckSpec" } ] }, @@ -4173,7 +4459,7 @@ "description": "The next step that is being started.", "allOf": [ { - "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + "$ref": "#/components/schemas/StepInfoWithMetadataForUplinkPreflightCheckSpec" } ] }, @@ -4181,7 +4467,7 @@ "description": "The outcome of the step.", "allOf": [ { - "$ref": "#/components/schemas/StepOutcomeForWicketdEngineSpec" + "$ref": "#/components/schemas/StepOutcomeForUplinkPreflightCheckSpec" } ] }, @@ -4189,7 +4475,7 @@ "description": "Information about the step that just completed.", "allOf": [ { - "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + "$ref": "#/components/schemas/StepInfoWithMetadataForUplinkPreflightCheckSpec" } ] }, @@ -4240,7 +4526,7 @@ "description": "The outcome of the last 
step.", "allOf": [ { - "$ref": "#/components/schemas/StepOutcomeForWicketdEngineSpec" + "$ref": "#/components/schemas/StepOutcomeForUplinkPreflightCheckSpec" } ] }, @@ -4248,7 +4534,7 @@ "description": "Information about the last step that completed.", "allOf": [ { - "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + "$ref": "#/components/schemas/StepInfoWithMetadataForUplinkPreflightCheckSpec" } ] }, @@ -4293,7 +4579,7 @@ "description": "Information about the step that failed.", "allOf": [ { - "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + "$ref": "#/components/schemas/StepInfoWithMetadataForUplinkPreflightCheckSpec" } ] }, @@ -4340,7 +4626,7 @@ "description": "Information about the step that was running at the time execution was aborted.", "allOf": [ { - "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + "$ref": "#/components/schemas/StepInfoWithMetadataForUplinkPreflightCheckSpec" } ] }, @@ -4422,7 +4708,7 @@ "description": "Information about the step that's occurring.", "allOf": [ { - "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + "$ref": "#/components/schemas/StepInfoWithMetadataForUplinkPreflightCheckSpec" } ] }, @@ -4461,162 +4747,17 @@ } ] }, - "StepInfoForGenericSpec": { - "description": "Serializable information about a step.", - "type": "object", - "properties": { - "component": { - "description": "The component that this step is part of." - }, - "component_index": { - "description": "The index of the step within the component.", - "type": "integer", - "format": "uint", - "minimum": 0 - }, - "description": { - "description": "The description for this step.", - "type": "string" - }, - "id": { - "description": "An identifier for this step." 
- }, - "index": { - "description": "The index of the step within all steps to be executed.", - "type": "integer", - "format": "uint", - "minimum": 0 - }, - "total_component_steps": { - "description": "The total number of steps in this component.", - "type": "integer", - "format": "uint", - "minimum": 0 - } - }, - "required": [ - "component", - "component_index", - "description", - "id", - "index", - "total_component_steps" - ] - }, - "StepInfoForWicketdEngineSpec": { - "description": "Serializable information about a step.", - "type": "object", - "properties": { - "component": { - "description": "The component that this step is part of.", - "allOf": [ - { - "$ref": "#/components/schemas/UpdateComponent" - } - ] - }, - "component_index": { - "description": "The index of the step within the component.", - "type": "integer", - "format": "uint", - "minimum": 0 - }, - "description": { - "description": "The description for this step.", - "type": "string" - }, - "id": { - "description": "An identifier for this step.", - "allOf": [ - { - "$ref": "#/components/schemas/UpdateStepId" - } - ] - }, - "index": { - "description": "The index of the step within all steps to be executed.", - "type": "integer", - "format": "uint", - "minimum": 0 - }, - "total_component_steps": { - "description": "The total number of steps in this component.", - "type": "integer", - "format": "uint", - "minimum": 0 - } - }, - "required": [ - "component", - "component_index", - "description", - "id", - "index", - "total_component_steps" - ] - }, - "StepInfoWithMetadataForGenericSpec": { - "description": "Serializable information about a step.", - "type": "object", - "properties": { - "info": { - "description": "Information about this step.", - "allOf": [ - { - "$ref": "#/components/schemas/StepInfoForGenericSpec" - } - ] - }, - "metadata": { - "nullable": true, - "description": "Additional metadata associated with this step." 
- } - }, - "required": [ - "info" - ] - }, - "StepInfoWithMetadataForWicketdEngineSpec": { - "description": "Serializable information about a step.", - "type": "object", - "properties": { - "info": { - "description": "Information about this step.", - "allOf": [ - { - "$ref": "#/components/schemas/StepInfoForWicketdEngineSpec" - } - ] - }, - "metadata": { - "nullable": true, - "description": "Additional metadata associated with this step." - } - }, - "required": [ - "info" - ] - }, - "StepOutcomeForGenericSpec": { + "StepEventKindForWicketdEngineSpec": { "oneOf": [ { - "description": "The step completed successfully.", + "description": "No steps were defined, and the executor exited without doing anything.\n\nThis is a terminal event: it is guaranteed that no more events will be seen after this one.", "type": "object", "properties": { "kind": { "type": "string", "enum": [ - "success" + "no_steps_defined" ] - }, - "message": { - "nullable": true, - "description": "An optional message associated with this step.", - "type": "string" - }, - "metadata": { - "nullable": true, - "description": "Optional completion metadata associated with the step." 
} }, "required": [ @@ -4624,37 +4765,740 @@ ] }, { - "description": "The step completed with a warning.", + "description": "Execution was started.\n\nThis is an initial event -- it is always expected to be the first event received from the event stream.", "type": "object", "properties": { + "components": { + "description": "A list of components, along with the number of items each component has.", + "type": "array", + "items": { + "$ref": "#/components/schemas/StepComponentSummaryForWicketdEngineSpec" + } + }, + "first_step": { + "description": "Information about the first step.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + } + ] + }, "kind": { "type": "string", "enum": [ - "warning" + "execution_started" ] }, - "message": { - "description": "A warning message.", - "type": "string" - }, - "metadata": { - "nullable": true, - "description": "Optional completion metadata associated with the step." + "steps": { + "description": "The list of steps that will be executed.", + "type": "array", + "items": { + "$ref": "#/components/schemas/StepInfoForWicketdEngineSpec" + } } }, "required": [ + "components", + "first_step", "kind", - "message" + "steps" ] }, { - "description": "The step was skipped with a message.", + "description": "Progress was reset along an attempt, and this attempt is going down a different path.", "type": "object", "properties": { - "kind": { - "type": "string", - "enum": [ - "skipped" + "attempt": { + "description": "The current attempt number.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "attempt_elapsed": { + "description": "The amount of time this attempt has taken so far.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "progress_reset" + ] + }, + "message": { + "description": "A message associated with the reset.", + "type": "string" + }, + "metadata": { + "description": "Progress-related metadata associated
with this attempt." + }, + "step": { + "description": "Information about the step.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + } + ] + }, + "step_elapsed": { + "description": "Total time elapsed since the start of the step. Includes prior attempts.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + } + }, + "required": [ + "attempt", + "attempt_elapsed", + "kind", + "message", + "metadata", + "step", + "step_elapsed" + ] + }, + { + "description": "An attempt failed and this step is being retried.", + "type": "object", + "properties": { + "attempt_elapsed": { + "description": "The amount of time the previous attempt took.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "attempt_retry" + ] + }, + "message": { + "description": "A message associated with the retry.", + "type": "string" + }, + "next_attempt": { + "description": "The attempt number for the next attempt.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "step": { + "description": "Information about the step.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + } + ] + }, + "step_elapsed": { + "description": "Total time elapsed since the start of the step. 
Includes prior attempts.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + } + }, + "required": [ + "attempt_elapsed", + "kind", + "message", + "next_attempt", + "step", + "step_elapsed" + ] + }, + { + "description": "A step is complete and the next step has been started.", + "type": "object", + "properties": { + "attempt": { + "description": "The attempt number that completed.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "attempt_elapsed": { + "description": "The time it took for this attempt to complete.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "step_completed" + ] + }, + "next_step": { + "description": "The next step that is being started.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + } + ] + }, + "outcome": { + "description": "The outcome of the step.", + "allOf": [ + { + "$ref": "#/components/schemas/StepOutcomeForWicketdEngineSpec" + } + ] + }, + "step": { + "description": "Information about the step that just completed.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + } + ] + }, + "step_elapsed": { + "description": "Total time elapsed since the start of the step. 
Includes prior attempts.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + } + }, + "required": [ + "attempt", + "attempt_elapsed", + "kind", + "next_step", + "outcome", + "step", + "step_elapsed" + ] + }, + { + "description": "Execution is complete.\n\nThis is a terminal event: it is guaranteed that no more events will be seen after this one.", + "type": "object", + "properties": { + "attempt_elapsed": { + "description": "The time it took for this attempt to complete.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "execution_completed" + ] + }, + "last_attempt": { + "description": "The attempt number that completed.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "last_outcome": { + "description": "The outcome of the last step.", + "allOf": [ + { + "$ref": "#/components/schemas/StepOutcomeForWicketdEngineSpec" + } + ] + }, + "last_step": { + "description": "Information about the last step that completed.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + } + ] + }, + "step_elapsed": { + "description": "Total time elapsed since the start of the step. 
Includes prior attempts.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + } + }, + "required": [ + "attempt_elapsed", + "kind", + "last_attempt", + "last_outcome", + "last_step", + "step_elapsed" + ] + }, + { + "description": "Execution failed.\n\nThis is a terminal event: it is guaranteed that no more events will be seen after this one.", + "type": "object", + "properties": { + "attempt_elapsed": { + "description": "The time it took for this attempt to complete.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "causes": { + "description": "A chain of causes associated with the failure.", + "type": "array", + "items": { + "type": "string" + } + }, + "failed_step": { + "description": "Information about the step that failed.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "execution_failed" + ] + }, + "message": { + "description": "A message associated with the failure.", + "type": "string" + }, + "step_elapsed": { + "description": "Total time elapsed since the start of the step. 
Includes prior attempts.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "total_attempts": { + "description": "The total number of attempts that were performed before the step failed.", + "type": "integer", + "format": "uint", + "minimum": 0 + } + }, + "required": [ + "attempt_elapsed", + "causes", + "failed_step", + "kind", + "message", + "step_elapsed", + "total_attempts" + ] + }, + { + "description": "Execution aborted by an external user.\n\nThis is a terminal event: it is guaranteed that no more events will be seen after this one.", + "type": "object", + "properties": { + "aborted_step": { + "description": "Information about the step that was running at the time execution was aborted.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + } + ] + }, + "attempt": { + "description": "The attempt that was running at the time the step was aborted.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "attempt_elapsed": { + "description": "The time it took for this attempt to complete.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "execution_aborted" + ] + }, + "message": { + "description": "A message associated with the abort.", + "type": "string" + }, + "step_elapsed": { + "description": "Total time elapsed since the start of the step. 
Includes prior attempts.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + } + }, + "required": [ + "aborted_step", + "attempt", + "attempt_elapsed", + "kind", + "message", + "step_elapsed" + ] + }, + { + "description": "A nested step event occurred.", + "type": "object", + "properties": { + "attempt": { + "description": "The current attempt number.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "attempt_elapsed": { + "description": "The time it took for this attempt to complete.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "event": { + "description": "The event that occurred.", + "allOf": [ + { + "$ref": "#/components/schemas/StepEventForGenericSpec" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "nested" + ] + }, + "step": { + "description": "Information about the step that's occurring.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoWithMetadataForWicketdEngineSpec" + } + ] + }, + "step_elapsed": { + "description": "Total time elapsed since the start of the step. Includes prior attempts.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + } + }, + "required": [ + "attempt", + "attempt_elapsed", + "event", + "kind", + "step", + "step_elapsed" + ] + }, + { + "description": "Future variants that might be unknown.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "unknown" + ] + } + }, + "required": [ + "kind" + ] + } + ] + }, + "StepInfoForGenericSpec": { + "description": "Serializable information about a step.", + "type": "object", + "properties": { + "component": { + "description": "The component that this step is part of." 
+ }, + "component_index": { + "description": "The index of the step within the component.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "description": { + "description": "The description for this step.", + "type": "string" + }, + "id": { + "description": "An identifier for this step." + }, + "index": { + "description": "The index of the step within all steps to be executed.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "total_component_steps": { + "description": "The total number of steps in this component.", + "type": "integer", + "format": "uint", + "minimum": 0 + } + }, + "required": [ + "component", + "component_index", + "description", + "id", + "index", + "total_component_steps" + ] + }, + "StepInfoForUplinkPreflightCheckSpec": { + "description": "Serializable information about a step.", + "type": "object", + "properties": { + "component": { + "description": "The component that this step is part of.", + "type": "string" + }, + "component_index": { + "description": "The index of the step within the component.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "description": { + "description": "The description for this step.", + "type": "string" + }, + "id": { + "description": "An identifier for this step.", + "allOf": [ + { + "$ref": "#/components/schemas/UplinkPreflightStepId" + } + ] + }, + "index": { + "description": "The index of the step within all steps to be executed.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "total_component_steps": { + "description": "The total number of steps in this component.", + "type": "integer", + "format": "uint", + "minimum": 0 + } + }, + "required": [ + "component", + "component_index", + "description", + "id", + "index", + "total_component_steps" + ] + }, + "StepInfoForWicketdEngineSpec": { + "description": "Serializable information about a step.", + "type": "object", + "properties": { + "component": { + "description": "The component that this step is 
part of.", + "allOf": [ + { + "$ref": "#/components/schemas/UpdateComponent" + } + ] + }, + "component_index": { + "description": "The index of the step within the component.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "description": { + "description": "The description for this step.", + "type": "string" + }, + "id": { + "description": "An identifier for this step.", + "allOf": [ + { + "$ref": "#/components/schemas/UpdateStepId" + } + ] + }, + "index": { + "description": "The index of the step within all steps to be executed.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "total_component_steps": { + "description": "The total number of steps in this component.", + "type": "integer", + "format": "uint", + "minimum": 0 + } + }, + "required": [ + "component", + "component_index", + "description", + "id", + "index", + "total_component_steps" + ] + }, + "StepInfoWithMetadataForGenericSpec": { + "description": "Serializable information about a step.", + "type": "object", + "properties": { + "info": { + "description": "Information about this step.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoForGenericSpec" + } + ] + }, + "metadata": { + "nullable": true, + "description": "Additional metadata associated with this step." 
+ } + }, + "required": [ + "info" + ] + }, + "StepInfoWithMetadataForUplinkPreflightCheckSpec": { + "description": "Serializable information about a step.", + "type": "object", + "properties": { + "info": { + "description": "Information about this step.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoForUplinkPreflightCheckSpec" + } + ] + }, + "metadata": { + "nullable": true, + "description": "Additional metadata associated with this step.", + "type": "string", + "enum": [ + null + ] + } + }, + "required": [ + "info" + ] + }, + "StepInfoWithMetadataForWicketdEngineSpec": { + "description": "Serializable information about a step.", + "type": "object", + "properties": { + "info": { + "description": "Information about this step.", + "allOf": [ + { + "$ref": "#/components/schemas/StepInfoForWicketdEngineSpec" + } + ] + }, + "metadata": { + "nullable": true, + "description": "Additional metadata associated with this step." + } + }, + "required": [ + "info" + ] + }, + "StepOutcomeForGenericSpec": { + "oneOf": [ + { + "description": "The step completed successfully.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "success" + ] + }, + "message": { + "nullable": true, + "description": "An optional message associated with this step.", + "type": "string" + }, + "metadata": { + "nullable": true, + "description": "Optional completion metadata associated with the step." + } + }, + "required": [ + "kind" + ] + }, + { + "description": "The step completed with a warning.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "warning" + ] + }, + "message": { + "description": "A warning message.", + "type": "string" + }, + "metadata": { + "nullable": true, + "description": "Optional completion metadata associated with the step." 
+ } + }, + "required": [ + "kind", + "message" + ] + }, + { + "description": "The step was skipped with a message.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "skipped" ] }, "message": { @@ -4673,6 +5517,94 @@ } ] }, + "StepOutcomeForUplinkPreflightCheckSpec": { + "oneOf": [ + { + "description": "The step completed successfully.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "success" + ] + }, + "message": { + "nullable": true, + "description": "An optional message associated with this step.", + "type": "string" + }, + "metadata": { + "nullable": true, + "description": "Optional completion metadata associated with the step.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "kind" + ] + }, + { + "description": "The step completed with a warning.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "warning" + ] + }, + "message": { + "description": "A warning message.", + "type": "string" + }, + "metadata": { + "nullable": true, + "description": "Optional completion metadata associated with the step.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "kind", + "message" + ] + }, + { + "description": "The step was skipped with a message.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "skipped" + ] + }, + "message": { + "description": "Message associated with the skip.", + "type": "string" + }, + "metadata": { + "nullable": true, + "description": "Optional metadata associated with the skip.", + "type": "string", + "enum": [ + null + ] + } + }, + "required": [ + "kind", + "message" + ] + } + ] + }, "StepOutcomeForWicketdEngineSpec": { "oneOf": [ { @@ -5063,6 +5995,136 @@ "address" ] }, + "UplinkPreflightStepId": { + "oneOf": [ + { + "type": "object", + "properties": { + "id": { + "type": "string", + "enum": [ + "configure_switch" + ] + } + }, + 
"required": [ + "id" + ] + }, + { + "type": "object", + "properties": { + "id": { + "type": "string", + "enum": [ + "wait_for_l1_link" + ] + } + }, + "required": [ + "id" + ] + }, + { + "type": "object", + "properties": { + "id": { + "type": "string", + "enum": [ + "configure_address" + ] + } + }, + "required": [ + "id" + ] + }, + { + "type": "object", + "properties": { + "id": { + "type": "string", + "enum": [ + "configure_routing" + ] + } + }, + "required": [ + "id" + ] + }, + { + "type": "object", + "properties": { + "id": { + "type": "string", + "enum": [ + "check_external_dns_connectivity" + ] + } + }, + "required": [ + "id" + ] + }, + { + "type": "object", + "properties": { + "id": { + "type": "string", + "enum": [ + "check_external_ntp_connectivity" + ] + } + }, + "required": [ + "id" + ] + }, + { + "type": "object", + "properties": { + "id": { + "type": "string", + "enum": [ + "cleanup_routing" + ] + } + }, + "required": [ + "id" + ] + }, + { + "type": "object", + "properties": { + "id": { + "type": "string", + "enum": [ + "cleanup_address" + ] + } + }, + "required": [ + "id" + ] + }, + { + "type": "object", + "properties": { + "id": { + "type": "string", + "enum": [ + "cleanup_l1" + ] + } + }, + "required": [ + "id" + ] + } + ] + }, "UserSpecifiedBgpPeerConfig": { "description": "User-specified version of [`BgpPeerConfig`].\n\nThis is similar to [`BgpPeerConfig`], except it doesn't have the sensitive `md5_auth_key` parameter, instead requiring that the user provide the key separately.\n\n[`BgpPeerConfig`]: omicron_common::api::internal::shared::BgpPeerConfig", "type": "object", diff --git a/tools/generate-wicketd-api.sh b/tools/generate-wicketd-api.sh deleted file mode 100755 index 3fbddee5af..0000000000 --- a/tools/generate-wicketd-api.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash - -./target/debug/wicketd openapi > openapi/wicketd.json diff --git a/wicket-common/Cargo.toml b/wicket-common/Cargo.toml index 9a82b3d8bd..3c24cea805 100644 --- 
a/wicket-common/Cargo.toml +++ b/wicket-common/Cargo.toml @@ -9,6 +9,8 @@ workspace = true [dependencies] anyhow.workspace = true +dpd-client.workspace = true +dropshot.workspace = true gateway-client.workspace = true maplit.workspace = true omicron-common.workspace = true @@ -20,7 +22,9 @@ serde.workspace = true serde_json.workspace = true sha2.workspace = true sled-hardware-types.workspace = true +slog.workspace = true thiserror.workspace = true +tokio.workspace = true update-engine.workspace = true [dev-dependencies] diff --git a/wicket-common/src/example.rs b/wicket-common/src/example.rs index 16b5df6768..bb70273b45 100644 --- a/wicket-common/src/example.rs +++ b/wicket-common/src/example.rs @@ -6,7 +6,6 @@ use std::{collections::BTreeSet, net::Ipv6Addr}; -use gateway_client::types::{SpIdentifier, SpType}; use maplit::{btreemap, btreeset}; use omicron_common::{ address::{IpRange, Ipv4Range}, @@ -19,11 +18,14 @@ use omicron_common::{ }; use sled_hardware_types::Baseboard; -use crate::rack_setup::{ - BgpAuthKeyId, BootstrapSledDescription, CurrentRssUserConfigInsensitive, - PutRssUserConfigInsensitive, UserSpecifiedBgpPeerConfig, - UserSpecifiedImportExportPolicy, UserSpecifiedPortConfig, - UserSpecifiedRackNetworkConfig, +use crate::{ + inventory::{SpIdentifier, SpType}, + rack_setup::{ + BgpAuthKeyId, BootstrapSledDescription, + CurrentRssUserConfigInsensitive, PutRssUserConfigInsensitive, + UserSpecifiedBgpPeerConfig, UserSpecifiedImportExportPolicy, + UserSpecifiedPortConfig, UserSpecifiedRackNetworkConfig, + }, }; /// A collection of example data structures. diff --git a/wicketd/src/inventory.rs b/wicket-common/src/inventory.rs similarity index 77% rename from wicketd/src/inventory.rs rename to wicket-common/src/inventory.rs index e1465147b5..f7b42e4ec0 100644 --- a/wicketd/src/inventory.rs +++ b/wicket-common/src/inventory.rs @@ -2,17 +2,25 @@ // License, v. 2.0. 
If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Rack inventory for display by wicket - -use gateway_client::types::{ - RotSlot, SpComponentCaboose, SpComponentInfo, SpIdentifier, SpIgnition, - SpState, +// Re-export these types from gateway_client, so that users are oblivious to +// where these types come from. +pub use gateway_client::types::{ + RotSlot, RotState, SpComponentCaboose, SpComponentInfo, + SpComponentPresence, SpIdentifier, SpIgnition, SpIgnitionSystemType, + SpState, SpType, }; use schemars::JsonSchema; -use serde::Serialize; +use serde::{Deserialize, Serialize}; + +/// The current state of the v1 Rack as known to wicketd +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] +#[serde(tag = "inventory", rename_all = "snake_case")] +pub struct RackV1Inventory { + pub sps: Vec, +} /// SP-related data -#[derive(Debug, Clone, Serialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] #[serde(tag = "sp_inventory", rename_all = "snake_case")] pub struct SpInventory { pub id: SpIdentifier, @@ -42,7 +50,7 @@ impl SpInventory { } /// RoT-related data that isn't already supplied in [`SpState`]. 
-#[derive(Debug, Clone, Serialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] #[serde(tag = "sp_inventory", rename_all = "snake_case")] pub struct RotInventory { pub active: RotSlot, @@ -53,10 +61,3 @@ pub struct RotInventory { pub caboose_stage0: Option>, pub caboose_stage0next: Option>, } - -/// The current state of the v1 Rack as known to wicketd -#[derive(Clone, Debug, Serialize, JsonSchema)] -#[serde(tag = "inventory", rename_all = "snake_case")] -pub struct RackV1Inventory { - pub sps: Vec, -} diff --git a/wicket-common/src/lib.rs b/wicket-common/src/lib.rs index c5ddabdb1c..7ad676e976 100644 --- a/wicket-common/src/lib.rs +++ b/wicket-common/src/lib.rs @@ -7,6 +7,8 @@ use std::time::Duration; pub mod example; +pub mod inventory; +pub mod preflight_check; pub mod rack_setup; pub mod rack_update; pub mod update_events; diff --git a/wicket-common/src/preflight_check.rs b/wicket-common/src/preflight_check.rs new file mode 100644 index 0000000000..e13be0a9d7 --- /dev/null +++ b/wicket-common/src/preflight_check.rs @@ -0,0 +1,79 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +use std::net::IpAddr; + +use dpd_client::types::PortId; +use oxnet::IpNet; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use update_engine::StepSpec; + +#[derive(Debug, Error)] +pub enum UplinkPreflightTerminalError { + #[error("invalid port name: {0}")] + InvalidPortName(String), + #[error("failed to connect to dpd to check for current configuration")] + GetCurrentConfig(#[source] DpdError), + #[error("uplink already configured - is rack already initialized?")] + UplinkAlreadyConfigured, + #[error("failed to create port {port_id:?}")] + ConfigurePort { + #[source] + err: DpdError, + port_id: PortId, + }, + #[error( + "failed to remove host OS route {destination} -> {nexthop}: {err}" + )] + RemoveHostRoute { err: String, destination: IpNet, nexthop: IpAddr }, + #[error("failed to remove uplink SMF property {property:?}: {err}")] + RemoveSmfProperty { property: String, err: String }, + #[error("failed to refresh uplink service config: {0}")] + RefreshUplinkSmf(String), + #[error("failed to clear settings for port {port_id:?}")] + UnconfigurePort { + #[source] + err: DpdError, + port_id: PortId, + }, +} + +impl update_engine::AsError for UplinkPreflightTerminalError { + fn as_error(&self) -> &(dyn std::error::Error + 'static) { + self + } +} + +type DpdError = dpd_client::Error; + +#[derive(JsonSchema)] +pub enum UplinkPreflightCheckSpec {} + +impl StepSpec for UplinkPreflightCheckSpec { + type Component = String; + type StepId = UplinkPreflightStepId; + type StepMetadata = (); + type ProgressMetadata = String; + type CompletionMetadata = Vec; + type SkippedMetadata = (); + type Error = UplinkPreflightTerminalError; +} + +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, JsonSchema)] +#[serde(tag = "id", rename_all = "snake_case")] +pub enum UplinkPreflightStepId { + ConfigureSwitch, + WaitForL1Link, + ConfigureAddress, + ConfigureRouting, + CheckExternalDnsConnectivity, + CheckExternalNtpConnectivity, + 
CleanupRouting, + CleanupAddress, + CleanupL1, +} + +update_engine::define_update_engine!(pub UplinkPreflightCheckSpec); diff --git a/wicket-common/src/rack_setup.rs b/wicket-common/src/rack_setup.rs index 33fbcb65b3..7fd83e522a 100644 --- a/wicket-common/src/rack_setup.rs +++ b/wicket-common/src/rack_setup.rs @@ -4,8 +4,6 @@ // Copyright 2024 Oxide Computer Company -pub use gateway_client::types::SpIdentifier as GatewaySpIdentifier; -pub use gateway_client::types::SpType as GatewaySpType; use omicron_common::address; use omicron_common::api::external::ImportExportPolicy; use omicron_common::api::external::Name; @@ -36,6 +34,8 @@ use std::net::Ipv4Addr; use std::net::Ipv6Addr; use std::str::FromStr; +use crate::inventory::SpIdentifier; + /// The subset of `RackInitializeRequest` that the user fills in as clear text /// (e.g., via an uploaded config file). #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] @@ -84,10 +84,7 @@ pub struct PutRssUserConfigInsensitive { Ord, )] pub struct BootstrapSledDescription { - // TODO: We currently use gateway-client's SpIdentifier here, not our own, - // to avoid wicketd-client getting an "SpIdentifier2". We really do need to - // unify this type once and forever. - pub id: GatewaySpIdentifier, + pub id: SpIdentifier, pub baseboard: Baseboard, /// The sled's bootstrap address, if the host is on and we've discovered it /// on the bootstrap network. diff --git a/wicket-common/src/rack_update.rs b/wicket-common/src/rack_update.rs index 4fa3ea371c..e5d96db726 100644 --- a/wicket-common/src/rack_update.rs +++ b/wicket-common/src/rack_update.rs @@ -4,90 +4,79 @@ // Copyright 2023 Oxide Computer Company -use std::{collections::BTreeSet, fmt}; +use std::{collections::BTreeSet, time::Duration}; +use dropshot::HttpError; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -// TODO: unify this with the one in gateway http_entrypoints.rs. 
-#[derive( - Debug, - Clone, - Copy, - PartialEq, - Eq, - PartialOrd, - Ord, - Serialize, - Deserialize, - JsonSchema, -)] -pub struct SpIdentifier { - #[serde(rename = "type")] - pub type_: SpType, - pub slot: u32, -} +use crate::inventory::SpIdentifier; -impl From for gateway_client::types::SpIdentifier { - fn from(value: SpIdentifier) -> Self { - Self { type_: value.type_.into(), slot: value.slot } - } -} +#[derive(Clone, Debug, Default, JsonSchema, Deserialize, Serialize)] +pub struct StartUpdateOptions { + /// If passed in, fails the update with a simulated error. + pub test_error: Option, -impl From for SpIdentifier { - fn from(value: gateway_client::types::SpIdentifier) -> Self { - Self { type_: value.type_.into(), slot: value.slot } - } -} + /// If passed in, creates a test step that lasts these many seconds long. + /// + /// This is used for testing. + pub test_step_seconds: Option, -#[derive( - Debug, - Clone, - Copy, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - JsonSchema, -)] -#[serde(rename_all = "lowercase")] -pub enum SpType { - Switch, - Sled, - Power, + /// If passed in, simulates a result for the RoT Bootloader update. + /// + /// This is used for testing. + pub test_simulate_rot_bootloader_result: Option, + + /// If passed in, simulates a result for the RoT update. + /// + /// This is used for testing. + pub test_simulate_rot_result: Option, + + /// If passed in, simulates a result for the SP update. + /// + /// This is used for testing. + pub test_simulate_sp_result: Option, + + /// If true, skip the check on the current RoT bootloader version and always + /// update it regardless of whether the update appears to be needed. + pub skip_rot_bootloader_version_check: bool, + + /// If true, skip the check on the current RoT version and always update it + /// regardless of whether the update appears to be needed. 
+ pub skip_rot_version_check: bool, + + /// If true, skip the check on the current SP version and always update it + /// regardless of whether the update appears to be needed. + pub skip_sp_version_check: bool, } -impl From for gateway_client::types::SpType { - fn from(value: SpType) -> Self { - match value { - SpType::Switch => Self::Switch, - SpType::Sled => Self::Sled, - SpType::Power => Self::Power, - } - } +/// A simulated result for a component update. +/// +/// Used by [`StartUpdateOptions`]. +#[derive(Clone, Debug, JsonSchema, Deserialize, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum UpdateSimulatedResult { + Success, + Warning, + Skipped, + Failure, } -impl From for SpType { - fn from(value: gateway_client::types::SpType) -> Self { - match value { - gateway_client::types::SpType::Switch => Self::Switch, - gateway_client::types::SpType::Sled => Self::Sled, - gateway_client::types::SpType::Power => Self::Power, - } - } +#[derive(Clone, Debug, JsonSchema, Deserialize, Serialize)] +pub struct ClearUpdateStateOptions { + /// If passed in, fails the clear update state operation with a simulated + /// error. + pub test_error: Option, } -impl fmt::Display for SpType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - SpType::Switch => write!(f, "switch"), - SpType::Sled => write!(f, "sled"), - SpType::Power => write!(f, "power"), - } - } +#[derive(Clone, Debug, JsonSchema, Deserialize, Serialize)] +pub struct AbortUpdateOptions { + /// The message to abort the update with. + pub message: String, + + /// If passed in, fails the force cancel update operation with a simulated + /// error. + pub test_error: Option, } #[derive( @@ -100,3 +89,47 @@ pub struct ClearUpdateStateResponse { /// The SPs that had no update state to clear. 
pub no_update_data: BTreeSet, } + +#[derive( + Copy, Clone, Debug, JsonSchema, Deserialize, Serialize, PartialEq, Eq, +)] +#[serde(rename_all = "snake_case", tag = "kind", content = "content")] +pub enum UpdateTestError { + /// Simulate an error where the operation fails to complete. + Fail, + + /// Simulate an issue where the operation times out. + Timeout { + /// The number of seconds to time out after. + secs: u64, + }, +} + +impl UpdateTestError { + pub async fn into_http_error( + self, + log: &slog::Logger, + reason: &str, + ) -> HttpError { + let message = self.into_error_string(log, reason).await; + HttpError::for_bad_request(None, message) + } + + pub async fn into_error_string( + self, + log: &slog::Logger, + reason: &str, + ) -> String { + match self { + UpdateTestError::Fail => { + format!("Simulated failure while {reason}") + } + UpdateTestError::Timeout { secs } => { + slog::info!(log, "Simulating timeout while {reason}"); + // 15 seconds should be enough to cause a timeout. 
+ tokio::time::sleep(Duration::from_secs(secs)).await; + "XXX request should time out before this is hit".into() + } + } + } +} diff --git a/wicket/src/cli/preflight.rs b/wicket/src/cli/preflight.rs index 29b6d2a5cb..3e8d5027ba 100644 --- a/wicket/src/cli/preflight.rs +++ b/wicket/src/cli/preflight.rs @@ -17,12 +17,11 @@ use std::borrow::Cow; use std::fmt::Display; use std::net::SocketAddrV6; use std::time::Duration; -use update_engine::events::StepEvent; -use update_engine::events::StepEventKind; -use update_engine::events::StepInfo; -use update_engine::events::StepInfoWithMetadata; -use update_engine::events::StepOutcome; -use update_engine::StepSpec; +use wicket_common::preflight_check::StepEvent; +use wicket_common::preflight_check::StepEventKind; +use wicket_common::preflight_check::StepInfo; +use wicket_common::preflight_check::StepInfoWithMetadata; +use wicket_common::preflight_check::StepOutcome; use wicketd_client::types::PreflightUplinkCheckOptions; use wicketd_client::Client; @@ -141,12 +140,10 @@ async fn poll_uplink_status_until_complete(client: Client) -> Result<()> { } } -fn print_completed_steps< - S: StepSpec, ->( - step_events: Vec>, +fn print_completed_steps( + step_events: Vec, last_seen: &mut Option, - all_steps: &mut Option>>, + all_steps: &mut Option>, progress_bar: &mut Option, execution_failed: &mut bool, ) -> Result<()> { @@ -228,9 +225,9 @@ fn print_completed_steps< Ok(()) } -fn print_completed_step>( - info: &StepInfoWithMetadata, - outcome: &StepOutcome, +fn print_completed_step( + info: &StepInfoWithMetadata, + outcome: &StepOutcome, step_elapsed: Duration, ) { let icon = icon_for_outcome(outcome); @@ -243,8 +240,8 @@ fn print_completed_step>( ); } -fn print_failed_step( - info: &StepInfoWithMetadata, +fn print_failed_step( + info: &StepInfoWithMetadata, step_elapsed: Duration, message: String, ) { @@ -252,27 +249,22 @@ fn print_failed_step( print_step(icon, info, step_elapsed, None, Some(&Cow::from(message))); } -fn print_step( +fn 
print_step( icon: impl Display, - info: &StepInfoWithMetadata, + info: &StepInfoWithMetadata, step_elapsed: Duration, - outcome_metadata: Option<&serde_json::Value>, + outcome_metadata: Option<&Vec>, message: Option<&Cow<'static, str>>, ) { println!("{icon} {} ({:?})", info.info.description, step_elapsed); if let Some(metadata) = outcome_metadata { - if let Some(array) = metadata.as_array() { - for element in array { - if let Some(s) = element.as_str() { - println!(" {s}"); - } else { - println!(" unexpected metadata type: {element:?}"); - } - } - } else { - println!(" unexpected metadata type: {metadata:?}"); + for element in metadata { + println!(" {element}"); } + } else { + println!(" missing metadata"); } + if let Some(message) = message { for line in message.split('\n') { println!(" {line}"); @@ -280,7 +272,7 @@ fn print_step( } } -fn icon_for_outcome(outcome: &StepOutcome) -> Box { +fn icon_for_outcome(outcome: &StepOutcome) -> Box { match outcome { StepOutcome::Success { .. } => Box::new('✔'.green()), StepOutcome::Warning { .. } => Box::new('âš '.red()), diff --git a/wicket/src/cli/rack_setup/config_toml.rs b/wicket/src/cli/rack_setup/config_toml.rs index cef3746ff9..68485815a8 100644 --- a/wicket/src/cli/rack_setup/config_toml.rs +++ b/wicket/src/cli/rack_setup/config_toml.rs @@ -23,9 +23,9 @@ use toml_edit::InlineTable; use toml_edit::Item; use toml_edit::Table; use toml_edit::Value; +use wicket_common::inventory::SpType; use wicket_common::rack_setup::BootstrapSledDescription; use wicket_common::rack_setup::CurrentRssUserConfigInsensitive; -use wicket_common::rack_setup::GatewaySpType; use wicket_common::rack_setup::UserSpecifiedBgpPeerConfig; use wicket_common::rack_setup::UserSpecifiedImportExportPolicy; use wicket_common::rack_setup::UserSpecifiedPortConfig; @@ -206,7 +206,7 @@ fn build_sleds_array(sleds: &BTreeSet) -> Array { for sled in sleds { // We should never get a non-sled from wicketd; if we do, filter it out. 
- if sled.id.type_ != GatewaySpType::Sled { + if sled.id.type_ != SpType::Sled { continue; } diff --git a/wicket/src/events.rs b/wicket/src/events.rs index fd0ac086ad..36480d261f 100644 --- a/wicket/src/events.rs +++ b/wicket/src/events.rs @@ -8,10 +8,11 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::fs::File; use std::time::{Duration, SystemTime}; +use wicket_common::inventory::RackV1Inventory; use wicket_common::update_events::EventReport; use wicketd_client::types::{ ArtifactId, CurrentRssUserConfig, GetLocationResponse, IgnitionCommand, - RackOperationStatus, RackV1Inventory, SemverVersion, + RackOperationStatus, SemverVersion, }; /// Event report type returned by the get_artifacts_and_event_reports API call. diff --git a/wicket/src/helpers.rs b/wicket/src/helpers.rs index 564b7e9348..bb4155231c 100644 --- a/wicket/src/helpers.rs +++ b/wicket/src/helpers.rs @@ -7,7 +7,7 @@ use std::env::VarError; use anyhow::{bail, Context}; -use wicketd_client::types::{UpdateSimulatedResult, UpdateTestError}; +use wicket_common::rack_update::{UpdateSimulatedResult, UpdateTestError}; pub(crate) fn get_update_test_error( env_var: &str, diff --git a/wicket/src/runner.rs b/wicket/src/runner.rs index 77fbb82df8..3af68ccbec 100644 --- a/wicket/src/runner.rs +++ b/wicket/src/runner.rs @@ -22,7 +22,7 @@ use tokio::sync::mpsc::{ unbounded_channel, UnboundedReceiver, UnboundedSender, }; use tokio::time::{interval, Duration}; -use wicketd_client::types::AbortUpdateOptions; +use wicket_common::rack_update::AbortUpdateOptions; use crate::events::EventReportMap; use crate::helpers::get_update_test_error; diff --git a/wicket/src/state/inventory.rs b/wicket/src/state/inventory.rs index 0ab187cc48..8155efb606 100644 --- a/wicket/src/state/inventory.rs +++ b/wicket/src/state/inventory.rs @@ -11,10 +11,9 @@ use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; use std::fmt::Display; use std::iter::Iterator; -use wicket_common::rack_update::SpType; 
-use wicketd_client::types::{ +use wicket_common::inventory::{ RackV1Inventory, RotInventory, RotSlot, SpComponentCaboose, - SpComponentInfo, SpIgnition, SpState, + SpComponentInfo, SpIgnition, SpState, SpType, }; pub static ALL_COMPONENT_IDS: Lazy> = Lazy::new(|| { @@ -173,18 +172,23 @@ impl Component { } pub fn stage0_version(&self) -> String { - version_or_unknown( - self.sp().rot.as_ref().and_then(|rot| rot.caboose_stage0.as_ref()), - ) + version_or_unknown(self.sp().rot.as_ref().and_then(|rot| { + // caboose_stage0 is an Option>, so we + // need to unwrap it twice, effectively. flatten would be nice but + // it doesn't work on Option<&Option>, which is what we end up + // with. + rot.caboose_stage0.as_ref().map_or(None, |x| x.as_ref()) + })) } pub fn stage0next_version(&self) -> String { - version_or_unknown( - self.sp() - .rot - .as_ref() - .and_then(|rot| rot.caboose_stage0next.as_ref()), - ) + version_or_unknown(self.sp().rot.as_ref().and_then(|rot| { + // caboose_stage0next is an Option>, so we + // need to unwrap it twice, effectively. flatten would be nice but + // it doesn't work on Option<&Option>, which is what we end up + // with. 
+ rot.caboose_stage0next.as_ref().map_or(None, |x| x.as_ref()) + })) } } diff --git a/wicket/src/state/update.rs b/wicket/src/state/update.rs index 31876365e2..3e0c89e83e 100644 --- a/wicket/src/state/update.rs +++ b/wicket/src/state/update.rs @@ -4,6 +4,7 @@ use anyhow::Result; use ratatui::style::Style; +use wicket_common::rack_update::{ClearUpdateStateOptions, StartUpdateOptions}; use wicket_common::update_events::{ EventReport, ProgressEventKind, StepEventKind, UpdateComponent, UpdateStepId, @@ -18,9 +19,7 @@ use serde::{Deserialize, Serialize}; use slog::Logger; use std::collections::BTreeMap; use std::fmt::Display; -use wicketd_client::types::{ - ArtifactId, ClearUpdateStateOptions, SemverVersion, StartUpdateOptions, -}; +use wicketd_client::types::{ArtifactId, SemverVersion}; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct RackUpdateState { diff --git a/wicket/src/ui/panes/overview.rs b/wicket/src/ui/panes/overview.rs index 45d02311aa..00da6396c2 100644 --- a/wicket/src/ui/panes/overview.rs +++ b/wicket/src/ui/panes/overview.rs @@ -22,12 +22,12 @@ use ratatui::style::Style; use ratatui::text::{Line, Span, Text}; use ratatui::widgets::{Block, BorderType, Borders, Paragraph}; use ratatui::Frame; -use wicketd_client::types::RotState; -use wicketd_client::types::SpComponentCaboose; -use wicketd_client::types::SpComponentInfo; -use wicketd_client::types::SpComponentPresence; -use wicketd_client::types::SpIgnition; -use wicketd_client::types::SpState; +use wicket_common::inventory::RotState; +use wicket_common::inventory::SpComponentCaboose; +use wicket_common::inventory::SpComponentInfo; +use wicket_common::inventory::SpComponentPresence; +use wicket_common::inventory::SpIgnition; +use wicket_common::inventory::SpState; enum PopupKind { Ignition, @@ -844,9 +844,9 @@ fn inventory_description(component: &Component) -> Text { ] .into(), ); - if let Some(caboose) = - sp.rot().and_then(|r| r.caboose_stage0.as_ref()) - { + if let Some(caboose) = 
sp.rot().and_then(|r| { + r.caboose_stage0.as_ref().map_or(None, |x| x.as_ref()) + }) { append_caboose(&mut spans, nest_bullet(), caboose); } else { spans.push( @@ -889,9 +889,9 @@ fn inventory_description(component: &Component) -> Text { ] .into(), ); - if let Some(caboose) = - sp.rot().and_then(|r| r.caboose_stage0next.as_ref()) - { + if let Some(caboose) = sp.rot().and_then(|r| { + r.caboose_stage0next.as_ref().map_or(None, |x| x.as_ref()) + }) { append_caboose(&mut spans, nest_bullet(), caboose); } else { spans.push( diff --git a/wicket/src/ui/panes/update.rs b/wicket/src/ui/panes/update.rs index 09b119443d..3a61e25a3a 100644 --- a/wicket/src/ui/panes/update.rs +++ b/wicket/src/ui/panes/update.rs @@ -33,11 +33,12 @@ use update_engine::{ AbortReason, CompletionReason, ExecutionStatus, FailureReason, StepKey, TerminalKind, WillNotBeRunReason, }; +use wicket_common::inventory::RotSlot; use wicket_common::update_events::{ EventBuffer, EventReport, ProgressEvent, StepOutcome, StepStatus, UpdateComponent, }; -use wicketd_client::types::{RotSlot, SemverVersion}; +use wicketd_client::types::SemverVersion; const MAX_COLUMN_WIDTH: u16 = 25; diff --git a/wicket/src/ui/widgets/rack.rs b/wicket/src/ui/widgets/rack.rs index 7aa0c7d652..42ebf39d02 100644 --- a/wicket/src/ui/widgets/rack.rs +++ b/wicket/src/ui/widgets/rack.rs @@ -17,7 +17,7 @@ use ratatui::widgets::Borders; use ratatui::widgets::Paragraph; use ratatui::widgets::Widget; use std::collections::BTreeMap; -use wicketd_client::types::SpIgnition; +use wicket_common::inventory::SpIgnition; #[derive(Debug, Clone)] pub struct Rack<'a> { diff --git a/wicket/src/wicketd.rs b/wicket/src/wicketd.rs index c0ee3d9b14..dce1c7d286 100644 --- a/wicket/src/wicketd.rs +++ b/wicket/src/wicketd.rs @@ -9,12 +9,14 @@ use std::convert::From; use std::net::SocketAddrV6; use tokio::sync::mpsc::{self, Sender, UnboundedSender}; use tokio::time::{interval, Duration, MissedTickBehavior}; -use wicket_common::rack_update::{SpIdentifier, 
SpType}; +use wicket_common::inventory::{SpIdentifier, SpType}; +use wicket_common::rack_update::{ + AbortUpdateOptions, ClearUpdateStateOptions, StartUpdateOptions, +}; use wicket_common::WICKETD_TIMEOUT; use wicketd_client::types::{ - AbortUpdateOptions, ClearUpdateStateOptions, ClearUpdateStateParams, - GetInventoryParams, GetInventoryResponse, GetLocationResponse, - IgnitionCommand, StartUpdateOptions, StartUpdateParams, + ClearUpdateStateParams, GetInventoryParams, GetInventoryResponse, + GetLocationResponse, IgnitionCommand, StartUpdateParams, }; use crate::events::EventReportMap; diff --git a/wicketd-api/Cargo.toml b/wicketd-api/Cargo.toml new file mode 100644 index 0000000000..ba1d862a40 --- /dev/null +++ b/wicketd-api/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "wicketd-api" +version = "0.1.0" +edition = "2021" + +[lints] +workspace = true + +[dependencies] +bootstrap-agent-client.workspace = true +dropshot.workspace = true +gateway-client.workspace = true +omicron-common.workspace = true +omicron-passwords.workspace = true +omicron-workspace-hack.workspace = true +schemars.workspace = true +serde.workspace = true +sled-hardware-types.workspace = true +slog.workspace = true +wicket-common.workspace = true diff --git a/wicketd-api/src/lib.rs b/wicketd-api/src/lib.rs new file mode 100644 index 0000000000..9192578305 --- /dev/null +++ b/wicketd-api/src/lib.rs @@ -0,0 +1,545 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +use bootstrap_agent_client::types::RackInitId; +use bootstrap_agent_client::types::RackOperationStatus; +use bootstrap_agent_client::types::RackResetId; +use dropshot::HttpError; +use dropshot::HttpResponseOk; +use dropshot::HttpResponseUpdatedNoContent; +use dropshot::Path; +use dropshot::RequestContext; +use dropshot::StreamingBody; +use dropshot::TypedBody; +use gateway_client::types::IgnitionCommand; +use omicron_common::api::external::SemverVersion; +use omicron_common::update::ArtifactHashId; +use omicron_common::update::ArtifactId; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use sled_hardware_types::Baseboard; +use std::collections::BTreeMap; +use std::collections::BTreeSet; +use std::net::Ipv6Addr; +use std::time::Duration; +use wicket_common::inventory::RackV1Inventory; +use wicket_common::inventory::SpIdentifier; +use wicket_common::inventory::SpType; +use wicket_common::preflight_check; +use wicket_common::rack_setup::BgpAuthKey; +use wicket_common::rack_setup::BgpAuthKeyId; +use wicket_common::rack_setup::CurrentRssUserConfigInsensitive; +use wicket_common::rack_setup::GetBgpAuthKeyInfoResponse; +use wicket_common::rack_setup::PutRssUserConfigInsensitive; +use wicket_common::rack_update::AbortUpdateOptions; +use wicket_common::rack_update::ClearUpdateStateOptions; +use wicket_common::rack_update::ClearUpdateStateResponse; +use wicket_common::rack_update::StartUpdateOptions; +use wicket_common::update_events::EventReport; + +#[dropshot::api_description { + module = "wicketd_api_mod", +}] +pub trait WicketdApi { + type Context; + + /// Get wicketd's current view of all sleds visible on the bootstrap network. + #[endpoint { + method = GET, + path = "/bootstrap-sleds" + }] + async fn get_bootstrap_sleds( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Get the current status of the user-provided (or system-default-provided, in + /// some cases) RSS configuration. 
+ #[endpoint { + method = GET, + path = "/rack-setup/config" + }] + async fn get_rss_config( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Update (a subset of) the current RSS configuration. + /// + /// Sensitive values (certificates and password hash) are not set through + /// this endpoint. + #[endpoint { + method = PUT, + path = "/rack-setup/config" + }] + async fn put_rss_config( + rqctx: RequestContext, + body: TypedBody, + ) -> Result; + + /// Add an external certificate. + /// + /// This must be paired with its private key. They may be posted in either + /// order, but one cannot post two certs in a row (or two keys in a row). + #[endpoint { + method = POST, + path = "/rack-setup/config/cert" + }] + async fn post_rss_config_cert( + rqctx: RequestContext, + body: TypedBody, + ) -> Result, HttpError>; + + /// Add the private key of an external certificate. + /// + /// This must be paired with its certificate. They may be posted in either + /// order, but one cannot post two keys in a row (or two certs in a row). + #[endpoint { + method = POST, + path = "/rack-setup/config/key" + }] + async fn post_rss_config_key( + rqctx: RequestContext, + body: TypedBody, + ) -> Result, HttpError>; + + // -- BGP authentication key management + + /// Return information about BGP authentication keys, including checking + /// validity of keys. + /// + /// Produces an error if the rack setup config wasn't set, or if any of the + /// requested key IDs weren't found. + #[endpoint( + method = GET, + path = "/rack-setup/config/bgp/auth-key" + )] + async fn get_bgp_auth_key_info( + rqctx: RequestContext, + // A bit weird for a GET request to have a TypedBody, but there's no other + // nice way to transmit this information as a batch. + params: TypedBody, + ) -> Result, HttpError>; + + /// Set the BGP authentication key for a particular key ID. 
+ #[endpoint { + method = PUT, + path = "/rack-setup/config/bgp/auth-key/{key_id}" + }] + async fn put_bgp_auth_key( + rqctx: RequestContext, + params: Path, + body: TypedBody, + ) -> Result, HttpError>; + + /// Update the RSS config recovery silo user password hash. + #[endpoint { + method = PUT, + path = "/rack-setup/config/recovery-user-password-hash" + }] + async fn put_rss_config_recovery_user_password_hash( + rqctx: RequestContext, + body: TypedBody, + ) -> Result; + + /// Reset all RSS configuration to their default values. + #[endpoint { + method = DELETE, + path = "/rack-setup/config" + }] + async fn delete_rss_config( + rqctx: RequestContext, + ) -> Result; + + /// Query current state of rack setup. + #[endpoint { + method = GET, + path = "/rack-setup" + }] + async fn get_rack_setup_state( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Run rack setup. + /// + /// Will return an error if not all of the rack setup configuration has + /// been populated. + #[endpoint { + method = POST, + path = "/rack-setup" + }] + async fn post_run_rack_setup( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Run rack reset. + #[endpoint { + method = DELETE, + path = "/rack-setup" + }] + async fn post_run_rack_reset( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// A status endpoint used to report high level information known to + /// wicketd. + /// + /// This endpoint can be polled to see if there have been state changes in + /// the system that are useful to report to wicket. + /// + /// Wicket, and possibly other callers, will retrieve the changed + /// information, with follow up calls. + #[endpoint { + method = GET, + path = "/inventory" + }] + async fn get_inventory( + rqctx: RequestContext, + body_params: TypedBody, + ) -> Result, HttpError>; + + /// Upload a TUF repository to the server. + /// + /// At any given time, wicketd will keep at most one TUF repository in + /// memory. 
Any previously-uploaded repositories will be discarded. + #[endpoint { + method = PUT, + path = "/repository", + }] + async fn put_repository( + rqctx: RequestContext, + body: StreamingBody, + ) -> Result; + + /// An endpoint used to report all available artifacts and event reports. + /// + /// The order of the returned artifacts is unspecified, and may change between + /// calls even if the total set of artifacts has not. + #[endpoint { + method = GET, + path = "/artifacts-and-event-reports", + }] + async fn get_artifacts_and_event_reports( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Report the configured baseboard details. + #[endpoint { + method = GET, + path = "/baseboard", + }] + async fn get_baseboard( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Report the identity of the sled and switch we're currently running on / + /// connected to. + #[endpoint { + method = GET, + path = "/location", + }] + async fn get_location( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// An endpoint to start updating one or more sleds, switches and PSCs. + #[endpoint { + method = POST, + path = "/update", + }] + async fn post_start_update( + rqctx: RequestContext, + params: TypedBody, + ) -> Result; + + /// An endpoint to get the status of any update being performed or recently + /// completed on a single SP. + #[endpoint { + method = GET, + path = "/update/{type}/{slot}", + }] + async fn get_update_sp( + rqctx: RequestContext, + target: Path, + ) -> Result, HttpError>; + + /// Forcibly cancels a running update. + /// + /// This is a potentially dangerous operation, but one that is sometimes + /// required. A machine reset might be required after this operation completes. + #[endpoint { + method = POST, + path = "/abort-update/{type}/{slot}", + }] + async fn post_abort_update( + rqctx: RequestContext, + target: Path, + opts: TypedBody, + ) -> Result; + + /// Resets update state for a sled. 
+ /// + /// Use this to clear update state after a failed update. + #[endpoint { + method = POST, + path = "/clear-update-state", + }] + async fn post_clear_update_state( + rqctx: RequestContext, + params: TypedBody, + ) -> Result, HttpError>; + + /// Send an ignition command targeting a specific SP. + /// + /// This endpoint acts as a proxy to the MGS endpoint performing the same + /// function, allowing wicket to communicate exclusively with wicketd (even + /// though wicketd adds no meaningful functionality here beyond what MGS + /// offers). + #[endpoint { + method = POST, + path = "/ignition/{type}/{slot}/{command}", + }] + async fn post_ignition_command( + rqctx: RequestContext, + path: Path, + ) -> Result; + + /// Start a preflight check for uplink configuration. + #[endpoint { + method = POST, + path = "/preflight/uplink", + }] + async fn post_start_preflight_uplink_check( + rqctx: RequestContext, + body: TypedBody, + ) -> Result; + + /// Get the report for the most recent (or still running) preflight uplink + /// check. + #[endpoint { + method = GET, + path = "/preflight/uplink", + }] + async fn get_preflight_uplink_report( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Instruct wicketd to reload its SMF config properties. + /// + /// The only expected client of this endpoint is `curl` from wicketd's SMF + /// `refresh` method, but other clients hitting it is harmless. 
+ #[endpoint { + method = POST, + path = "/reload-config", + }] + async fn post_reload_config( + rqctx: RequestContext, + ) -> Result; +} + +#[derive( + Clone, + Debug, + Serialize, + Deserialize, + JsonSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct BootstrapSledIp { + pub baseboard: Baseboard, + pub ip: Ipv6Addr, +} + +#[derive( + Clone, + Debug, + Serialize, + Deserialize, + JsonSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct BootstrapSledIps { + pub sleds: Vec, +} + +// This is a summary of the subset of `RackInitializeRequest` that is sensitive; +// we only report a summary instead of returning actual data. +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] +pub struct CurrentRssUserConfigSensitive { + pub num_external_certificates: usize, + pub recovery_silo_password_set: bool, + // We define GetBgpAuthKeyInfoResponse in wicket-common and use a + // progenitor replace directive for it, because we don't want typify to + // turn the BTreeMap into a HashMap. Use the same struct here to piggyback + // on that. + pub bgp_auth_keys: GetBgpAuthKeyInfoResponse, +} + +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] +pub struct CurrentRssUserConfig { + pub sensitive: CurrentRssUserConfigSensitive, + pub insensitive: CurrentRssUserConfigInsensitive, +} + +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] +#[serde(tag = "status", rename_all = "snake_case")] +pub enum CertificateUploadResponse { + /// The key has been uploaded, but we're waiting on its corresponding + /// certificate chain. + WaitingOnCert, + /// The cert chain has been uploaded, but we're waiting on its corresponding + /// private key. + WaitingOnKey, + /// A cert chain and its key have been accepted. + CertKeyAccepted, + /// A cert chain and its key are valid, but have already been uploaded. 
+ CertKeyDuplicateIgnored, +} + +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Eq)] +pub struct GetBgpAuthKeyParams { + /// Checks that these keys are valid. + pub check_valid: BTreeSet, +} + +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] +pub struct PutBgpAuthKeyParams { + pub key_id: BgpAuthKeyId, +} + +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Eq)] +pub struct PutBgpAuthKeyBody { + pub key: BgpAuthKey, +} + +#[derive(Clone, Debug, Serialize, JsonSchema, PartialEq)] +pub struct PutBgpAuthKeyResponse { + pub status: SetBgpAuthKeyStatus, +} + +#[derive(Clone, Debug, Serialize, JsonSchema, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum SetBgpAuthKeyStatus { + /// The key was accepted and replaced an old key. + Replaced, + + /// The key was accepted, and is the same as the existing key. + Unchanged, + + /// The key was accepted and is new. + Added, +} + +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] +pub struct PutRssRecoveryUserPasswordHash { + pub hash: omicron_passwords::NewPasswordHash, +} + +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] +pub struct GetInventoryParams { + /// Refresh the state of these SPs from MGS prior to returning (instead of + /// returning cached data). + pub force_refresh: Vec, +} + +/// The response to a `get_inventory` call: the inventory known to wicketd, or a +/// notification that data is unavailable. 
+#[derive(Clone, Debug, JsonSchema, Serialize)] +#[serde(rename_all = "snake_case", tag = "type", content = "data")] +pub enum GetInventoryResponse { + Response { inventory: RackV1Inventory, mgs_last_seen: Duration }, + Unavailable, +} + +#[derive(Clone, Debug, JsonSchema, Serialize)] +#[serde(rename_all = "snake_case")] +pub struct InstallableArtifacts { + pub artifact_id: ArtifactId, + pub installable: Vec, +} + +/// The response to a `get_artifacts` call: the system version, and the list of +/// all artifacts currently held by wicketd. +#[derive(Clone, Debug, JsonSchema, Serialize)] +#[serde(rename_all = "snake_case")] +pub struct GetArtifactsAndEventReportsResponse { + pub system_version: Option, + + /// Map of artifacts we ingested from the most-recently-uploaded TUF + /// repository to a list of artifacts we're serving over the bootstrap + /// network. In some cases the list of artifacts being served will have + /// length 1 (when we're serving the artifact directly); in other cases the + /// artifact in the TUF repo contains multiple nested artifacts inside it + /// (e.g., RoT artifacts contain both A and B images), and we serve the list + /// of extracted artifacts but not the original combination. + /// + /// Conceptually, this is a `BTreeMap>`, but + /// JSON requires string keys for maps, so we give back a vec of pairs + /// instead. + pub artifacts: Vec, + + pub event_reports: BTreeMap>, +} + +#[derive(Clone, Debug, JsonSchema, Deserialize)] +pub struct StartUpdateParams { + /// The SP identifiers to start the update with. Must be non-empty. + pub targets: BTreeSet, + + /// Options for the update. + pub options: StartUpdateOptions, +} + +#[derive(Clone, Debug, JsonSchema, Deserialize)] +pub struct ClearUpdateStateParams { + /// The SP identifiers to clear the update state for. Must be non-empty. 
+ pub targets: BTreeSet, + + /// Options for clearing update state + pub options: ClearUpdateStateOptions, +} + +#[derive(Clone, Debug, JsonSchema, Serialize)] +#[serde(rename_all = "snake_case")] +pub struct GetBaseboardResponse { + pub baseboard: Option, +} + +/// All the fields of this response are optional, because it's possible we don't +/// know any of them (yet) if MGS has not yet finished discovering its location +/// or (ever) if we're running in a dev environment that doesn't support +/// MGS-location / baseboard mapping. +#[derive(Clone, Debug, JsonSchema, Serialize)] +#[serde(rename_all = "snake_case")] +pub struct GetLocationResponse { + /// The identity of our sled (where wicketd is running). + pub sled_id: Option, + /// The baseboard of our sled (where wicketd is running). + pub sled_baseboard: Option, + /// The baseboard of the switch our sled is physically connected to. + pub switch_baseboard: Option, + /// The identity of the switch our sled is physically connected to. + pub switch_id: Option, +} + +#[derive(Serialize, Deserialize, JsonSchema)] +pub struct PathSpIgnitionCommand { + #[serde(rename = "type")] + pub type_: SpType, + pub slot: u32, + pub command: IgnitionCommand, +} + +/// Options provided to the preflight uplink check. +#[derive(Clone, Debug, JsonSchema, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub struct PreflightUplinkCheckOptions { + /// DNS name to query. 
+ pub dns_name_to_query: Option, +} diff --git a/wicketd/Cargo.toml b/wicketd/Cargo.toml index bfd8a4cf45..d2e870226b 100644 --- a/wicketd/Cargo.toml +++ b/wicketd/Cargo.toml @@ -52,7 +52,7 @@ uuid.workspace = true bootstrap-agent-client.workspace = true omicron-ddm-admin-client.workspace = true gateway-client.workspace = true -installinator-artifactd.workspace = true +installinator-api.workspace = true installinator-common.workspace = true omicron-certificates.workspace = true omicron-common.workspace = true @@ -62,6 +62,7 @@ tufaceous-lib.workspace = true update-common.workspace = true update-engine.workspace = true wicket-common.workspace = true +wicketd-api.workspace = true wicketd-client.workspace = true omicron-workspace-hack.workspace = true @@ -76,7 +77,7 @@ fs-err.workspace = true gateway-test-utils.workspace = true http.workspace = true installinator.workspace = true -installinator-artifact-client.workspace = true +installinator-client.workspace = true maplit.workspace = true omicron-test-utils.workspace = true openapi-lint.workspace = true diff --git a/wicketd/src/artifacts.rs b/wicketd/src/artifacts.rs index 3e5854d17e..59981b2ac3 100644 --- a/wicketd/src/artifacts.rs +++ b/wicketd/src/artifacts.rs @@ -5,5 +5,6 @@ mod server; mod store; -pub(crate) use self::server::WicketdArtifactServer; +pub(crate) use self::server::WicketdInstallinatorApiImpl; +pub(crate) use self::server::WicketdInstallinatorContext; pub(crate) use self::store::WicketdArtifactStore; diff --git a/wicketd/src/artifacts/server.rs b/wicketd/src/artifacts/server.rs index 3808f01753..6d677c7b4f 100644 --- a/wicketd/src/artifacts/server.rs +++ b/wicketd/src/artifacts/server.rs @@ -2,62 +2,99 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-use super::store::WicketdArtifactStore; use crate::installinator_progress::IprArtifactServer; -use async_trait::async_trait; +use dropshot::FreeformBody; use dropshot::HttpError; +use dropshot::HttpResponseHeaders; +use dropshot::HttpResponseOk; +use dropshot::HttpResponseUpdatedNoContent; +use dropshot::Path; +use dropshot::RequestContext; +use dropshot::TypedBody; use hyper::Body; -use installinator_artifactd::ArtifactGetter; -use installinator_artifactd::EventReportStatus; +use installinator_api::body_to_artifact_response; +use installinator_api::InstallinatorApi; +use installinator_api::ReportQuery; +use installinator_common::EventReport; use omicron_common::update::ArtifactHashId; use slog::error; use slog::Logger; -use uuid::Uuid; + +use super::WicketdArtifactStore; + +pub(crate) enum WicketdInstallinatorApiImpl {} /// The artifact server interface for wicketd. #[derive(Debug)] -pub(crate) struct WicketdArtifactServer { - #[allow(dead_code)] +pub struct WicketdInstallinatorContext { log: Logger, store: WicketdArtifactStore, ipr_artifact: IprArtifactServer, } -impl WicketdArtifactServer { +impl WicketdInstallinatorContext { pub(crate) fn new( log: &Logger, store: WicketdArtifactStore, ipr_artifact: IprArtifactServer, ) -> Self { - let log = log.new(slog::o!("component" => "wicketd artifact server")); - Self { log, store, ipr_artifact } + Self { + log: log + .new(slog::o!("component" => "wicketd installinator server")), + store, + ipr_artifact, + } } } -#[async_trait] -impl ArtifactGetter for WicketdArtifactServer { - async fn get_by_hash(&self, id: &ArtifactHashId) -> Option<(u64, Body)> { - let data_handle = self.store.get_by_hash(id)?; - let size = data_handle.file_size() as u64; - let data_stream = match data_handle.reader_stream().await { - Ok(stream) => stream, - Err(err) => { - error!( - self.log, "failed to open extracted archive on demand"; - "error" => #%err, - ); - return None; - } - }; +impl InstallinatorApi for WicketdInstallinatorApiImpl { + type 
Context = WicketdInstallinatorContext; + + async fn get_artifact_by_hash( + rqctx: RequestContext, + path: Path, + ) -> Result>, HttpError> + { + let context = rqctx.context(); + match context.store.get_by_hash(&path.into_inner()) { + Some(data_handle) => { + let size = data_handle.file_size() as u64; + let data_stream = match data_handle.reader_stream().await { + Ok(stream) => stream, + Err(err) => { + error!( + context.log, "failed to open extracted archive on demand"; + "error" => #%err, + ); + return Err(HttpError::for_internal_error(format!( + // TODO: print error chain + "Artifact not found: {err}" + ))); + } + }; - Some((size, Body::wrap_stream(data_stream))) + Ok(body_to_artifact_response( + size, + Body::wrap_stream(data_stream), + )) + } + None => { + Err(HttpError::for_not_found(None, "Artifact not found".into())) + } + } } async fn report_progress( - &self, - update_id: Uuid, - report: installinator_common::EventReport, - ) -> Result { - Ok(self.ipr_artifact.report_progress(update_id, report)) + rqctx: RequestContext, + path: Path, + report: TypedBody, + ) -> Result { + let context = rqctx.context(); + let update_id = path.into_inner().update_id; + + context + .ipr_artifact + .report_progress(update_id, report.into_inner()) + .to_http_result(update_id) } } diff --git a/wicketd/src/artifacts/store.rs b/wicketd/src/artifacts/store.rs index 01543432a2..98a6abcaad 100644 --- a/wicketd/src/artifacts/store.rs +++ b/wicketd/src/artifacts/store.rs @@ -2,7 +2,6 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-use crate::http_entrypoints::InstallableArtifacts; use omicron_common::api::external::SemverVersion; use omicron_common::update::ArtifactHashId; use slog::Logger; @@ -11,6 +10,7 @@ use std::sync::Mutex; use update_common::artifacts::ArtifactsWithPlan; use update_common::artifacts::ExtractedArtifactDataHandle; use update_common::artifacts::UpdatePlan; +use wicketd_api::InstallableArtifacts; /// The artifact store for wicketd. /// diff --git a/wicketd/src/bin/wicketd.rs b/wicketd/src/bin/wicketd.rs index 4037bc4c23..bc23362daf 100644 --- a/wicketd/src/bin/wicketd.rs +++ b/wicketd/src/bin/wicketd.rs @@ -14,14 +14,11 @@ use omicron_common::{ use sled_hardware_types::Baseboard; use std::net::{Ipv6Addr, SocketAddrV6}; use std::path::PathBuf; -use wicketd::{self, run_openapi, Config, Server, SmfConfigValues}; +use wicketd::{Config, Server, SmfConfigValues}; #[derive(Debug, Parser)] #[clap(name = "wicketd", about = "See README.adoc for more information")] enum Args { - /// Print the external OpenAPI Spec document and exit - Openapi, - /// Start a wicketd server Run { #[clap(name = "CONFIG_FILE_PATH", action)] @@ -84,9 +81,6 @@ async fn do_run() -> Result<(), CmdError> { let args = Args::parse(); match args { - Args::Openapi => { - run_openapi().map_err(|err| CmdError::Failure(anyhow!(err))) - } Args::Run { config_file_path, address, @@ -144,9 +138,8 @@ async fn do_run() -> Result<(), CmdError> { .to_logger("wicketd") .context("failed to initialize logger") .map_err(CmdError::Failure)?; - let server = Server::start(log, args) - .await - .map_err(|err| CmdError::Failure(anyhow!(err)))?; + let server = + Server::start(log, args).await.map_err(CmdError::Failure)?; server .wait_for_finish() .await diff --git a/wicketd/src/context.rs b/wicketd/src/context.rs index 68d04f35dc..8f4dfb451b 100644 --- a/wicketd/src/context.rs +++ b/wicketd/src/context.rs @@ -12,7 +12,6 @@ use crate::MgsHandle; use anyhow::anyhow; use anyhow::bail; use anyhow::Result; -use 
gateway_client::types::SpIdentifier; use internal_dns::resolver::Resolver; use sled_hardware_types::Baseboard; use slog::info; @@ -21,6 +20,7 @@ use std::net::SocketAddrV6; use std::sync::Arc; use std::sync::Mutex; use std::sync::OnceLock; +use wicket_common::inventory::SpIdentifier; /// Shared state used by API handlers pub struct ServerContext { diff --git a/wicketd/src/helpers.rs b/wicketd/src/helpers.rs index a8b47d4f12..8cc0d3330d 100644 --- a/wicketd/src/helpers.rs +++ b/wicketd/src/helpers.rs @@ -6,8 +6,8 @@ use std::fmt; -use gateway_client::types::{SpIdentifier, SpType}; use itertools::Itertools; +use wicket_common::inventory::{SpIdentifier, SpType}; #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)] pub(crate) struct SpIdentifierDisplay(pub(crate) SpIdentifier); diff --git a/wicketd/src/http_entrypoints.rs b/wicketd/src/http_entrypoints.rs index 4a4374b312..ea79569375 100644 --- a/wicketd/src/http_entrypoints.rs +++ b/wicketd/src/http_entrypoints.rs @@ -7,18 +7,13 @@ use crate::helpers::sps_to_string; use crate::helpers::SpIdentifierDisplay; use crate::mgs::GetInventoryError; -use crate::mgs::GetInventoryResponse; use crate::mgs::MgsHandle; use crate::mgs::ShutdownInProgress; -use crate::preflight_check::UplinkEventReport; -use crate::RackV1Inventory; use crate::SmfConfigValues; use bootstrap_agent_client::types::RackInitId; use bootstrap_agent_client::types::RackOperationStatus; use bootstrap_agent_client::types::RackResetId; -use dropshot::endpoint; use dropshot::ApiDescription; -use dropshot::ApiDescriptionRegisterError; use dropshot::HttpError; use dropshot::HttpResponseOk; use dropshot::HttpResponseUpdatedNoContent; @@ -26,31 +21,23 @@ use dropshot::Path; use dropshot::RequestContext; use dropshot::StreamingBody; use dropshot::TypedBody; -use gateway_client::types::IgnitionCommand; -use gateway_client::types::SpIdentifier; -use gateway_client::types::SpType; use http::StatusCode; use internal_dns::resolver::Resolver; -use 
omicron_common::api::external::SemverVersion; use omicron_common::api::internal::shared::SwitchLocation; -use omicron_common::update::ArtifactHashId; -use omicron_common::update::ArtifactId; -use schemars::JsonSchema; -use serde::Deserialize; -use serde::Serialize; use sled_hardware_types::Baseboard; use slog::o; use std::collections::BTreeMap; use std::collections::BTreeSet; -use std::net::Ipv6Addr; -use std::time::Duration; -use wicket_common::rack_setup::BgpAuthKey; -use wicket_common::rack_setup::BgpAuthKeyId; -use wicket_common::rack_setup::CurrentRssUserConfigInsensitive; +use wicket_common::inventory::RackV1Inventory; +use wicket_common::inventory::SpIdentifier; +use wicket_common::inventory::SpType; use wicket_common::rack_setup::GetBgpAuthKeyInfoResponse; use wicket_common::rack_setup::PutRssUserConfigInsensitive; +use wicket_common::rack_update::AbortUpdateOptions; +use wicket_common::rack_update::ClearUpdateStateResponse; use wicket_common::update_events::EventReport; use wicket_common::WICKETD_TIMEOUT; +use wicketd_api::*; use crate::ServerContext; @@ -58,1258 +45,753 @@ type WicketdApiDescription = ApiDescription; /// Return a description of the wicketd api for use in generating an OpenAPI spec pub fn api() -> WicketdApiDescription { - fn register_endpoints( - api: &mut WicketdApiDescription, - ) -> Result<(), ApiDescriptionRegisterError> { - api.register(get_bootstrap_sleds)?; - api.register(get_rss_config)?; - api.register(put_rss_config)?; - api.register(put_rss_config_recovery_user_password_hash)?; - api.register(post_rss_config_cert)?; - api.register(post_rss_config_key)?; - api.register(get_bgp_auth_key_info)?; - api.register(put_bgp_auth_key)?; - api.register(delete_rss_config)?; - api.register(get_rack_setup_state)?; - api.register(post_run_rack_setup)?; - api.register(post_run_rack_reset)?; - api.register(get_inventory)?; - api.register(get_location)?; - api.register(put_repository)?; - api.register(get_artifacts_and_event_reports)?; - 
api.register(get_baseboard)?; - api.register(post_start_update)?; - api.register(post_abort_update)?; - api.register(post_clear_update_state)?; - api.register(get_update_sp)?; - api.register(post_ignition_command)?; - api.register(post_start_preflight_uplink_check)?; - api.register(get_preflight_uplink_report)?; - api.register(post_reload_config)?; - Ok(()) - } - - let mut api = WicketdApiDescription::new(); - if let Err(err) = register_endpoints(&mut api) { - panic!("failed to register entrypoints: {}", err); - } - api + wicketd_api_mod::api_description::() + .expect("failed to register entrypoints") } -#[derive( - Clone, - Debug, - Serialize, - Deserialize, - JsonSchema, - PartialEq, - Eq, - PartialOrd, - Ord, -)] -pub struct BootstrapSledIp { - pub baseboard: Baseboard, - pub ip: Ipv6Addr, -} +pub enum WicketdApiImpl {} -#[derive( - Clone, - Debug, - Serialize, - Deserialize, - JsonSchema, - PartialEq, - Eq, - PartialOrd, - Ord, -)] -pub struct BootstrapSledIps { - pub sleds: Vec, -} +impl WicketdApi for WicketdApiImpl { + type Context = ServerContext; -/// Get wicketd's current view of all sleds visible on the bootstrap network. -#[endpoint { - method = GET, - path = "/bootstrap-sleds" -}] -async fn get_bootstrap_sleds( - rqctx: RequestContext, -) -> Result, HttpError> { - let ctx = rqctx.context(); - - let sleds = ctx - .bootstrap_peers - .sleds() - .into_iter() - .map(|(baseboard, ip)| BootstrapSledIp { baseboard, ip }) - .collect(); - - Ok(HttpResponseOk(BootstrapSledIps { sleds })) -} - -// This is a summary of the subset of `RackInitializeRequest` that is sensitive; -// we only report a summary instead of returning actual data. 
-#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] -pub struct CurrentRssUserConfigSensitive { - pub num_external_certificates: usize, - pub recovery_silo_password_set: bool, - // We define GetBgpAuthKeyInfoResponse in wicket-common and use a - // progenitor replace directive for it, because we don't want typify to - // turn the BTreeMap into a HashMap. Use the same struct here to piggyback - // on that. - pub bgp_auth_keys: GetBgpAuthKeyInfoResponse, -} + async fn get_bootstrap_sleds( + rqctx: RequestContext, + ) -> Result, HttpError> { + let ctx = rqctx.context(); -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] -pub struct CurrentRssUserConfig { - pub sensitive: CurrentRssUserConfigSensitive, - pub insensitive: CurrentRssUserConfigInsensitive, -} + let sleds = ctx + .bootstrap_peers + .sleds() + .into_iter() + .map(|(baseboard, ip)| BootstrapSledIp { baseboard, ip }) + .collect(); -// Get the current inventory or return a 503 Unavailable. -async fn inventory_or_unavail( - mgs_handle: &MgsHandle, -) -> Result { - match mgs_handle.get_cached_inventory().await { - Ok(GetInventoryResponse::Response { inventory, .. }) => Ok(inventory), - Ok(GetInventoryResponse::Unavailable) => Err(HttpError::for_unavail( - None, - "Rack inventory not yet available".into(), - )), - Err(ShutdownInProgress) => { - Err(HttpError::for_unavail(None, "Server is shutting down".into())) - } + Ok(HttpResponseOk(BootstrapSledIps { sleds })) } -} - -/// Get the current status of the user-provided (or system-default-provided, in -/// some cases) RSS configuration. -#[endpoint { - method = GET, - path = "/rack-setup/config" -}] -async fn get_rss_config( - rqctx: RequestContext, -) -> Result, HttpError> { - let ctx = rqctx.context(); - - // We can't run RSS if we don't have an inventory from MGS yet; we always - // need to fill in the bootstrap sleds first. 
- let inventory = inventory_or_unavail(&ctx.mgs_handle).await?; - - let mut config = ctx.rss_config.lock().unwrap(); - config.update_with_inventory_and_bootstrap_peers( - &inventory, - &ctx.bootstrap_peers, - ); - - Ok(HttpResponseOk((&*config).into())) -} - -/// Update (a subset of) the current RSS configuration. -/// -/// Sensitive values (certificates and password hash) are not set through this -/// endpoint. -#[endpoint { - method = PUT, - path = "/rack-setup/config" -}] -async fn put_rss_config( - rqctx: RequestContext, - body: TypedBody, -) -> Result { - let ctx = rqctx.context(); - - // We can't run RSS if we don't have an inventory from MGS yet; we always - // need to fill in the bootstrap sleds first. - let inventory = inventory_or_unavail(&ctx.mgs_handle).await?; - - let mut config = ctx.rss_config.lock().unwrap(); - config.update_with_inventory_and_bootstrap_peers( - &inventory, - &ctx.bootstrap_peers, - ); - config - .update(body.into_inner(), ctx.baseboard.as_ref()) - .map_err(|err| HttpError::for_bad_request(None, err))?; - - Ok(HttpResponseUpdatedNoContent()) -} - -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] -#[serde(tag = "status", rename_all = "snake_case")] -pub enum CertificateUploadResponse { - /// The key has been uploaded, but we're waiting on its corresponding - /// certificate chain. - WaitingOnCert, - /// The cert chain has been uploaded, but we're waiting on its corresponding - /// private key. - WaitingOnKey, - /// A cert chain and its key have been accepted. - CertKeyAccepted, - /// A cert chain and its key are valid, but have already been uploaded. - CertKeyDuplicateIgnored, -} - -/// Add an external certificate. -/// -/// This must be paired with its private key. They may be posted in either -/// order, but one cannot post two certs in a row (or two keys in a row). 
-#[endpoint { - method = POST, - path = "/rack-setup/config/cert" -}] -async fn post_rss_config_cert( - rqctx: RequestContext, - body: TypedBody, -) -> Result, HttpError> { - let ctx = rqctx.context(); - - let mut config = ctx.rss_config.lock().unwrap(); - let response = config - .push_cert(body.into_inner()) - .map_err(|err| HttpError::for_bad_request(None, err))?; - - Ok(HttpResponseOk(response)) -} - -/// Add the private key of an external certificate. -/// -/// This must be paired with its certificate. They may be posted in either -/// order, but one cannot post two keys in a row (or two certs in a row). -#[endpoint { - method = POST, - path = "/rack-setup/config/key" -}] -async fn post_rss_config_key( - rqctx: RequestContext, - body: TypedBody, -) -> Result, HttpError> { - let ctx = rqctx.context(); - - let mut config = ctx.rss_config.lock().unwrap(); - let response = config - .push_key(body.into_inner()) - .map_err(|err| HttpError::for_bad_request(None, err))?; - - Ok(HttpResponseOk(response)) -} - -// -- BGP authentication key management -#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Eq)] -pub(crate) struct GetBgpAuthKeyParams { - /// Checks that these keys are valid. - check_valid: BTreeSet, -} + async fn get_rss_config( + rqctx: RequestContext, + ) -> Result, HttpError> { + let ctx = rqctx.context(); -/// Return information about BGP authentication keys, including checking -/// validity of keys. -/// -/// Produces an error if the rack setup config wasn't set, or if any of the -/// requested key IDs weren't found. -#[endpoint( - method = GET, - path = "/rack-setup/config/bgp/auth-key" -)] -async fn get_bgp_auth_key_info( - rqctx: RequestContext, - // A bit weird for a GET request to have a TypedBody, but there's no other - // nice way to transmit this information as a batch. 
- params: TypedBody, -) -> Result, HttpError> { - let ctx = rqctx.context(); - let params = params.into_inner(); - - let config = ctx.rss_config.lock().unwrap(); - config - .check_bgp_auth_keys_valid(¶ms.check_valid) - .map_err(|err| HttpError::for_bad_request(None, err.to_string()))?; - let data = config.get_bgp_auth_key_data(); - - Ok(HttpResponseOk(GetBgpAuthKeyInfoResponse { data })) -} + // We can't run RSS if we don't have an inventory from MGS yet; we always + // need to fill in the bootstrap sleds first. + let inventory = inventory_or_unavail(&ctx.mgs_handle).await?; -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] -struct PutBgpAuthKeyParams { - key_id: BgpAuthKeyId, -} - -#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Eq)] -struct PutBgpAuthKeyBody { - key: BgpAuthKey, -} + let mut config = ctx.rss_config.lock().unwrap(); + config.update_with_inventory_and_bootstrap_peers( + &inventory, + &ctx.bootstrap_peers, + ); -#[derive(Clone, Debug, Serialize, JsonSchema, PartialEq)] -struct PutBgpAuthKeyResponse { - status: SetBgpAuthKeyStatus, -} + Ok(HttpResponseOk((&*config).into())) + } -#[derive(Clone, Debug, Serialize, JsonSchema, PartialEq)] -#[serde(rename_all = "snake_case")] -pub(crate) enum SetBgpAuthKeyStatus { - /// The key was accepted and replaced an old key. - Replaced, + async fn put_rss_config( + rqctx: RequestContext, + body: TypedBody, + ) -> Result { + let ctx = rqctx.context(); - /// The key was accepted, and is the same as the existing key. - Unchanged, + // We can't run RSS if we don't have an inventory from MGS yet; we always + // need to fill in the bootstrap sleds first. + let inventory = inventory_or_unavail(&ctx.mgs_handle).await?; - /// The key was accepted and is new. 
- Added, -} + let mut config = ctx.rss_config.lock().unwrap(); + config.update_with_inventory_and_bootstrap_peers( + &inventory, + &ctx.bootstrap_peers, + ); + config + .update(body.into_inner(), ctx.baseboard.as_ref()) + .map_err(|err| HttpError::for_bad_request(None, err))?; + + Ok(HttpResponseUpdatedNoContent()) + } -/// Set the BGP authentication key for a particular key ID. -#[endpoint { - method = PUT, - path = "/rack-setup/config/bgp/auth-key/{key_id}" -}] -async fn put_bgp_auth_key( - rqctx: RequestContext, - params: Path, - body: TypedBody, -) -> Result, HttpError> { - let ctx = rqctx.context(); - let params = params.into_inner(); - - let mut config = ctx.rss_config.lock().unwrap(); - let status = config - .set_bgp_auth_key(params.key_id, body.into_inner().key) - .map_err(|err| HttpError::for_bad_request(None, err.to_string()))?; - - Ok(HttpResponseOk(PutBgpAuthKeyResponse { status })) -} + async fn post_rss_config_cert( + rqctx: RequestContext, + body: TypedBody, + ) -> Result, HttpError> { + let ctx = rqctx.context(); -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] -pub struct PutRssRecoveryUserPasswordHash { - pub hash: omicron_passwords::NewPasswordHash, -} + let mut config = ctx.rss_config.lock().unwrap(); + let response = config + .push_cert(body.into_inner()) + .map_err(|err| HttpError::for_bad_request(None, err))?; -/// Update the RSS config recovery silo user password hash. -#[endpoint { - method = PUT, - path = "/rack-setup/config/recovery-user-password-hash" -}] -async fn put_rss_config_recovery_user_password_hash( - rqctx: RequestContext, - body: TypedBody, -) -> Result { - let ctx = rqctx.context(); - - let mut config = ctx.rss_config.lock().unwrap(); - config.set_recovery_user_password_hash(body.into_inner().hash); - - Ok(HttpResponseUpdatedNoContent()) -} + Ok(HttpResponseOk(response)) + } -/// Reset all RSS configuration to their default values. 
-#[endpoint { - method = DELETE, - path = "/rack-setup/config" -}] -async fn delete_rss_config( - rqctx: RequestContext, -) -> Result { - let ctx = rqctx.context(); + async fn post_rss_config_key( + rqctx: RequestContext, + body: TypedBody, + ) -> Result, HttpError> { + let ctx = rqctx.context(); - let mut config = ctx.rss_config.lock().unwrap(); - *config = Default::default(); + let mut config = ctx.rss_config.lock().unwrap(); + let response = config + .push_key(body.into_inner()) + .map_err(|err| HttpError::for_bad_request(None, err))?; - Ok(HttpResponseUpdatedNoContent()) -} + Ok(HttpResponseOk(response)) + } -/// Query current state of rack setup. -#[endpoint { - method = GET, - path = "/rack-setup" -}] -async fn get_rack_setup_state( - rqctx: RequestContext, -) -> Result, HttpError> { - let ctx = rqctx.context(); - - let sled_agent_addr = ctx - .bootstrap_agent_addr() - .map_err(|err| HttpError::for_bad_request(None, format!("{err:#}")))?; - - let client = bootstrap_agent_client::Client::new( - &format!("http://{}", sled_agent_addr), - ctx.log.new(slog::o!("component" => "bootstrap client")), - ); - - let op_status = client - .rack_initialization_status() - .await - .map_err(|err| { - use bootstrap_agent_client::Error as BaError; - match err { - BaError::CommunicationError(err) => { - let message = - format!("Failed to send rack setup request: {err}"); - HttpError { - status_code: http::StatusCode::SERVICE_UNAVAILABLE, - error_code: None, - external_message: message.clone(), - internal_message: message, - } - } - other => HttpError::for_bad_request( - None, - format!("Rack setup request failed: {other}"), - ), - } - })? - .into_inner(); + async fn get_bgp_auth_key_info( + rqctx: RequestContext, + // A bit weird for a GET request to have a TypedBody, but there's no other + // nice way to transmit this information as a batch. 
+ params: TypedBody, + ) -> Result, HttpError> { + let ctx = rqctx.context(); + let params = params.into_inner(); + + let config = ctx.rss_config.lock().unwrap(); + config + .check_bgp_auth_keys_valid(¶ms.check_valid) + .map_err(|err| HttpError::for_bad_request(None, err.to_string()))?; + let data = config.get_bgp_auth_key_data(); + + Ok(HttpResponseOk(GetBgpAuthKeyInfoResponse { data })) + } - Ok(HttpResponseOk(op_status)) -} + async fn put_bgp_auth_key( + rqctx: RequestContext, + params: Path, + body: TypedBody, + ) -> Result, HttpError> { + let ctx = rqctx.context(); + let params = params.into_inner(); -/// Run rack setup. -/// -/// Will return an error if not all of the rack setup configuration has been -/// populated. -#[endpoint { - method = POST, - path = "/rack-setup" -}] -async fn post_run_rack_setup( - rqctx: RequestContext, -) -> Result, HttpError> { - let ctx = rqctx.context(); - let log = &rqctx.log; - - let sled_agent_addr = ctx - .bootstrap_agent_addr() - .map_err(|err| HttpError::for_bad_request(None, format!("{err:#}")))?; - - let request = { let mut config = ctx.rss_config.lock().unwrap(); - config.start_rss_request(&ctx.bootstrap_peers, log).map_err(|err| { - HttpError::for_bad_request(None, format!("{err:#}")) - })? 
- }; - - slog::info!( - ctx.log, - "Sending RSS initialize request to {}", - sled_agent_addr - ); - let client = bootstrap_agent_client::Client::new( - &format!("http://{}", sled_agent_addr), - ctx.log.new(slog::o!("component" => "bootstrap client")), - ); - - let init_id = client - .rack_initialize(&request) - .await - .map_err(|err| { - use bootstrap_agent_client::Error as BaError; - match err { - BaError::CommunicationError(err) => { - let message = - format!("Failed to send rack setup request: {err}"); - HttpError { - status_code: http::StatusCode::SERVICE_UNAVAILABLE, - error_code: None, - external_message: message.clone(), - internal_message: message, - } - } - other => HttpError::for_bad_request( - None, - format!("Rack setup request failed: {other}"), - ), - } - })? - .into_inner(); - - Ok(HttpResponseOk(init_id)) -} + let status = config + .set_bgp_auth_key(params.key_id, body.into_inner().key) + .map_err(|err| HttpError::for_bad_request(None, err.to_string()))?; -/// Run rack reset. 
-#[endpoint { - method = DELETE, - path = "/rack-setup" -}] -async fn post_run_rack_reset( - rqctx: RequestContext, -) -> Result, HttpError> { - let ctx = rqctx.context(); - - let sled_agent_addr = ctx - .bootstrap_agent_addr() - .map_err(|err| HttpError::for_bad_request(None, format!("{err:#}")))?; - - slog::info!(ctx.log, "Sending RSS reset request to {}", sled_agent_addr); - let client = bootstrap_agent_client::Client::new( - &format!("http://{}", sled_agent_addr), - ctx.log.new(slog::o!("component" => "bootstrap client")), - ); - - let reset_id = client - .rack_reset() - .await - .map_err(|err| { - use bootstrap_agent_client::Error as BaError; - match err { - BaError::CommunicationError(err) => { - let message = - format!("Failed to send rack reset request: {err}"); - HttpError { - status_code: http::StatusCode::SERVICE_UNAVAILABLE, - error_code: None, - external_message: message.clone(), - internal_message: message, - } - } - other => HttpError::for_bad_request( - None, - format!("Rack setup request failed: {other}"), - ), - } - })? - .into_inner(); + Ok(HttpResponseOk(PutBgpAuthKeyResponse { status })) + } - Ok(HttpResponseOk(reset_id)) -} + async fn put_rss_config_recovery_user_password_hash( + rqctx: RequestContext, + body: TypedBody, + ) -> Result { + let ctx = rqctx.context(); -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] -pub struct GetInventoryParams { - /// If true, refresh the state of these SPs from MGS prior to returning - /// (instead of returning cached data). - pub force_refresh: Vec, -} + let mut config = ctx.rss_config.lock().unwrap(); + config.set_recovery_user_password_hash(body.into_inner().hash); -/// A status endpoint used to report high level information known to wicketd. -/// -/// This endpoint can be polled to see if there have been state changes in the -/// system that are useful to report to wicket. -/// -/// Wicket, and possibly other callers, will retrieve the changed information, -/// with follow up calls. 
-#[endpoint { - method = GET, - path = "/inventory" -}] -async fn get_inventory( - rqctx: RequestContext, - body_params: TypedBody, -) -> Result, HttpError> { - let GetInventoryParams { force_refresh } = body_params.into_inner(); - match rqctx - .context() - .mgs_handle - .get_inventory_refreshing_sps(force_refresh) - .await - { - Ok(response) => Ok(HttpResponseOk(response)), - Err(GetInventoryError::InvalidSpIdentifier) => { - Err(HttpError::for_unavail( - None, - "Invalid SP identifier in request".into(), - )) - } - Err(GetInventoryError::ShutdownInProgress) => { - Err(HttpError::for_unavail(None, "Server is shutting down".into())) - } + Ok(HttpResponseUpdatedNoContent()) } -} - -/// Upload a TUF repository to the server. -/// -/// At any given time, wicketd will keep at most one TUF repository in memory. -/// Any previously-uploaded repositories will be discarded. -#[endpoint { - method = PUT, - path = "/repository", -}] -async fn put_repository( - rqctx: RequestContext, - body: StreamingBody, -) -> Result { - let rqctx = rqctx.context(); - - rqctx.update_tracker.put_repository(body.into_stream()).await?; - - Ok(HttpResponseUpdatedNoContent()) -} -#[derive(Clone, Debug, JsonSchema, Serialize)] -#[serde(rename_all = "snake_case")] -pub struct InstallableArtifacts { - pub artifact_id: ArtifactId, - pub installable: Vec, -} + async fn delete_rss_config( + rqctx: RequestContext, + ) -> Result { + let ctx = rqctx.context(); -/// The response to a `get_artifacts` call: the system version, and the list of -/// all artifacts currently held by wicketd. -#[derive(Clone, Debug, JsonSchema, Serialize)] -#[serde(rename_all = "snake_case")] -pub struct GetArtifactsAndEventReportsResponse { - pub system_version: Option, - - /// Map of artifacts we ingested from the most-recently-uploaded TUF - /// repository to a list of artifacts we're serving over the bootstrap - /// network. 
In some cases the list of artifacts being served will have - /// length 1 (when we're serving the artifact directly); in other cases the - /// artifact in the TUF repo contains multiple nested artifacts inside it - /// (e.g., RoT artifacts contain both A and B images), and we serve the list - /// of extracted artifacts but not the original combination. - /// - /// Conceptually, this is a `BTreeMap>`, but - /// JSON requires string keys for maps, so we give back a vec of pairs - /// instead. - pub artifacts: Vec, - - pub event_reports: BTreeMap>, -} + let mut config = ctx.rss_config.lock().unwrap(); + *config = Default::default(); -/// An endpoint used to report all available artifacts and event reports. -/// -/// The order of the returned artifacts is unspecified, and may change between -/// calls even if the total set of artifacts has not. -#[endpoint { - method = GET, - path = "/artifacts-and-event-reports", -}] -async fn get_artifacts_and_event_reports( - rqctx: RequestContext, -) -> Result, HttpError> { - let response = - rqctx.context().update_tracker.artifacts_and_event_reports().await; - Ok(HttpResponseOk(response)) -} + Ok(HttpResponseUpdatedNoContent()) + } -#[derive(Clone, Debug, JsonSchema, Deserialize)] -pub(crate) struct StartUpdateParams { - /// The SP identifiers to start the update with. Must be non-empty. - pub(crate) targets: BTreeSet, + async fn get_rack_setup_state( + rqctx: RequestContext, + ) -> Result, HttpError> { + let ctx = rqctx.context(); - /// Options for the update. - pub(crate) options: StartUpdateOptions, -} + let sled_agent_addr = ctx.bootstrap_agent_addr().map_err(|err| { + HttpError::for_bad_request(None, format!("{err:#}")) + })?; -#[derive(Clone, Debug, JsonSchema, Deserialize)] -pub(crate) struct StartUpdateOptions { - /// If passed in, fails the update with a simulated error. - pub(crate) test_error: Option, - - /// If passed in, creates a test step that lasts these many seconds long. - /// - /// This is used for testing. 
- pub(crate) test_step_seconds: Option, - - /// If passed in, simulates a result for the RoT Bootloader update. - /// - /// This is used for testing. - pub(crate) test_simulate_rot_bootloader_result: - Option, - - /// If passed in, simulates a result for the RoT update. - /// - /// This is used for testing. - pub(crate) test_simulate_rot_result: Option, - - /// If passed in, simulates a result for the SP update. - /// - /// This is used for testing. - pub(crate) test_simulate_sp_result: Option, - - /// If true, skip the check on the current RoT version and always update it - /// regardless of whether the update appears to be neeeded. - pub(crate) skip_rot_bootloader_version_check: bool, - - /// If true, skip the check on the current RoT version and always update it - /// regardless of whether the update appears to be neeeded. - pub(crate) skip_rot_version_check: bool, - - /// If true, skip the check on the current SP version and always update it - /// regardless of whether the update appears to be neeeded. - pub(crate) skip_sp_version_check: bool, -} + let client = bootstrap_agent_client::Client::new( + &format!("http://{}", sled_agent_addr), + ctx.log.new(slog::o!("component" => "bootstrap client")), + ); + + let op_status = client + .rack_initialization_status() + .await + .map_err(|err| { + use bootstrap_agent_client::Error as BaError; + match err { + BaError::CommunicationError(err) => { + let message = + format!("Failed to send rack setup request: {err}"); + HttpError { + status_code: http::StatusCode::SERVICE_UNAVAILABLE, + error_code: None, + external_message: message.clone(), + internal_message: message, + } + } + other => HttpError::for_bad_request( + None, + format!("Rack setup request failed: {other}"), + ), + } + })? + .into_inner(); -/// A simulated result for a component update. -/// -/// Used by [`StartUpdateOptions`]. 
-#[derive(Clone, Debug, JsonSchema, Deserialize)] -#[serde(rename_all = "snake_case")] -pub(crate) enum UpdateSimulatedResult { - Success, - Warning, - Skipped, - Failure, -} + Ok(HttpResponseOk(op_status)) + } -#[derive(Clone, Debug, JsonSchema, Deserialize)] -pub(crate) struct ClearUpdateStateParams { - /// The SP identifiers to clear the update state for. Must be non-empty. - pub(crate) targets: BTreeSet, + async fn post_run_rack_setup( + rqctx: RequestContext, + ) -> Result, HttpError> { + let ctx = rqctx.context(); + let log = &rqctx.log; - /// Options for clearing update state - pub(crate) options: ClearUpdateStateOptions, -} + let sled_agent_addr = ctx.bootstrap_agent_addr().map_err(|err| { + HttpError::for_bad_request(None, format!("{err:#}")) + })?; -#[derive(Clone, Debug, JsonSchema, Deserialize)] -pub(crate) struct ClearUpdateStateOptions { - /// If passed in, fails the clear update state operation with a simulated - /// error. - pub(crate) test_error: Option, -} + let request = { + let mut config = ctx.rss_config.lock().unwrap(); + config.start_rss_request(&ctx.bootstrap_peers, log).map_err( + |err| HttpError::for_bad_request(None, format!("{err:#}")), + )? + }; -#[derive(Clone, Debug, Default, JsonSchema, Serialize)] -pub(crate) struct ClearUpdateStateResponse { - /// The SPs for which update data was cleared. 
- pub(crate) cleared: BTreeSet, + slog::info!( + ctx.log, + "Sending RSS initialize request to {}", + sled_agent_addr + ); + let client = bootstrap_agent_client::Client::new( + &format!("http://{}", sled_agent_addr), + ctx.log.new(slog::o!("component" => "bootstrap client")), + ); + + let init_id = client + .rack_initialize(&request) + .await + .map_err(|err| { + use bootstrap_agent_client::Error as BaError; + match err { + BaError::CommunicationError(err) => { + let message = + format!("Failed to send rack setup request: {err}"); + HttpError { + status_code: http::StatusCode::SERVICE_UNAVAILABLE, + error_code: None, + external_message: message.clone(), + internal_message: message, + } + } + other => HttpError::for_bad_request( + None, + format!("Rack setup request failed: {other}"), + ), + } + })? + .into_inner(); - /// The SPs that had no update state to clear. - pub(crate) no_update_data: BTreeSet, -} + Ok(HttpResponseOk(init_id)) + } -#[derive(Clone, Debug, JsonSchema, Deserialize)] -pub(crate) struct AbortUpdateOptions { - /// The message to abort the update with. - pub(crate) message: String, + async fn post_run_rack_reset( + rqctx: RequestContext, + ) -> Result, HttpError> { + let ctx = rqctx.context(); - /// If passed in, fails the force cancel update operation with a simulated - /// error. - pub(crate) test_error: Option, -} + let sled_agent_addr = ctx.bootstrap_agent_addr().map_err(|err| { + HttpError::for_bad_request(None, format!("{err:#}")) + })?; -#[derive(Copy, Clone, Debug, JsonSchema, Deserialize, PartialEq, Eq)] -#[serde(rename_all = "snake_case", tag = "kind", content = "content")] -pub(crate) enum UpdateTestError { - /// Simulate an error where the operation fails to complete. - Fail, - - /// Simulate an issue where the operation times out. - Timeout { - /// The number of seconds to time out after. 
- secs: u64, - }, -} + slog::info!( + ctx.log, + "Sending RSS reset request to {}", + sled_agent_addr + ); + let client = bootstrap_agent_client::Client::new( + &format!("http://{}", sled_agent_addr), + ctx.log.new(slog::o!("component" => "bootstrap client")), + ); + + let reset_id = client + .rack_reset() + .await + .map_err(|err| { + use bootstrap_agent_client::Error as BaError; + match err { + BaError::CommunicationError(err) => { + let message = + format!("Failed to send rack reset request: {err}"); + HttpError { + status_code: http::StatusCode::SERVICE_UNAVAILABLE, + error_code: None, + external_message: message.clone(), + internal_message: message, + } + } + other => HttpError::for_bad_request( + None, + format!("Rack setup request failed: {other}"), + ), + } + })? + .into_inner(); -impl UpdateTestError { - pub(crate) async fn into_http_error( - self, - log: &slog::Logger, - reason: &str, - ) -> HttpError { - let message = self.into_error_string(log, reason).await; - HttpError::for_bad_request(None, message) + Ok(HttpResponseOk(reset_id)) } - pub(crate) async fn into_error_string( - self, - log: &slog::Logger, - reason: &str, - ) -> String { - match self { - UpdateTestError::Fail => { - format!("Simulated failure while {reason}") - } - UpdateTestError::Timeout { secs } => { - slog::info!(log, "Simulating timeout while {reason}"); - // 15 seconds should be enough to cause a timeout. 
- tokio::time::sleep(Duration::from_secs(secs)).await; - "XXX request should time out before this is hit".into() + async fn get_inventory( + rqctx: RequestContext, + body_params: TypedBody, + ) -> Result, HttpError> { + let GetInventoryParams { force_refresh } = body_params.into_inner(); + match rqctx + .context() + .mgs_handle + .get_inventory_refreshing_sps(force_refresh) + .await + { + Ok(response) => Ok(HttpResponseOk(response)), + Err(GetInventoryError::InvalidSpIdentifier) => { + Err(HttpError::for_unavail( + None, + "Invalid SP identifier in request".into(), + )) } + Err(GetInventoryError::ShutdownInProgress) => Err( + HttpError::for_unavail(None, "Server is shutting down".into()), + ), } } -} -#[derive(Clone, Debug, JsonSchema, Serialize)] -#[serde(rename_all = "snake_case")] -pub struct GetBaseboardResponse { - pub baseboard: Option, -} + async fn put_repository( + rqctx: RequestContext, + body: StreamingBody, + ) -> Result { + let rqctx = rqctx.context(); -/// Report the configured baseboard details -#[endpoint { - method = GET, - path = "/baseboard", -}] -async fn get_baseboard( - rqctx: RequestContext, -) -> Result, HttpError> { - let rqctx = rqctx.context(); - Ok(HttpResponseOk(GetBaseboardResponse { - baseboard: rqctx.baseboard.clone(), - })) -} + rqctx.update_tracker.put_repository(body.into_stream()).await?; -/// All the fields of this response are optional, because it's possible we don't -/// know any of them (yet) if MGS has not yet finished discovering its location -/// or (ever) if we're running in a dev environment that doesn't support -/// MGS-location / baseboard mapping. -#[derive(Clone, Debug, JsonSchema, Serialize)] -#[serde(rename_all = "snake_case")] -pub struct GetLocationResponse { - /// The identity of our sled (where wicketd is running). - pub sled_id: Option, - /// The baseboard of our sled (where wicketd is running). - pub sled_baseboard: Option, - /// The baseboard of the switch our sled is physically connected to. 
- pub switch_baseboard: Option, - /// The identity of the switch our sled is physically connected to. - pub switch_id: Option, -} + Ok(HttpResponseUpdatedNoContent()) + } -/// Report the identity of the sled and switch we're currently running on / -/// connected to. -#[endpoint { - method = GET, - path = "/location", -}] -async fn get_location( - rqctx: RequestContext, -) -> Result, HttpError> { - let rqctx = rqctx.context(); - let inventory = inventory_or_unavail(&rqctx.mgs_handle).await?; - - let switch_id = rqctx.local_switch_id().await; - let sled_baseboard = rqctx.baseboard.clone(); - - let mut switch_baseboard = None; - let mut sled_id = None; - - for sp in &inventory.sps { - if Some(sp.id) == switch_id { - switch_baseboard = sp.state.as_ref().map(|state| { - // TODO-correctness `new_gimlet` isn't the right name: this is a - // sidecar baseboard. - Baseboard::new_gimlet( - state.serial_number.clone(), - state.model.clone(), - i64::from(state.revision), - ) - }); - } else if let (Some(sled_baseboard), Some(state)) = - (sled_baseboard.as_ref(), sp.state.as_ref()) - { - if sled_baseboard.identifier() == state.serial_number - && sled_baseboard.model() == state.model - && sled_baseboard.revision() == i64::from(state.revision) + async fn get_artifacts_and_event_reports( + rqctx: RequestContext, + ) -> Result, HttpError> + { + let response = + rqctx.context().update_tracker.artifacts_and_event_reports().await; + Ok(HttpResponseOk(response)) + } + + async fn get_baseboard( + rqctx: RequestContext, + ) -> Result, HttpError> { + let rqctx = rqctx.context(); + Ok(HttpResponseOk(GetBaseboardResponse { + baseboard: rqctx.baseboard.clone(), + })) + } + + async fn get_location( + rqctx: RequestContext, + ) -> Result, HttpError> { + let rqctx = rqctx.context(); + let inventory = inventory_or_unavail(&rqctx.mgs_handle).await?; + + let switch_id = rqctx.local_switch_id().await; + let sled_baseboard = rqctx.baseboard.clone(); + + let mut switch_baseboard = None; + let mut 
sled_id = None; + + for sp in &inventory.sps { + if Some(sp.id) == switch_id { + switch_baseboard = sp.state.as_ref().map(|state| { + // TODO-correctness `new_gimlet` isn't the right name: this is a + // sidecar baseboard. + Baseboard::new_gimlet( + state.serial_number.clone(), + state.model.clone(), + i64::from(state.revision), + ) + }); + } else if let (Some(sled_baseboard), Some(state)) = + (sled_baseboard.as_ref(), sp.state.as_ref()) { - sled_id = Some(sp.id); + if sled_baseboard.identifier() == state.serial_number + && sled_baseboard.model() == state.model + && sled_baseboard.revision() == i64::from(state.revision) + { + sled_id = Some(sp.id); + } } } - } - - Ok(HttpResponseOk(GetLocationResponse { - sled_id, - sled_baseboard, - switch_baseboard, - switch_id, - })) -} -/// An endpoint to start updating one or more sleds, switches and PSCs. -#[endpoint { - method = POST, - path = "/update", -}] -async fn post_start_update( - rqctx: RequestContext, - params: TypedBody, -) -> Result { - let log = &rqctx.log; - let rqctx = rqctx.context(); - let params = params.into_inner(); - - if params.targets.is_empty() { - return Err(HttpError::for_bad_request( - None, - "No update targets specified".into(), - )); + Ok(HttpResponseOk(GetLocationResponse { + sled_id, + sled_baseboard, + switch_baseboard, + switch_id, + })) } - // Can we update the target SPs? We refuse to update if, for any target SP: - // - // 1. We haven't pulled its state in our inventory (most likely cause: the - // cubby is empty; less likely cause: the SP is misbehaving, which will - // make updating it very unlikely to work anyway) - // 2. We have pulled its state but our hardware manager says we can't - // update it (most likely cause: the target is the sled we're currently - // running on; less likely cause: our hardware manager failed to get our - // local identifying information, and it refuses to update this target - // out of an abundance of caution). 
- // - // First, get our most-recently-cached inventory view. (Only wait 80% of - // WICKETD_TIMEOUT for this: if even a cached inventory isn't available, - // it's because we've never established contact with MGS. In that case, we - // should produce a useful error message rather than timing out on the - // client.) - let inventory = match tokio::time::timeout( - WICKETD_TIMEOUT.mul_f32(0.8), - rqctx.mgs_handle.get_cached_inventory(), - ) - .await - { - Ok(Ok(inventory)) => inventory, - Ok(Err(ShutdownInProgress)) => { - return Err(HttpError::for_unavail( + async fn post_start_update( + rqctx: RequestContext, + params: TypedBody, + ) -> Result { + let log = &rqctx.log; + let rqctx = rqctx.context(); + let params = params.into_inner(); + + if params.targets.is_empty() { + return Err(HttpError::for_bad_request( None, - "Server is shutting down".into(), + "No update targets specified".into(), )); } - Err(_) => { - // Have to construct an HttpError manually because - // HttpError::for_unavail doesn't accept an external message. - let message = - "Rack inventory not yet available (is MGS alive?)".to_owned(); - return Err(HttpError { - status_code: http::StatusCode::SERVICE_UNAVAILABLE, - error_code: None, - external_message: message.clone(), - internal_message: message, - }); - } - }; - // Error cases. - let mut inventory_absent = BTreeSet::new(); - let mut self_update = None; - let mut maybe_self_update = BTreeSet::new(); + // Can we update the target SPs? We refuse to update if, for any target SP: + // + // 1. We haven't pulled its state in our inventory (most likely cause: the + // cubby is empty; less likely cause: the SP is misbehaving, which will + // make updating it very unlikely to work anyway) + // 2. 
We have pulled its state but our hardware manager says we can't + // update it (most likely cause: the target is the sled we're currently + // running on; less likely cause: our hardware manager failed to get our + // local identifying information, and it refuses to update this target + // out of an abundance of caution). + // + // First, get our most-recently-cached inventory view. (Only wait 80% of + // WICKETD_TIMEOUT for this: if even a cached inventory isn't available, + // it's because we've never established contact with MGS. In that case, we + // should produce a useful error message rather than timing out on the + // client.) + let inventory = match tokio::time::timeout( + WICKETD_TIMEOUT.mul_f32(0.8), + rqctx.mgs_handle.get_cached_inventory(), + ) + .await + { + Ok(Ok(inventory)) => inventory, + Ok(Err(ShutdownInProgress)) => { + return Err(HttpError::for_unavail( + None, + "Server is shutting down".into(), + )); + } + Err(_) => { + // Have to construct an HttpError manually because + // HttpError::for_unavail doesn't accept an external message. + let message = + "Rack inventory not yet available (is MGS alive?)" + .to_owned(); + return Err(HttpError { + status_code: http::StatusCode::SERVICE_UNAVAILABLE, + error_code: None, + external_message: message.clone(), + internal_message: message, + }); + } + }; - // Next, do we have the states of the target SP? - let sp_states = match inventory { - GetInventoryResponse::Response { inventory, .. } => inventory - .sps - .into_iter() - .filter_map(|sp| { - if params.targets.contains(&sp.id) { - if let Some(sp_state) = sp.state { - Some((sp.id, sp_state)) + // Error cases. + let mut inventory_absent = BTreeSet::new(); + let mut self_update = None; + let mut maybe_self_update = BTreeSet::new(); + + // Next, do we have the states of the target SP? + let sp_states = match inventory { + GetInventoryResponse::Response { inventory, .. 
} => inventory + .sps + .into_iter() + .filter_map(|sp| { + if params.targets.contains(&sp.id) { + if let Some(sp_state) = sp.state { + Some((sp.id, sp_state)) + } else { + None + } } else { None } - } else { - None - } - }) - .collect(), - GetInventoryResponse::Unavailable => BTreeMap::new(), - }; - - for target in ¶ms.targets { - let sp_state = match sp_states.get(target) { - Some(sp_state) => sp_state, - None => { - // The state isn't present, so add to inventory_absent. - inventory_absent.insert(*target); - continue; - } + }) + .collect(), + GetInventoryResponse::Unavailable => BTreeMap::new(), }; - // If we have the state of the SP, are we allowed to update it? We - // refuse to try to update our own sled. - match &rqctx.baseboard { - Some(baseboard) => { - if baseboard.identifier() == sp_state.serial_number - && baseboard.model() == sp_state.model - && baseboard.revision() == i64::from(sp_state.revision) - { - self_update = Some(*target); + for target in ¶ms.targets { + let sp_state = match sp_states.get(target) { + Some(sp_state) => sp_state, + None => { + // The state isn't present, so add to inventory_absent. + inventory_absent.insert(*target); continue; } - } - None => { - // We don't know our own baseboard, which is a very questionable - // state to be in! For now, we will hard-code the possibly - // locations where we could be running: scrimlets can only be in - // cubbies 14 or 16, so we refuse to update either of those. - let target_is_scrimlet = matches!( - (target.type_, target.slot), - (SpType::Sled, 14 | 16) - ); - if target_is_scrimlet { - maybe_self_update.insert(*target); - continue; + }; + + // If we have the state of the SP, are we allowed to update it? We + // refuse to try to update our own sled. 
+ match &rqctx.baseboard { + Some(baseboard) => { + if baseboard.identifier() == sp_state.serial_number + && baseboard.model() == sp_state.model + && baseboard.revision() == i64::from(sp_state.revision) + { + self_update = Some(*target); + continue; + } + } + None => { + // We don't know our own baseboard, which is a very questionable + // state to be in! For now, we will hard-code the possibly + // locations where we could be running: scrimlets can only be in + // cubbies 14 or 16, so we refuse to update either of those. + let target_is_scrimlet = matches!( + (target.type_, target.slot), + (SpType::Sled, 14 | 16) + ); + if target_is_scrimlet { + maybe_self_update.insert(*target); + continue; + } } } } - } - // Do we have any errors? - let mut errors = Vec::new(); - if !inventory_absent.is_empty() { - errors.push(format!( - "cannot update sleds (no inventory state present for {})", - sps_to_string(&inventory_absent) - )); - } - if let Some(self_update) = self_update { - errors.push(format!( - "cannot update sled where wicketd is running ({})", - SpIdentifierDisplay(self_update) - )); - } - if !maybe_self_update.is_empty() { - errors.push(format!( - "wicketd does not know its own baseboard details: \ + // Do we have any errors? 
+ let mut errors = Vec::new(); + if !inventory_absent.is_empty() { + errors.push(format!( + "cannot update sleds (no inventory state present for {})", + sps_to_string(&inventory_absent) + )); + } + if let Some(self_update) = self_update { + errors.push(format!( + "cannot update sled where wicketd is running ({})", + SpIdentifierDisplay(self_update) + )); + } + if !maybe_self_update.is_empty() { + errors.push(format!( + "wicketd does not know its own baseboard details: \ refusing to update either scrimlet ({})", - sps_to_string(&inventory_absent) - )); + sps_to_string(&inventory_absent) + )); + } + + if let Some(test_error) = ¶ms.options.test_error { + errors.push( + test_error.into_error_string(log, "starting update").await, + ); + } + + let start_update_errors = if errors.is_empty() { + // No errors: we can try and proceed with this update. + match rqctx + .update_tracker + .start(params.targets, params.options) + .await + { + Ok(()) => return Ok(HttpResponseUpdatedNoContent {}), + Err(errors) => errors, + } + } else { + // We've already found errors, so all we want to do is to check whether + // the update tracker thinks there are any errors as well. + match rqctx.update_tracker.update_pre_checks(params.targets).await { + Ok(()) => Vec::new(), + Err(errors) => errors, + } + }; + + errors + .extend(start_update_errors.iter().map(|error| error.to_string())); + + // If we get here, we have errors to report. 
+ + match errors.len() { + 0 => { + unreachable!( + "we already returned Ok(_) above if there were no errors" + ) + } + 1 => { + return Err(HttpError::for_bad_request( + None, + errors.pop().unwrap(), + )); + } + _ => { + return Err(HttpError::for_bad_request( + None, + format!( + "multiple errors encountered:\n - {}", + itertools::join(errors, "\n - ") + ), + )); + } + } } - if let Some(test_error) = ¶ms.options.test_error { - errors.push(test_error.into_error_string(log, "starting update").await); + async fn get_update_sp( + rqctx: RequestContext, + target: Path, + ) -> Result, HttpError> { + let event_report = rqctx + .context() + .update_tracker + .event_report(target.into_inner()) + .await; + Ok(HttpResponseOk(event_report)) } - let start_update_errors = if errors.is_empty() { - // No errors: we can try and proceed with this update. - match rqctx.update_tracker.start(params.targets, params.options).await { - Ok(()) => return Ok(HttpResponseUpdatedNoContent {}), - Err(errors) => errors, - } - } else { - // We've already found errors, so all we want to do is to check whether - // the update tracker thinks there are any errors as well. - match rqctx.update_tracker.update_pre_checks(params.targets).await { - Ok(()) => Vec::new(), - Err(errors) => errors, + async fn post_abort_update( + rqctx: RequestContext, + target: Path, + opts: TypedBody, + ) -> Result { + let log = &rqctx.log; + let target = target.into_inner(); + + let opts = opts.into_inner(); + if let Some(test_error) = opts.test_error { + return Err(test_error + .into_http_error(log, "aborting update") + .await); } - }; - errors.extend(start_update_errors.iter().map(|error| error.to_string())); + match rqctx + .context() + .update_tracker + .abort_update(target, opts.message) + .await + { + Ok(()) => Ok(HttpResponseUpdatedNoContent {}), + Err(err) => Err(err.to_http_error()), + } + } - // If we get here, we have errors to report. 
+ async fn post_clear_update_state( + rqctx: RequestContext, + params: TypedBody, + ) -> Result, HttpError> { + let log = &rqctx.log; + let rqctx = rqctx.context(); + let params = params.into_inner(); - match errors.len() { - 0 => { - unreachable!( - "we already returned Ok(_) above if there were no errors" - ) - } - 1 => { + if params.targets.is_empty() { return Err(HttpError::for_bad_request( None, - errors.pop().unwrap(), + "No targets specified".into(), )); } - _ => { - return Err(HttpError::for_bad_request( - None, - format!( - "multiple errors encountered:\n - {}", - itertools::join(errors, "\n - ") - ), - )); - } - } -} -/// An endpoint to get the status of any update being performed or recently -/// completed on a single SP. -#[endpoint { - method = GET, - path = "/update/{type}/{slot}", -}] -async fn get_update_sp( - rqctx: RequestContext, - target: Path, -) -> Result, HttpError> { - let event_report = - rqctx.context().update_tracker.event_report(target.into_inner()).await; - Ok(HttpResponseOk(event_report)) -} - -/// Forcibly cancels a running update. -/// -/// This is a potentially dangerous operation, but one that is sometimes -/// required. A machine reset might be required after this operation completes. 
-#[endpoint { - method = POST, - path = "/abort-update/{type}/{slot}", -}] -async fn post_abort_update( - rqctx: RequestContext, - target: Path, - opts: TypedBody, -) -> Result { - let log = &rqctx.log; - let target = target.into_inner(); - - let opts = opts.into_inner(); - if let Some(test_error) = opts.test_error { - return Err(test_error.into_http_error(log, "aborting update").await); - } + if let Some(test_error) = params.options.test_error { + return Err(test_error + .into_http_error(log, "clearing update state") + .await); + } - match rqctx - .context() - .update_tracker - .abort_update(target, opts.message) - .await - { - Ok(()) => Ok(HttpResponseUpdatedNoContent {}), - Err(err) => Err(err.to_http_error()), + match rqctx.update_tracker.clear_update_state(params.targets).await { + Ok(response) => Ok(HttpResponseOk(response)), + Err(err) => Err(err.to_http_error()), + } } -} -/// Resets update state for a sled. -/// -/// Use this to clear update state after a failed update. -#[endpoint { - method = POST, - path = "/clear-update-state", -}] -async fn post_clear_update_state( - rqctx: RequestContext, - params: TypedBody, -) -> Result, HttpError> { - let log = &rqctx.log; - let rqctx = rqctx.context(); - let params = params.into_inner(); - - if params.targets.is_empty() { - return Err(HttpError::for_bad_request( - None, - "No targets specified".into(), - )); - } + async fn post_ignition_command( + rqctx: RequestContext, + path: Path, + ) -> Result { + let apictx = rqctx.context(); + let PathSpIgnitionCommand { type_, slot, command } = path.into_inner(); - if let Some(test_error) = params.options.test_error { - return Err(test_error - .into_http_error(log, "clearing update state") - .await); - } + apictx + .mgs_client + .ignition_command(type_, slot, command) + .await + .map_err(http_error_from_client_error)?; - match rqctx.update_tracker.clear_update_state(params.targets).await { - Ok(response) => Ok(HttpResponseOk(response)), - Err(err) => 
Err(err.to_http_error()), + Ok(HttpResponseUpdatedNoContent()) } -} -#[derive(Serialize, Deserialize, JsonSchema)] -struct PathSpIgnitionCommand { - #[serde(rename = "type")] - type_: SpType, - slot: u32, - command: IgnitionCommand, -} - -/// Send an ignition command targeting a specific SP. -/// -/// This endpoint acts as a proxy to the MGS endpoint performing the same -/// function, allowing wicket to communicate exclusively with wicketd (even -/// though wicketd adds no meaningful functionality here beyond what MGS -/// offers). -#[endpoint { - method = POST, - path = "/ignition/{type}/{slot}/{command}", -}] -async fn post_ignition_command( - rqctx: RequestContext, - path: Path, -) -> Result { - let apictx = rqctx.context(); - let PathSpIgnitionCommand { type_, slot, command } = path.into_inner(); - - apictx - .mgs_client - .ignition_command(type_, slot, command) - .await - .map_err(http_error_from_client_error)?; - - Ok(HttpResponseUpdatedNoContent()) -} - -/// Options provided to the preflight uplink check. -#[derive(Clone, Debug, JsonSchema, Serialize, Deserialize)] -#[serde(rename_all = "snake_case")] -pub struct PreflightUplinkCheckOptions { - /// DNS name to query. - pub dns_name_to_query: Option, -} - -/// An endpoint to start a preflight check for uplink configuration. 
-#[endpoint { - method = POST, - path = "/preflight/uplink", -}] -async fn post_start_preflight_uplink_check( - rqctx: RequestContext, - body: TypedBody, -) -> Result { - let rqctx = rqctx.context(); - let options = body.into_inner(); - - let our_switch_location = match rqctx.local_switch_id().await { - Some(SpIdentifier { slot, type_: SpType::Switch }) => match slot { - 0 => SwitchLocation::Switch0, - 1 => SwitchLocation::Switch1, - _ => { + async fn post_start_preflight_uplink_check( + rqctx: RequestContext, + body: TypedBody, + ) -> Result { + let rqctx = rqctx.context(); + let options = body.into_inner(); + + let our_switch_location = match rqctx.local_switch_id().await { + Some(SpIdentifier { slot, type_: SpType::Switch }) => match slot { + 0 => SwitchLocation::Switch0, + 1 => SwitchLocation::Switch1, + _ => { + return Err(HttpError::for_internal_error(format!( + "unexpected switch slot {slot}" + ))); + } + }, + Some(other) => { return Err(HttpError::for_internal_error(format!( - "unexpected switch slot {slot}" + "unexpected switch SP identifier {other:?}" ))); } - }, - Some(other) => { - return Err(HttpError::for_internal_error(format!( - "unexpected switch SP identifier {other:?}" - ))); - } - None => { - return Err(HttpError::for_unavail( - Some("UnknownSwitchLocation".to_string()), - "local switch location not yet determined".to_string(), - )); - } - }; + None => { + return Err(HttpError::for_unavail( + Some("UnknownSwitchLocation".to_string()), + "local switch location not yet determined".to_string(), + )); + } + }; - let (network_config, dns_servers, ntp_servers) = { - let rss_config = rqctx.rss_config.lock().unwrap(); + let (network_config, dns_servers, ntp_servers) = { + let rss_config = rqctx.rss_config.lock().unwrap(); - let network_config = rss_config - .user_specified_rack_network_config() - .cloned() - .ok_or_else(|| { - HttpError::for_bad_request( - None, - "uplink preflight check requires setting \ + let network_config = rss_config + 
.user_specified_rack_network_config() + .cloned() + .ok_or_else(|| { + HttpError::for_bad_request( + None, + "uplink preflight check requires setting \ the uplink config for RSS" - .to_string(), - ) - })?; + .to_string(), + ) + })?; + + ( + network_config, + rss_config.dns_servers().to_vec(), + rss_config.ntp_servers().to_vec(), + ) + }; - ( - network_config, - rss_config.dns_servers().to_vec(), - rss_config.ntp_servers().to_vec(), - ) - }; - - match rqctx - .preflight_checker - .uplink_start( - network_config, - dns_servers, - ntp_servers, - our_switch_location, - options.dns_name_to_query, - ) - .await - { - Ok(()) => Ok(HttpResponseUpdatedNoContent {}), - Err(err) => Err(HttpError::for_client_error( - None, - StatusCode::TOO_MANY_REQUESTS, - err.to_string(), - )), + match rqctx + .preflight_checker + .uplink_start( + network_config, + dns_servers, + ntp_servers, + our_switch_location, + options.dns_name_to_query, + ) + .await + { + Ok(()) => Ok(HttpResponseUpdatedNoContent {}), + Err(err) => Err(HttpError::for_client_error( + None, + StatusCode::TOO_MANY_REQUESTS, + err.to_string(), + )), + } } -} -/// An endpoint to get the report for the most recent (or still running) -/// preflight uplink check. -#[endpoint { - method = GET, - path = "/preflight/uplink", -}] -async fn get_preflight_uplink_report( - rqctx: RequestContext, -) -> Result, HttpError> { - let rqctx = rqctx.context(); - - match rqctx.preflight_checker.uplink_event_report() { + async fn get_preflight_uplink_report( + rqctx: RequestContext, + ) -> Result< + HttpResponseOk, + HttpError, + > { + let rqctx = rqctx.context(); + + match rqctx.preflight_checker.uplink_event_report() { Some(report) => Ok(HttpResponseOk(report)), None => Err(HttpError::for_bad_request( None, @@ -1317,53 +799,62 @@ async fn get_preflight_uplink_report( .to_string(), )), } -} - -/// An endpoint instructing wicketd to reload its SMF config properties. 
-/// -/// The only expected client of this endpoint is `curl` from wicketd's SMF -/// `refresh` method, but other clients hitting it is harmless. -#[endpoint { - method = POST, - path = "/reload-config", -}] -async fn post_reload_config( - rqctx: RequestContext, -) -> Result { - let smf_values = SmfConfigValues::read_current().map_err(|err| { - HttpError::for_unavail( - None, - format!("failed to read SMF values: {err}"), - ) - })?; - - let rqctx = rqctx.context(); - - // We do not allow a config reload to change our bound address; return an - // error if the caller is attempting to do so. - if rqctx.bind_address != smf_values.address { - return Err(HttpError::for_bad_request( - None, - "listening address cannot be reconfigured".to_string(), - )); } - if let Some(rack_subnet) = smf_values.rack_subnet { - let resolver = Resolver::new_from_subnet( - rqctx.log.new(o!("component" => "InternalDnsResolver")), - rack_subnet, - ) - .map_err(|err| { + async fn post_reload_config( + rqctx: RequestContext, + ) -> Result { + let smf_values = SmfConfigValues::read_current().map_err(|err| { HttpError::for_unavail( None, - format!("failed to create internal DNS resolver: {err}"), + format!("failed to read SMF values: {err}"), ) })?; - *rqctx.internal_dns_resolver.lock().unwrap() = Some(resolver); + let rqctx = rqctx.context(); + + // We do not allow a config reload to change our bound address; return an + // error if the caller is attempting to do so. 
+ if rqctx.bind_address != smf_values.address { + return Err(HttpError::for_bad_request( + None, + "listening address cannot be reconfigured".to_string(), + )); + } + + if let Some(rack_subnet) = smf_values.rack_subnet { + let resolver = Resolver::new_from_subnet( + rqctx.log.new(o!("component" => "InternalDnsResolver")), + rack_subnet, + ) + .map_err(|err| { + HttpError::for_unavail( + None, + format!("failed to create internal DNS resolver: {err}"), + ) + })?; + + *rqctx.internal_dns_resolver.lock().unwrap() = Some(resolver); + } + + Ok(HttpResponseUpdatedNoContent()) } +} - Ok(HttpResponseUpdatedNoContent()) +// Get the current inventory or return a 503 Unavailable. +async fn inventory_or_unavail( + mgs_handle: &MgsHandle, +) -> Result { + match mgs_handle.get_cached_inventory().await { + Ok(GetInventoryResponse::Response { inventory, .. }) => Ok(inventory), + Ok(GetInventoryResponse::Unavailable) => Err(HttpError::for_unavail( + None, + "Rack inventory not yet available".into(), + )), + Err(ShutdownInProgress) => { + Err(HttpError::for_unavail(None, "Server is shutting down".into())) + } + } } fn http_error_from_client_error( diff --git a/wicketd/src/installinator_progress.rs b/wicketd/src/installinator_progress.rs index 77baec2c94..7d076e7b0e 100644 --- a/wicketd/src/installinator_progress.rs +++ b/wicketd/src/installinator_progress.rs @@ -12,7 +12,7 @@ use std::{ sync::{Arc, Mutex}, }; -use installinator_artifactd::EventReportStatus; +use installinator_api::EventReportStatus; use tokio::sync::{oneshot, watch}; use update_engine::events::StepEventIsTerminal; use uuid::Uuid; diff --git a/wicketd/src/lib.rs b/wicketd/src/lib.rs index 5926fc468d..66c6bd41e8 100644 --- a/wicketd/src/lib.rs +++ b/wicketd/src/lib.rs @@ -9,15 +9,17 @@ mod context; mod helpers; mod http_entrypoints; mod installinator_progress; -mod inventory; pub mod mgs; mod nexus_proxy; mod preflight_check; mod rss_config; mod update_tracker; -use anyhow::{anyhow, Context, Result}; -use 
artifacts::{WicketdArtifactServer, WicketdArtifactStore}; +use anyhow::{anyhow, bail, Context, Result}; +use artifacts::{ + WicketdArtifactStore, WicketdInstallinatorApiImpl, + WicketdInstallinatorContext, +}; use bootstrap_addrs::BootstrapPeers; pub use config::Config; pub(crate) use context::ServerContext; @@ -25,7 +27,6 @@ use display_error_chain::DisplayErrorChain; use dropshot::{ConfigDropshot, HandlerTaskMode, HttpServer}; pub use installinator_progress::{IprUpdateTracker, RunningUpdateState}; use internal_dns::resolver::Resolver; -pub use inventory::{RackV1Inventory, SpInventory}; use mgs::make_mgs_client; pub(crate) use mgs::{MgsHandle, MgsManager}; use nexus_proxy::NexusTcpProxy; @@ -42,18 +43,6 @@ use std::{ }; pub use update_tracker::{StartUpdateError, UpdateTracker}; -/// Run the OpenAPI generator for the API; which emits the OpenAPI spec -/// to stdout. -pub fn run_openapi() -> Result<(), String> { - http_entrypoints::api() - .openapi("Oxide Technician Port Control Service", "0.0.1") - .description("API for use by the technician port TUI: wicket") - .contact_url("https://oxide.computer") - .contact_email("api@oxide.computer") - .write(&mut std::io::stdout()) - .map_err(|e| e.to_string()) -} - /// Command line arguments for wicketd pub struct Args { pub address: SocketAddrV6, @@ -118,7 +107,7 @@ impl SmfConfigValues { pub struct Server { pub wicketd_server: HttpServer, - pub artifact_server: HttpServer, + pub installinator_server: HttpServer, pub artifact_store: WicketdArtifactStore, pub update_tracker: Arc, pub ipr_update_tracker: IprUpdateTracker, @@ -127,14 +116,14 @@ pub struct Server { impl Server { /// Run an instance of the wicketd server - pub async fn start(log: slog::Logger, args: Args) -> Result { + pub async fn start(log: slog::Logger, args: Args) -> anyhow::Result { let (drain, registration) = slog_dtrace::with_drain(log); let log = slog::Logger::root(drain.fuse(), slog::o!(FileKv)); if let slog_dtrace::ProbeRegistration::Failed(e) = 
registration { let msg = format!("failed to register DTrace probes: {}", e); error!(log, "{}", msg); - return Err(msg); + bail!(msg); } else { debug!(log, "registered DTrace probes"); }; @@ -174,7 +163,8 @@ impl Server { addr, ) .map_err(|err| { - format!("Could not create internal DNS resolver: {err}") + anyhow!(err) + .context("Could not create internal DNS resolver") }) }) .transpose()?; @@ -186,7 +176,9 @@ impl Server { &log, ) .await - .map_err(|err| format!("failed to start Nexus TCP proxy: {err}"))?; + .map_err(|err| { + anyhow!(err).context("failed to start Nexus TCP proxy") + })?; let wicketd_server = { let ds_log = log.new(o!("component" => "dropshot (wicketd)")); @@ -209,25 +201,39 @@ impl Server { }, &ds_log, ) - .map_err(|err| format!("initializing http server: {}", err))? + .map_err(|err| anyhow!(err).context("initializing http server"))? .start() }; - let server = - WicketdArtifactServer::new(&log, store.clone(), ipr_artifact); - let artifact_server = installinator_artifactd::ArtifactServer::new( - server, - args.artifact_address, - &log, - ) - .start() - .map_err(|error| { - format!("failed to start artifact server: {error:?}") - })?; + let installinator_server = { + let installinator_config = installinator_api::default_config( + SocketAddr::V6(args.artifact_address), + ); + let api_description = + installinator_api::installinator_api::api_description::< + WicketdInstallinatorApiImpl, + >()?; + + dropshot::HttpServerStarter::new( + &installinator_config, + api_description, + WicketdInstallinatorContext::new( + &log, + store.clone(), + ipr_artifact, + ), + &log, + ) + .map_err(|err| { + anyhow!(err) + .context("failed to create installinator artifact server") + })? 
+ .start() + }; Ok(Self { wicketd_server, - artifact_server, + installinator_server, artifact_store: store, update_tracker, ipr_update_tracker, @@ -240,7 +246,7 @@ impl Server { self.wicketd_server.close().await.map_err(|error| { anyhow!("error closing wicketd server: {error}") })?; - self.artifact_server.close().await.map_err(|error| { + self.installinator_server.close().await.map_err(|error| { anyhow!("error closing artifact server: {error}") })?; self.nexus_tcp_proxy.shutdown(); @@ -257,7 +263,7 @@ impl Server { Err(err) => Err(format!("running wicketd server: {err}")), } } - res = self.artifact_server => { + res = self.installinator_server => { match res { Ok(()) => Err("artifact server exited unexpectedly".to_owned()), // The artifact server returns an anyhow::Error, which has a diff --git a/wicketd/src/mgs.rs b/wicketd/src/mgs.rs index 8cc773786c..da09ac5802 100644 --- a/wicketd/src/mgs.rs +++ b/wicketd/src/mgs.rs @@ -5,17 +5,16 @@ //! The collection of tasks used for interacting with MGS and maintaining //! runtime state. -use crate::{RackV1Inventory, SpInventory}; use futures::StreamExt; -use gateway_client::types::{SpIdentifier, SpIgnition}; -use schemars::JsonSchema; -use serde::Serialize; +use gateway_client::types::SpIgnition; use slog::{info, o, warn, Logger}; use std::collections::{BTreeMap, BTreeSet}; use std::net::SocketAddrV6; use tokio::sync::{mpsc, oneshot}; use tokio::time::{Duration, Instant}; use tokio_stream::StreamMap; +use wicket_common::inventory::{RackV1Inventory, SpIdentifier, SpInventory}; +use wicketd_api::GetInventoryResponse; use self::inventory::{ FetchedIgnitionState, FetchedSpData, IgnitionPresence, @@ -52,15 +51,6 @@ pub struct MgsHandle { tx: tokio::sync::mpsc::Sender, } -/// The response to a `get_inventory` call: the inventory known to wicketd, or a -/// notification that data is unavailable. 
-#[derive(Clone, Debug, JsonSchema, Serialize)] -#[serde(rename_all = "snake_case", tag = "type", content = "data")] -pub enum GetInventoryResponse { - Response { inventory: RackV1Inventory, mgs_last_seen: Duration }, - Unavailable, -} - /// Channel errors result only from system shutdown. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct ShutdownInProgress; diff --git a/wicketd/src/mgs/inventory.rs b/wicketd/src/mgs/inventory.rs index a9805b4c1d..7d9aa586ad 100644 --- a/wicketd/src/mgs/inventory.rs +++ b/wicketd/src/mgs/inventory.rs @@ -7,7 +7,6 @@ use gateway_client::types::RotState; use gateway_client::types::SpComponentCaboose; use gateway_client::types::SpComponentInfo; -use gateway_client::types::SpIdentifier; use gateway_client::types::SpIgnition; use gateway_client::types::SpState; use gateway_messages::SpComponent; @@ -21,8 +20,8 @@ use tokio::time::interval; use tokio::time::Duration; use tokio::time::Instant; use tokio_stream::wrappers::ReceiverStream; - -use crate::inventory::RotInventory; +use wicket_common::inventory::RotInventory; +use wicket_common::inventory::SpIdentifier; // Frequency at which we fetch state from our local ignition controller (via our // local sidecar SP) for the ignition state of all ignition targets in the rack. 
diff --git a/wicketd/src/preflight_check.rs b/wicketd/src/preflight_check.rs index 4cd17604a0..6863e41a84 100644 --- a/wicketd/src/preflight_check.rs +++ b/wicketd/src/preflight_check.rs @@ -9,19 +9,16 @@ use std::net::IpAddr; use std::sync::Arc; use std::sync::Mutex; use tokio::sync::oneshot; -use update_engine::events::EventReport; -use update_engine::GenericSpec; +use wicket_common::preflight_check::EventBuffer; +use wicket_common::preflight_check::EventReport; use wicket_common::rack_setup::UserSpecifiedRackNetworkConfig; mod uplink; -pub(crate) type UplinkEventReport = - EventReport>; - #[derive(Debug)] pub(crate) struct PreflightCheckerHandler { request_tx: flume::Sender, - uplink_event_buffer: Arc>>, + uplink_event_buffer: Arc>>, } impl PreflightCheckerHandler { @@ -78,12 +75,12 @@ impl PreflightCheckerHandler { Ok(()) } - pub(crate) fn uplink_event_report(&self) -> Option { + pub(crate) fn uplink_event_report(&self) -> Option { self.uplink_event_buffer .lock() .unwrap() .as_ref() - .map(|event_buffer| event_buffer.generate_report().into_generic()) + .map(|event_buffer| event_buffer.generate_report()) } } @@ -105,7 +102,7 @@ enum PreflightCheck { async fn preflight_task_main( request_rx: flume::Receiver, - uplink_event_buffer: Arc>>, + uplink_event_buffer: Arc>>, log: Logger, ) { while let Ok(request) = request_rx.recv_async().await { @@ -120,7 +117,7 @@ async fn preflight_task_main( } => { // New preflight check: create a new event buffer. 
*uplink_event_buffer.lock().unwrap() = - Some(uplink::EventBuffer::new(16)); + Some(EventBuffer::new(16)); // We've cleared the shared event buffer; release our caller // (they can now lock and check the event buffer while we run diff --git a/wicketd/src/preflight_check/uplink.rs b/wicketd/src/preflight_check/uplink.rs index 395fb8c795..36a4f61779 100644 --- a/wicketd/src/preflight_check/uplink.rs +++ b/wicketd/src/preflight_check/uplink.rs @@ -22,9 +22,6 @@ use omicron_common::api::internal::shared::PortSpeed as OmicronPortSpeed; use omicron_common::api::internal::shared::SwitchLocation; use omicron_common::OMICRON_DPD_TAG; use oxnet::IpNet; -use schemars::JsonSchema; -use serde::Deserialize; -use serde::Serialize; use slog::error; use slog::o; use slog::Logger; @@ -36,7 +33,6 @@ use std::sync::Arc; use std::sync::Mutex; use std::time::Duration; use std::time::Instant; -use thiserror::Error; use tokio::process::Command; use tokio::sync::mpsc; use trust_dns_resolver::config::NameServerConfigGroup; @@ -45,7 +41,16 @@ use trust_dns_resolver::config::ResolverOpts; use trust_dns_resolver::error::ResolveError; use trust_dns_resolver::error::ResolveErrorKind; use trust_dns_resolver::TokioAsyncResolver; -use update_engine::StepSpec; +use wicket_common::preflight_check::EventBuffer; +use wicket_common::preflight_check::StepContext; +use wicket_common::preflight_check::StepProgress; +use wicket_common::preflight_check::StepResult; +use wicket_common::preflight_check::StepSkipped; +use wicket_common::preflight_check::StepSuccess; +use wicket_common::preflight_check::StepWarning; +use wicket_common::preflight_check::UpdateEngine; +use wicket_common::preflight_check::UplinkPreflightStepId; +use wicket_common::preflight_check::UplinkPreflightTerminalError; use wicket_common::rack_setup::UserSpecifiedPortConfig; use wicket_common::rack_setup::UserSpecifiedRackNetworkConfig; @@ -865,73 +870,6 @@ struct RoutingSuccess { level2: L2Success, } -#[derive(Clone, Debug, Eq, PartialEq, 
Serialize, Deserialize, JsonSchema)] -#[serde(tag = "id", rename_all = "snake_case")] -pub(super) enum UplinkPreflightStepId { - ConfigureSwitch, - WaitForL1Link, - ConfigureAddress, - ConfigureRouting, - CheckExternalDnsConnectivity, - CheckExternalNtpConnectivity, - CleanupRouting, - CleanupAddress, - CleanupL1, -} - -type DpdError = dpd_client::Error; - -#[derive(Debug, Error)] -pub(crate) enum UplinkPreflightTerminalError { - #[error("invalid port name: {0}")] - InvalidPortName(String), - #[error("failed to connect to dpd to check for current configuration")] - GetCurrentConfig(#[source] DpdError), - #[error("uplink already configured - is rack already initialized?")] - UplinkAlreadyConfigured, - #[error("failed to create port {port_id:?}")] - ConfigurePort { - #[source] - err: DpdError, - port_id: PortId, - }, - #[error( - "failed to remove host OS route {destination} -> {nexthop}: {err}" - )] - RemoveHostRoute { err: String, destination: IpNet, nexthop: IpAddr }, - #[error("failed to remove uplink SMF property {property:?}: {err}")] - RemoveSmfProperty { property: String, err: String }, - #[error("failed to refresh uplink service config: {0}")] - RefreshUplinkSmf(String), - #[error("failed to clear settings for port {port_id:?}")] - UnconfigurePort { - #[source] - err: DpdError, - port_id: PortId, - }, -} - -impl update_engine::AsError for UplinkPreflightTerminalError { - fn as_error(&self) -> &(dyn std::error::Error + 'static) { - self - } -} - -#[derive(JsonSchema)] -pub(super) enum UplinkPreflightCheckSpec {} - -impl StepSpec for UplinkPreflightCheckSpec { - type Component = String; - type StepId = UplinkPreflightStepId; - type StepMetadata = (); - type ProgressMetadata = String; - type CompletionMetadata = Vec; - type SkippedMetadata = (); - type Error = UplinkPreflightTerminalError; -} - -update_engine::define_update_engine!(pub(super) UplinkPreflightCheckSpec); - #[derive(Debug, Default)] struct DnsLookupStep { // Usable output of this step: IP addrs of 
the NTP servers to use. diff --git a/wicketd/src/rss_config.rs b/wicketd/src/rss_config.rs index dde6d35da5..3894697dd2 100644 --- a/wicketd/src/rss_config.rs +++ b/wicketd/src/rss_config.rs @@ -5,11 +5,6 @@ //! Support for user-provided RSS configuration options. use crate::bootstrap_addrs::BootstrapPeers; -use crate::http_entrypoints::CertificateUploadResponse; -use crate::http_entrypoints::CurrentRssUserConfig; -use crate::http_entrypoints::CurrentRssUserConfigSensitive; -use crate::http_entrypoints::SetBgpAuthKeyStatus; -use crate::RackV1Inventory; use anyhow::anyhow; use anyhow::bail; use anyhow::Context; @@ -22,7 +17,6 @@ use bootstrap_agent_client::types::RackInitializeRequest; use bootstrap_agent_client::types::RecoverySiloConfig; use bootstrap_agent_client::types::UserId; use display_error_chain::DisplayErrorChain; -use gateway_client::types::SpType; use omicron_certificates::CertificateError; use omicron_common::address; use omicron_common::address::Ipv4Range; @@ -40,6 +34,8 @@ use std::mem; use std::net::IpAddr; use std::net::Ipv6Addr; use thiserror::Error; +use wicket_common::inventory::RackV1Inventory; +use wicket_common::inventory::SpType; use wicket_common::rack_setup::BgpAuthKey; use wicket_common::rack_setup::BgpAuthKeyId; use wicket_common::rack_setup::BgpAuthKeyStatus; @@ -50,6 +46,10 @@ use wicket_common::rack_setup::GetBgpAuthKeyInfoResponse; use wicket_common::rack_setup::PutRssUserConfigInsensitive; use wicket_common::rack_setup::UserSpecifiedPortConfig; use wicket_common::rack_setup::UserSpecifiedRackNetworkConfig; +use wicketd_api::CertificateUploadResponse; +use wicketd_api::CurrentRssUserConfig; +use wicketd_api::CurrentRssUserConfigSensitive; +use wicketd_api::SetBgpAuthKeyStatus; // TODO-correctness For now, we always use the same rack subnet when running // RSS. 
When we get to multirack, this will be wrong, but there are many other diff --git a/wicketd/src/update_tracker.rs b/wicketd/src/update_tracker.rs index 6de7090ce4..dee22f70c0 100644 --- a/wicketd/src/update_tracker.rs +++ b/wicketd/src/update_tracker.rs @@ -6,10 +6,6 @@ use crate::artifacts::WicketdArtifactStore; use crate::helpers::sps_to_string; -use crate::http_entrypoints::ClearUpdateStateResponse; -use crate::http_entrypoints::GetArtifactsAndEventReportsResponse; -use crate::http_entrypoints::StartUpdateOptions; -use crate::http_entrypoints::UpdateSimulatedResult; use crate::installinator_progress::IprStartReceiver; use crate::installinator_progress::IprUpdateTracker; use crate::mgs::make_mgs_client; @@ -33,8 +29,6 @@ use gateway_client::types::RotCfpaSlot; use gateway_client::types::RotImageError; use gateway_client::types::RotState; use gateway_client::types::SpComponentFirmwareSlot; -use gateway_client::types::SpIdentifier; -use gateway_client::types::SpType; use gateway_client::types::SpUpdateStatus; use gateway_messages::SpComponent; use gateway_messages::ROT_PAGE_SIZE; @@ -74,6 +68,11 @@ use update_engine::events::ProgressUnits; use update_engine::AbortHandle; use update_engine::StepSpec; use uuid::Uuid; +use wicket_common::inventory::SpIdentifier; +use wicket_common::inventory::SpType; +use wicket_common::rack_update::ClearUpdateStateResponse; +use wicket_common::rack_update::StartUpdateOptions; +use wicket_common::rack_update::UpdateSimulatedResult; use wicket_common::update_events::ComponentRegistrar; use wicket_common::update_events::EventBuffer; use wicket_common::update_events::EventReport; @@ -96,6 +95,7 @@ use wicket_common::update_events::UpdateComponent; use wicket_common::update_events::UpdateEngine; use wicket_common::update_events::UpdateStepId; use wicket_common::update_events::UpdateTerminalError; +use wicketd_api::GetArtifactsAndEventReportsResponse; #[derive(Debug)] struct SpUpdateData { diff --git 
a/wicketd/tests/integration_tests/commands.rs b/wicketd/tests/integration_tests/commands.rs deleted file mode 100644 index f14d608879..0000000000 --- a/wicketd/tests/integration_tests/commands.rs +++ /dev/null @@ -1,43 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Tests for the executable commands in this repo. Most functionality is tested -//! elsewhere, so this really just sanity checks argument parsing, bad args, and -//! the --openapi mode. - -use std::path::PathBuf; - -use expectorate::assert_contents; -use omicron_test_utils::dev::test_cmds::{ - assert_exit_code, path_to_executable, run_command, EXIT_SUCCESS, -}; -use openapiv3::OpenAPI; -use subprocess::Exec; - -// name of wicketd executable -const CMD_WICKETD: &str = env!("CARGO_BIN_EXE_wicketd"); - -fn path_to_wicketd() -> PathBuf { - path_to_executable(CMD_WICKETD) -} - -#[test] -fn test_wicketd_openapi() { - let exec = Exec::cmd(path_to_wicketd()).arg("openapi"); - let (exit_status, stdout_text, stderr_text) = run_command(exec); - assert_exit_code(exit_status, EXIT_SUCCESS, &stderr_text); - assert_contents("tests/output/cmd-wicketd-openapi-stderr", &stderr_text); - - let spec: OpenAPI = serde_json::from_str(&stdout_text) - .expect("stdout was not valid OpenAPI"); - - // Check for lint errors. - let errors = openapi_lint::validate(&spec); - assert!(errors.is_empty(), "{}", errors.join("\n\n")); - - // Confirm that the output hasn't changed. It's expected that we'll change - // this file as the API evolves, but pay attention to the diffs to ensure - // that the changes match your expectations. 
- assert_contents("../openapi/wicketd.json", &stdout_text); -} diff --git a/wicketd/tests/integration_tests/mod.rs b/wicketd/tests/integration_tests/mod.rs index c73f49805c..656dd79818 100644 --- a/wicketd/tests/integration_tests/mod.rs +++ b/wicketd/tests/integration_tests/mod.rs @@ -2,7 +2,6 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -mod commands; mod inventory; mod setup; mod updates; diff --git a/wicketd/tests/integration_tests/setup.rs b/wicketd/tests/integration_tests/setup.rs index 62682a73ab..01f01e21e1 100644 --- a/wicketd/tests/integration_tests/setup.rs +++ b/wicketd/tests/integration_tests/setup.rs @@ -16,7 +16,7 @@ pub struct WicketdTestContext { // this way. pub wicketd_raw_client: ClientTestContext, pub artifact_addr: SocketAddrV6, - pub artifact_client: installinator_artifact_client::Client, + pub artifact_client: installinator_client::Client, pub server: wicketd::Server, pub gateway: GatewayTestContext, } @@ -62,14 +62,15 @@ impl WicketdTestContext { ) }; - let artifact_addr = assert_ipv6(server.artifact_server.local_addr()); + let artifact_addr = + assert_ipv6(server.installinator_server.local_addr()); let artifact_client = { let endpoint = format!( "http://[{}]:{}", artifact_addr.ip(), artifact_addr.port() ); - installinator_artifact_client::Client::new( + installinator_client::Client::new( &endpoint, log.new(slog::o!("component" => "artifact test client")), ) diff --git a/wicketd/tests/integration_tests/updates.rs b/wicketd/tests/integration_tests/updates.rs index 611d81c7f5..af3bbfe656 100644 --- a/wicketd/tests/integration_tests/updates.rs +++ b/wicketd/tests/integration_tests/updates.rs @@ -22,13 +22,13 @@ use update_engine::NestedError; use uuid::Uuid; use wicket::OutputKind; use wicket_common::{ - rack_update::{ClearUpdateStateResponse, SpIdentifier, SpType}, + inventory::{SpIdentifier, SpType}, + rack_update::{ClearUpdateStateResponse, 
StartUpdateOptions}, update_events::{StepEventKind, UpdateComponent}, }; use wicketd::{RunningUpdateState, StartUpdateError}; use wicketd_client::types::{ - GetInventoryParams, GetInventoryResponse, StartUpdateOptions, - StartUpdateParams, + GetInventoryParams, GetInventoryResponse, StartUpdateParams, }; // See documentation for extract_nested_artifact_pair in update_plan.rs for why @@ -430,10 +430,7 @@ async fn test_update_races() { .expect("bytes read and archived"); // Now start an update. - let sp = gateway_client::types::SpIdentifier { - slot: 0, - type_: gateway_client::types::SpType::Sled, - }; + let sp = SpIdentifier { slot: 0, type_: SpType::Sled }; let sps: BTreeSet<_> = vec![sp].into_iter().collect(); let (sender, receiver) = oneshot::channel(); diff --git a/wicketd/tests/output/cmd-wicketd-openapi-stderr b/wicketd/tests/output/cmd-wicketd-openapi-stderr deleted file mode 100644 index e69de29bb2..0000000000