diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 9dd17c985d..3dade2e190 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@e7dd06a5731075458d8bbd3465396374ad0d20cb # v2 + uses: taiki-e/install-action@242f1c0c1a882c44e7d32b89af9f2a0bced36540 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date diff --git a/Cargo.lock b/Cargo.lock index 2ed96cfaa5..1f24549473 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -326,7 +326,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc7b2dbe9169059af0f821e811180fddc971fc210c776c133c7819ccd6e478db" dependencies = [ - "rustix 0.38.25", + "rustix 0.38.30", "tempfile", "windows-sys 0.52.0", ] @@ -1906,7 +1906,7 @@ dependencies = [ [[package]] name = "dropshot" version = "0.9.1-dev" -source = "git+https://github.com/oxidecomputer/dropshot?branch=main#b19a9a5d049f4433547f9f3b11d10a9483fc6acf" +source = "git+https://github.com/oxidecomputer/dropshot?branch=main#711a7490d81416731cfe0f9fef366ed5f266a0ee" dependencies = [ "async-stream", "async-trait", @@ -1927,7 +1927,7 @@ dependencies = [ "paste", "percent-encoding", "proc-macro2", - "rustls", + "rustls 0.22.2", "rustls-pemfile 2.0.0", "schemars", "serde", @@ -1941,7 +1941,7 @@ dependencies = [ "slog-json", "slog-term", "tokio", - "tokio-rustls", + "tokio-rustls 0.25.0", "toml 0.8.8", "usdt", "uuid", @@ -1952,7 +1952,7 @@ dependencies = [ [[package]] name = "dropshot_endpoint" version = "0.9.1-dev" -source = "git+https://github.com/oxidecomputer/dropshot?branch=main#b19a9a5d049f4433547f9f3b11d10a9483fc6acf" +source = "git+https://github.com/oxidecomputer/dropshot?branch=main#711a7490d81416731cfe0f9fef366ed5f266a0ee" dependencies = [ "proc-macro2", "quote", @@ -2145,23 +2145,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" -dependencies = [ - "errno-dragonfly", - "libc", - "windows-sys 0.48.0", -] - -[[package]] -name = "errno-dragonfly" -version = "0.1.2" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ - "cc", "libc", + "windows-sys 0.52.0", ] [[package]] @@ -2216,7 +2205,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5" dependencies = [ "cfg-if", - "rustix 0.38.25", + "rustix 0.38.30", "windows-sys 0.48.0", ] @@ -3064,15 +3053,30 @@ name = "hyper-rustls" version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.11", + "hyper", + "rustls 0.21.9", + "tokio", + "tokio-rustls 0.24.1", +] + +[[package]] +name = "hyper-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "399c78f9338483cb7e630c8474b07268983c6bd5acee012e4211f9f7bb21b070" dependencies = [ "futures-util", "http 0.2.11", "hyper", "log", - "rustls", + "rustls 0.22.2", "rustls-native-certs", + 
"rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.25.0", ] [[package]] @@ -3477,9 +3481,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "ipnetwork" @@ -3498,7 +3502,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", - "rustix 0.38.25", + "rustix 0.38.30", "windows-sys 0.48.0", ] @@ -3751,9 +3755,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.11" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lock_api" @@ -4069,6 +4073,7 @@ dependencies = [ "chrono", "futures", "ipnetwork", + "nexus-types", "omicron-common", "omicron-passwords", "omicron-workspace-hack", @@ -4142,7 +4147,7 @@ dependencies = [ "headers", "http 0.2.11", "hyper", - "hyper-rustls", + "hyper-rustls 0.25.0", "internal-dns", "ipnetwork", "itertools 0.12.0", @@ -4172,7 +4177,7 @@ dependencies = [ "rcgen", "ref-cast", "regex", - "rustls", + "rustls 0.22.2", "samael", "serde", "serde_json", @@ -4203,6 +4208,26 @@ dependencies = [ "serde_json", ] +[[package]] +name = "nexus-deployment" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "internal-dns", + "ipnet", + "ipnetwork", + "nexus-inventory", + "nexus-types", + "omicron-common", + "omicron-test-utils", + "omicron-workspace-hack", + "sled-agent-client", + "slog", + "thiserror", + "uuid", +] + [[package]] name = "nexus-inventory" version = "0.1.0" @@ -4313,6 +4338,7 @@ dependencies = [ "sled-agent-client", "steno", "strum", + "thiserror", "uuid", ] @@ -4762,7 +4788,8 @@ dependencies = [ "httptest", "hubtools", "hyper", - "hyper-rustls", + "hyper-rustls 0.25.0", + "illumos-utils", "internal-dns", "ipnetwork", "itertools 0.12.0", @@ -4772,6 +4799,7 @@ dependencies = [ "nexus-db-model", "nexus-db-queries", "nexus-defaults", + "nexus-deployment", "nexus-inventory", "nexus-test-interface", "nexus-test-utils", @@ -4808,7 +4836,8 @@ dependencies = [ "regex", "reqwest", "ring 0.17.7", - "rustls", + "rustls 0.22.2", + "rustls-pemfile 2.0.0", "samael", "schemars", "semver 1.0.21", @@ -4821,6 +4850,7 @@ dependencies = [ "slog", "slog-async", "slog-dtrace", + "slog-error-chain", "slog-term", "sp-sim", "steno", @@ -4873,6 +4903,7 @@ dependencies = [ "serde_json", "sled-agent-client", "slog", + "slog-error-chain", "strum", "subprocess", "tabled", @@ -5050,7 +5081,7 @@ dependencies = [ "regex", "reqwest", "ring 0.17.7", - "rustls", + "rustls 0.22.2", "slog", "subprocess", "tar", @@ -5111,7 +5142,6 @@ dependencies = [ "hex", "hmac", "hyper", - "hyper-rustls", "indexmap 2.1.0", "inout", "ipnetwork", @@ -5143,7 +5173,7 @@ dependencies = [ "regex-syntax 0.8.2", "reqwest", "ring 0.17.7", - "rustix 0.38.25", + "rustix 0.38.30", "schemars", "semver 1.0.21", "serde", @@ -6753,7 +6783,7 @@ dependencies = [ "http 0.2.11", "http-body", "hyper", - "hyper-rustls", + "hyper-rustls 0.24.2", "hyper-tls", "ipnet", "js-sys", @@ -6763,7 +6793,7 @@ dependencies = [ "once_cell", 
"percent-encoding", "pin-project-lite", - "rustls", + "rustls 0.21.9", "rustls-pemfile 1.0.3", "serde", "serde_json", @@ -6771,7 +6801,7 @@ dependencies = [ "system-configuration", "tokio", "tokio-native-tls", - "tokio-rustls", + "tokio-rustls 0.24.1", "tokio-util", "tower-service", "url", @@ -7072,15 +7102,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.25" +version = "0.38.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" +checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca" dependencies = [ "bitflags 2.4.0", "errno", "libc", - "linux-raw-sys 0.4.11", - "windows-sys 0.48.0", + "linux-raw-sys 0.4.13", + "windows-sys 0.52.0", ] [[package]] @@ -7091,18 +7121,33 @@ checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" dependencies = [ "log", "ring 0.17.7", - "rustls-webpki", + "rustls-webpki 0.101.7", "sct", ] +[[package]] +name = "rustls" +version = "0.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +dependencies = [ + "log", + "ring 0.17.7", + "rustls-pki-types", + "rustls-webpki 0.102.1", + "subtle", + "zeroize", +] + [[package]] name = "rustls-native-certs" -version = "0.6.3" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", - "rustls-pemfile 1.0.3", + "rustls-pemfile 2.0.0", + "rustls-pki-types", "schannel", "security-framework", ] @@ -7142,6 +7187,17 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "rustls-webpki" +version = "0.102.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef4ca26037c909dedb327b48c3327d0ba91d3dd3c4e05dad328f210ffb68e95b" +dependencies = [ + "ring 0.17.7", + "rustls-pki-types", + "untrusted 0.9.0", +] + [[package]] name = "rustversion" version = "1.0.14" @@ -8126,9 +8182,9 @@ dependencies = [ [[package]] name = "sqlparser" -version = "0.36.1" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eaa1e88e78d2c2460d78b7dc3f0c08dbb606ab4222f9aff36f420d36e307d87" +checksum = "5cc2c25a6c66789625ef164b4c7d2e548d627902280c13710d33da8222169964" dependencies = [ "log", "sqlparser_derive", @@ -8136,13 +8192,13 @@ dependencies = [ [[package]] name = "sqlparser_derive" -version = "0.1.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55fe75cb4a364c7f7ae06c7dbbc8d84bddd85d6cdf9975963c3935bc1991761e" +checksum = "01b2e185515564f15375f593fb966b5718bc624ba77fe49fa4616ad619690554" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.46", ] [[package]] @@ -8476,15 +8532,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.1" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" dependencies = [ "cfg-if", "fastrand", "redox_syscall 0.4.1", - "rustix 0.38.25", - "windows-sys 0.48.0", + "rustix 0.38.30", + "windows-sys 0.52.0", ] [[package]] @@ -8813,7 +8869,18 @@ version = "0.24.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls", + "rustls 0.21.9", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.2", + "rustls-pki-types", "tokio", ] diff --git a/Cargo.toml b/Cargo.toml index cb46c212e3..7821d0ddd8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,7 @@ members = [ "nexus/db-model", "nexus/db-queries", "nexus/defaults", + "nexus/deployment", "nexus/inventory", "nexus/test-interface", "nexus/test-utils-macros", @@ -116,6 +117,7 @@ default-members = [ "nexus/db-model", "nexus/db-queries", "nexus/defaults", + "nexus/deployment", "nexus/inventory", "nexus/types", "oximeter/collector", @@ -219,7 +221,7 @@ httptest = "0.15.5" hubtools = { git = "https://github.com/oxidecomputer/hubtools.git", branch = "main" } humantime = "2.1.0" hyper = "0.14" -hyper-rustls = "0.24.2" +hyper-rustls = "0.25.0" hyper-staticfile = "0.9.5" illumos-utils = { path = "illumos-utils" } indexmap = "2.1.0" @@ -230,6 +232,7 @@ installinator-artifact-client = { path = "clients/installinator-artifact-client" installinator-common = { path = "installinator-common" } internal-dns = { path = "internal-dns" } ipcc = { path = "ipcc" } +ipnet = "2.9" ipnetwork = { version = "0.20", features = ["schemars"] } itertools = "0.12.0" key-manager = { path = "key-manager" } @@ -246,6 +249,7 @@ nexus-client = { path = "clients/nexus-client" } nexus-db-model = { path = "nexus/db-model" } nexus-db-queries = { path = "nexus/db-queries" } nexus-defaults = { path = "nexus/defaults" } +nexus-deployment = { path = "nexus/deployment" } nexus-inventory = { path = "nexus/inventory" } omicron-certificates = { path = "certificates" } omicron-passwords = { path = "passwords" } @@ -317,7 +321,8 @@ ring = "0.17.7" rpassword = "7.3.1" rstest = "0.18.2" rustfmt-wrapper = "0.2" -rustls = "0.21.9" +rustls = "0.22.2" +rustls-pemfile = "2.0.0" rustyline = "12.0.0" samael = { git = "https://github.com/njaremko/samael", features = ["xmlsec"], branch = "master" } schemars = "0.8.16" @@ -353,7 +358,7 @@ sp-sim = { path = "sp-sim" } sprockets-common = { git = "http://github.com/oxidecomputer/sprockets", rev = "77df31efa5619d0767ffc837ef7468101608aee9" } sprockets-host = { git = "http://github.com/oxidecomputer/sprockets", rev = "77df31efa5619d0767ffc837ef7468101608aee9" } sprockets-rot = { git = "http://github.com/oxidecomputer/sprockets", rev = "77df31efa5619d0767ffc837ef7468101608aee9" } -sqlparser = { version = "0.36.1", features = [ "visitor" ] } +sqlparser = { version = "0.41.0", features = [ "visitor" ] } static_assertions = "1.1.0" # Please do not change the Steno version to a Git dependency. 
It makes it # harder than expected to make breaking changes (even if you specify a specific @@ -368,7 +373,7 @@ syn = { version = "2.0" } tabled = "0.14" tar = "0.4" tempdir = "0.3" -tempfile = "3.8" +tempfile = "3.9" term = "0.7" termios = "0.3" textwrap = "0.16.0" diff --git a/clients/nexus-client/Cargo.toml b/clients/nexus-client/Cargo.toml index 2734142f9f..965e2a7dfb 100644 --- a/clients/nexus-client/Cargo.toml +++ b/clients/nexus-client/Cargo.toml @@ -8,6 +8,7 @@ license = "MPL-2.0" chrono.workspace = true futures.workspace = true ipnetwork.workspace = true +nexus-types.workspace = true omicron-common.workspace = true omicron-passwords.workspace = true progenitor.workspace = true diff --git a/clients/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs index 3ecba7e710..1e1cbc31e7 100644 --- a/clients/nexus-client/src/lib.rs +++ b/clients/nexus-client/src/lib.rs @@ -22,6 +22,11 @@ progenitor::generate_api!( slog::debug!(log, "client response"; "result" => ?result); }), replace = { + // It's kind of unfortunate to pull in such a complex and unstable type + // as "blueprint" this way, but we have really useful functionality + // (e.g., diff'ing) that's implemented on our local type. + Blueprint = nexus_types::deployment::Blueprint, + Generation = omicron_common::api::external::Generation, Ipv4Network = ipnetwork::Ipv4Network, Ipv6Network = ipnetwork::Ipv6Network, IpNetwork = ipnetwork::IpNetwork, @@ -91,7 +96,7 @@ impl From<omicron_common::api::internal::nexus::SledInstanceState> ) -> Self { Self { dst_propolis_id: s.dst_propolis_id, - gen: s.gen.into(), + gen: s.gen, migration_id: s.migration_id, propolis_id: s.propolis_id, time_updated: s.time_updated, @@ -103,11 +108,7 @@ impl From<omicron_common::api::internal::nexus::VmmRuntimeState> for types::VmmRuntimeState { fn from(s: omicron_common::api::internal::nexus::VmmRuntimeState) -> Self { - Self { - gen: s.gen.into(), - state: s.state.into(), - time_updated: s.time_updated, - } + Self { gen: s.gen, state: s.state.into(), time_updated: s.time_updated } } } @@ -145,19 +146,13 @@ impl From } } -impl From<omicron_common::api::external::Generation> for types::Generation { - fn from(s: omicron_common::api::external::Generation) -> Self { - Self(i64::from(&s) as u64) - } -} - impl From<omicron_common::api::internal::nexus::DiskRuntimeState> for types::DiskRuntimeState { fn from(s: omicron_common::api::internal::nexus::DiskRuntimeState) -> Self { Self { disk_state: s.disk_state.into(), - gen: s.gen.into(), + gen: s.gen, time_updated: s.time_updated, } } diff --git a/clients/sled-agent-client/src/lib.rs b/clients/sled-agent-client/src/lib.rs index ee2214c3c2..39de64ec62 100644 --- a/clients/sled-agent-client/src/lib.rs +++ b/clients/sled-agent-client/src/lib.rs @@ -27,12 +27,15 @@ replace = { ByteCount = omicron_common::api::external::ByteCount, Generation = omicron_common::api::external::Generation, + MacAddr = omicron_common::api::external::MacAddr, Name = omicron_common::api::external::Name, SwitchLocation = omicron_common::api::external::SwitchLocation, Ipv6Network = ipnetwork::Ipv6Network, IpNetwork = ipnetwork::IpNetwork, PortFec = omicron_common::api::internal::shared::PortFec, PortSpeed = omicron_common::api::internal::shared::PortSpeed, + SourceNatConfig = omicron_common::api::internal::shared::SourceNatConfig, + Vni = omicron_common::api::external::Vni, } ); @@ -65,6 +68,24 @@ impl types::OmicronZoneType { types::OmicronZoneType::Oximeter { .. } => "oximeter", } } + + /// Identifies whether this is an NTP zone + pub fn is_ntp(&self) -> bool { + match self { + types::OmicronZoneType::BoundaryNtp { .. } + | types::OmicronZoneType::InternalNtp { .. } => true, + + types::OmicronZoneType::Clickhouse { ..
} + | types::OmicronZoneType::ClickhouseKeeper { .. } + | types::OmicronZoneType::CockroachDb { .. } + | types::OmicronZoneType::Crucible { .. } + | types::OmicronZoneType::CruciblePantry { .. } + | types::OmicronZoneType::ExternalDns { .. } + | types::OmicronZoneType::InternalDns { .. } + | types::OmicronZoneType::Nexus { .. } + | types::OmicronZoneType::Oximeter { .. } => false, + } + } } impl omicron_common::api::external::ClientError for types::Error { @@ -243,31 +264,6 @@ impl From<types::DiskState> for omicron_common::api::external::DiskState { } } -impl From<omicron_common::api::external::Vni> for types::Vni { - fn from(v: omicron_common::api::external::Vni) -> Self { - Self(u32::from(v)) - } -} - -impl From<types::Vni> for omicron_common::api::external::Vni { - fn from(s: types::Vni) -> Self { - Self::try_from(s.0).unwrap() - } -} - -impl From<omicron_common::api::external::MacAddr> for types::MacAddr { - fn from(s: omicron_common::api::external::MacAddr) -> Self { - Self::try_from(s.0.to_string()) - .unwrap_or_else(|e| panic!("{}: {}", s.0, e)) - } -} - -impl From<types::MacAddr> for omicron_common::api::external::MacAddr { - fn from(s: types::MacAddr) -> Self { - s.parse().unwrap() - } -} - impl From<omicron_common::api::external::Ipv4Net> for types::Ipv4Net { fn from(n: omicron_common::api::external::Ipv4Net) -> Self { Self::try_from(n.to_string()).unwrap_or_else(|e| panic!("{}: {}", n, e)) @@ -424,7 +420,7 @@ impl From<omicron_common::api::internal::nexus::HostIdentifier> use omicron_common::api::internal::nexus::HostIdentifier::*; match s { Ip(net) => Self::Ip(net.into()), - Vpc(vni) => Self::Vpc(vni.into()), + Vpc(vni) => Self::Vpc(vni), } } } @@ -505,23 +501,15 @@ impl From<omicron_common::api::internal::shared::NetworkInterface> kind: s.kind.into(), name: s.name, ip: s.ip, - mac: s.mac.into(), + mac: s.mac, subnet: s.subnet.into(), - vni: s.vni.into(), + vni: s.vni, primary: s.primary, slot: s.slot, } } } -impl From<omicron_common::api::internal::shared::SourceNatConfig> - for types::SourceNatConfig -{ - fn from(s: omicron_common::api::internal::shared::SourceNatConfig) -> Self { - Self { ip: s.ip, first_port: s.first_port, last_port: s.last_port } - } -} - /// Exposes additional [`Client`] interfaces for use by the test suite. These /// are bonus endpoints, not generated in the real client. #[async_trait] diff --git a/common/src/address.rs b/common/src/address.rs index 78eaee0bb4..0c8df33868 100644 --- a/common/src/address.rs +++ b/common/src/address.rs @@ -168,6 +168,15 @@ pub const RSS_RESERVED_ADDRESSES: u16 = 32; // The maximum number of addresses per sled reserved for control plane services. pub const CP_SERVICES_RESERVED_ADDRESSES: u16 = 0xFFFF; +// Number of addresses reserved (by the Nexus deployment planner) for allocation +// by the sled itself. This is currently used for the first two addresses of +// the sled subnet, which are used for the sled global zone and the switch zone, +// if any. Note that RSS does not honor this yet (in fact, per the above +// RSS_RESERVED_ADDRESSES, it will _only_ choose from this range). And +// historically, systems did not have this reservation at all. So it's not safe +// to assume that addresses in this range are available. +pub const SLED_RESERVED_ADDRESSES: u16 = 32; + /// Wraps an [`Ipv6Network`] with a compile-time prefix length.
#[derive(Debug, Clone, Copy, JsonSchema, Serialize, Hash, PartialEq, Eq)] #[schemars(rename = "Ipv6Subnet")] @@ -279,6 +288,19 @@ impl ReservedRackSubnet { } } +/// Return the list of DNS servers for the rack, given any address in the AZ +/// subnet +pub fn get_internal_dns_server_addresses(addr: Ipv6Addr) -> Vec<IpAddr> { + let az_subnet = Ipv6Subnet::<AZ_PREFIX>::new(addr); + let reserved_rack_subnet = ReservedRackSubnet::new(az_subnet); + let dns_subnets = + &reserved_rack_subnet.get_dns_subnets()[0..DNS_REDUNDANCY]; + dns_subnets + .iter() + .map(|dns_subnet| IpAddr::from(dns_subnet.dns_address().ip())) + .collect() +} + const SLED_AGENT_ADDRESS_INDEX: usize = 1; const SWITCH_ZONE_ADDRESS_INDEX: usize = 2; diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 899f15a04b..68fcb0f9fa 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -717,6 +717,7 @@ pub enum ResourceType { BackgroundTask, BgpConfig, BgpAnnounceSet, + Blueprint, Fleet, Silo, SiloUser, diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index 7544374906..e08d5f9477 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -33,6 +33,7 @@ serde.workspace = true serde_json.workspace = true sled-agent-client.workspace = true slog.workspace = true +slog-error-chain.workspace = true strum.workspace = true tabled.workspace = true textwrap.workspace = true diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs index df5248b52d..fef069d536 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus.rs @@ -11,11 +11,13 @@ use chrono::SecondsFormat; use chrono::Utc; use clap::Args; use clap::Subcommand; +use futures::TryStreamExt; use nexus_client::types::ActivationReason; use nexus_client::types::BackgroundTask; use nexus_client::types::CurrentStatus; use nexus_client::types::LastResult; use serde::Deserialize; +use slog_error_chain::InlineErrorChain; use std::collections::BTreeMap; use tabled::Tabled; use uuid::Uuid; @@ -36,6 +38,8 @@ pub struct NexusArgs { enum NexusCommands { /// print information about background tasks BackgroundTasks(BackgroundTasksArgs), + /// print information about blueprints + Blueprints(BlueprintsArgs), } #[derive(Debug, Args)] @@ -54,6 +58,64 @@ enum BackgroundTasksCommands { Show, } +#[derive(Debug, Args)] +struct BlueprintsArgs { + #[command(subcommand)] + command: BlueprintsCommands, +} + +#[derive(Debug, Subcommand)] +enum BlueprintsCommands { + /// List all blueprints + List, + /// Show a blueprint + Show(BlueprintIdArgs), + /// Diff two blueprints + Diff(BlueprintIdsArgs), + /// Delete a blueprint + Delete(BlueprintIdArgs), + /// Set the current target blueprint + Target(BlueprintsTargetArgs), + /// Generate an initial blueprint from a specific inventory collection + GenerateFromCollection(CollectionIdArgs), + /// Generate a new blueprint + Regenerate, +} + +#[derive(Debug, Args)] +struct BlueprintIdArgs { + /// id of a blueprint + blueprint_id: Uuid, +} + +#[derive(Debug, Args)] +struct BlueprintIdsArgs { + /// id of first blueprint + blueprint1_id: Uuid, + /// id of second blueprint + blueprint2_id: Uuid, +} + +#[derive(Debug, Args)] +struct CollectionIdArgs { + /// id of an inventory collection + collection_id: Uuid, +} + +#[derive(Debug, Args)] +struct BlueprintsTargetArgs { + #[command(subcommand)] + command: BlueprintTargetCommands, +} + +#[derive(Debug, Subcommand)] +enum BlueprintTargetCommands { + /// Show the current target blueprint + Show, + /// Change
the current target blueprint + Set(BlueprintIdArgs), +} + impl NexusArgs { /// Run a `omdb nexus` subcommand. pub(crate) async fn run_cmd( @@ -93,6 +155,40 @@ impl NexusArgs { NexusCommands::BackgroundTasks(BackgroundTasksArgs { command: BackgroundTasksCommands::Show, }) => cmd_nexus_background_tasks_show(&client).await, + + NexusCommands::Blueprints(BlueprintsArgs { + command: BlueprintsCommands::List, + }) => cmd_nexus_blueprints_list(&client).await, + NexusCommands::Blueprints(BlueprintsArgs { + command: BlueprintsCommands::Show(args), + }) => cmd_nexus_blueprints_show(&client, args).await, + NexusCommands::Blueprints(BlueprintsArgs { + command: BlueprintsCommands::Diff(args), + }) => cmd_nexus_blueprints_diff(&client, args).await, + NexusCommands::Blueprints(BlueprintsArgs { + command: BlueprintsCommands::Delete(args), + }) => cmd_nexus_blueprints_delete(&client, args).await, + NexusCommands::Blueprints(BlueprintsArgs { + command: + BlueprintsCommands::Target(BlueprintsTargetArgs { + command: BlueprintTargetCommands::Show, + }), + }) => cmd_nexus_blueprints_target_show(&client).await, + NexusCommands::Blueprints(BlueprintsArgs { + command: + BlueprintsCommands::Target(BlueprintsTargetArgs { + command: BlueprintTargetCommands::Set(args), + }), + }) => cmd_nexus_blueprints_target_set(&client, args).await, + NexusCommands::Blueprints(BlueprintsArgs { + command: BlueprintsCommands::Regenerate, + }) => cmd_nexus_blueprints_regenerate(&client).await, + NexusCommands::Blueprints(BlueprintsArgs { + command: BlueprintsCommands::GenerateFromCollection(args), + }) => { + cmd_nexus_blueprints_generate_from_collection(&client, args) + .await + } } } } @@ -629,3 +725,196 @@ fn reason_code(reason: ActivationReason) -> char { ActivationReason::Timeout => 'T', } } + +async fn cmd_nexus_blueprints_list( + client: &nexus_client::Client, +) -> Result<(), anyhow::Error> { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct BlueprintRow { + #[tabled(rename = "T")] + is_target: &'static str, + id: String, + parent: String, + time_created: String, + } + + let target_id = match client.blueprint_target_view().await { + Ok(result) => Some(result.into_inner().target_id), + Err(error) => { + // This request will fail if there's no target configured, so it's + // not necessarily a big deal. + eprintln!( + "warn: failed to fetch current target: {}", + InlineErrorChain::new(&error), + ); + None + } + }; + + let rows: Vec<BlueprintRow> = client + .blueprint_list_stream(None, None) + .try_collect::<Vec<_>>() + .await + .context("listing blueprints")?
+ .into_iter() + .map(|blueprint| { + let is_target = match target_id { + Some(target_id) if target_id == blueprint.id => "*", + _ => "", + }; + + BlueprintRow { + is_target, + id: blueprint.id.to_string(), + parent: blueprint + .parent_blueprint_id + .map(|s| s.to_string()) + .unwrap_or_else(|| String::from("<none>")), + time_created: humantime::format_rfc3339_millis( + blueprint.time_created.into(), + ) + .to_string(), + } + }) + .collect(); + + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("{}", table); + Ok(()) +} + +async fn cmd_nexus_blueprints_show( + client: &nexus_client::Client, + args: &BlueprintIdArgs, +) -> Result<(), anyhow::Error> { + let blueprint = client + .blueprint_view(&args.blueprint_id) + .await + .with_context(|| format!("fetching blueprint {}", args.blueprint_id))?; + println!("blueprint {}", blueprint.id); + println!( + "parent: {}", + blueprint + .parent_blueprint_id + .map(|u| u.to_string()) + .unwrap_or_else(|| String::from("<none>")) + ); + println!( + "created by {}{}", + blueprint.creator, + if blueprint.creator.parse::<Uuid>().is_ok() { + " (likely a Nexus instance)" + } else { + "" + } + ); + println!( + "created at {}", + humantime::format_rfc3339_millis(blueprint.time_created.into(),) + ); + println!("comment: {}", blueprint.comment); + println!("zones:\n"); + for (sled_id, sled_zones) in &blueprint.omicron_zones { + println!( + " sled {}: Omicron zones at generation {}", + sled_id, sled_zones.generation + ); + for z in &sled_zones.zones { + println!(" {} {}", z.id, z.zone_type.label()); + } + } + + Ok(()) +} + +async fn cmd_nexus_blueprints_diff( + client: &nexus_client::Client, + args: &BlueprintIdsArgs, +) -> Result<(), anyhow::Error> { + let b1 = client.blueprint_view(&args.blueprint1_id).await.with_context( + || format!("fetching blueprint {}", args.blueprint1_id), + )?; + let b2 = client.blueprint_view(&args.blueprint2_id).await.with_context( + || format!("fetching blueprint {}", args.blueprint2_id), + )?; + println!("{}", b1.diff(&b2)); + Ok(()) +} + +async fn cmd_nexus_blueprints_delete( + client: &nexus_client::Client, + args: &BlueprintIdArgs, +) -> Result<(), anyhow::Error> { + let _ = client + .blueprint_delete(&args.blueprint_id) + .await + .with_context(|| format!("deleting blueprint {}", args.blueprint_id))?; + println!("blueprint {} deleted", args.blueprint_id); + Ok(()) +} + +async fn cmd_nexus_blueprints_target_show( + client: &nexus_client::Client, +) -> Result<(), anyhow::Error> { + let target = client + .blueprint_target_view() + .await + .context("fetching target blueprint")?; + println!("target blueprint: {}", target.target_id); + println!("set at: {}", target.time_set); + println!("enabled: {}", target.enabled); + Ok(()) +} + +async fn cmd_nexus_blueprints_target_set( + client: &nexus_client::Client, + args: &BlueprintIdArgs, +) -> Result<(), anyhow::Error> { + // Try to preserve the value of "enabled", if possible.
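+ // If it cannot be fetched (e.g., no target has been set yet), fall + // back to enabled = true.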
+ let enabled = client + .blueprint_target_view() + .await + .map(|current| current.into_inner().enabled) + .unwrap_or(true); + client + .blueprint_target_set(&nexus_client::types::BlueprintTargetSet { + target_id: args.blueprint_id, + enabled, + }) + .await + .with_context(|| { + format!("setting target to blueprint {}", args.blueprint_id) + })?; + eprintln!("set target blueprint to {}", args.blueprint_id); + Ok(()) +} + +async fn cmd_nexus_blueprints_generate_from_collection( + client: &nexus_client::Client, + args: &CollectionIdArgs, +) -> Result<(), anyhow::Error> { + let blueprint = client + .blueprint_generate_from_collection( + &nexus_client::types::CollectionId { + collection_id: args.collection_id, + }, + ) + .await + .context("creating blueprint from collection id")?; + eprintln!("created blueprint {} from collection id", blueprint.id); + Ok(()) +} + +async fn cmd_nexus_blueprints_regenerate( + client: &nexus_client::Client, +) -> Result<(), anyhow::Error> { + let blueprint = + client.blueprint_regenerate().await.context("generating blueprint")?; + eprintln!("generated new blueprint {}", blueprint.id); + Ok(()) +} diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index 3c5f099c61..2790b0ef83 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -289,6 +289,7 @@ Usage: omdb nexus [OPTIONS] Commands: background-tasks print information about background tasks + blueprints print information about blueprints help Print this message or the help of the given subcommand(s) Options: diff --git a/internal-dns/src/config.rs b/internal-dns/src/config.rs index 92f37f6124..bf1d9b763b 100644 --- a/internal-dns/src/config.rs +++ b/internal-dns/src/config.rs @@ -83,7 +83,7 @@ pub enum ZoneVariant { /// Used to construct the DNS name for a control plane host #[derive(Clone, Debug, PartialEq, PartialOrd)] -enum Host { +pub enum Host { /// Used to construct an AAAA record for a sled. 
Sled(Uuid), @@ -92,6 +92,10 @@ } impl Host { + pub fn for_zone(id: Uuid, variant: ZoneVariant) -> Host { + Host::Zone { id, variant } + } + /// Returns the DNS name for this host, ignoring the zone part of the DNS /// name pub(crate) fn dns_name(&self) -> String { @@ -105,6 +109,12 @@ } } } + + /// Returns the fully-qualified DNS name, including the zone name of the + /// control plane's internal DNS zone + pub fn fqdn(&self) -> String { + format!("{}.{}", self.dns_name(), DNS_ZONE) + } } /// Builder for assembling DNS data for the control plane's DNS zone @@ -168,8 +178,12 @@ pub struct Zone { } impl Zone { + pub(crate) fn to_host(&self) -> Host { + Host::Zone { id: self.id, variant: self.variant } + } + pub(crate) fn dns_name(&self) -> String { - Host::Zone { id: self.id, variant: self.variant }.dns_name() + self.to_host().dns_name() } } @@ -393,7 +407,7 @@ impl DnsConfigBuilder { prio: 0, weight: 0, port, - target: format!("{}.{}", zone.dns_name(), DNS_ZONE), + target: zone.to_host().fqdn(), }) }) .collect(); @@ -412,11 +426,7 @@ impl DnsConfigBuilder { prio: 0, weight: 0, port, - target: format!( - "{}.{}", - Host::Sled(sled.0).dns_name(), - DNS_ZONE - ), + target: Host::Sled(sled.0).fqdn(), }) }) .collect(); diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index c50f482be4..52ee7034dd 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -30,6 +30,7 @@ headers.workspace = true hex.workspace = true http.workspace = true hyper.workspace = true +illumos-utils.workspace = true internal-dns.workspace = true ipnetwork.workspace = true macaddr.workspace = true @@ -63,6 +64,7 @@ sled-agent-client.workspace = true slog.workspace = true slog-async.workspace = true slog-dtrace.workspace = true +slog-error-chain.workspace = true slog-term.workspace = true steno.workspace = true tempfile.workspace = true @@ -76,6 +78,7 @@ uuid.workspace = true nexus-defaults.workspace = true nexus-db-model.workspace = true nexus-db-queries.workspace = true +nexus-deployment.workspace = true nexus-inventory.workspace = true nexus-types.workspace = true omicron-common.workspace = true @@ -84,6 +87,7 @@ oximeter.workspace = true oximeter-instruments = { workspace = true, features = ["http-instruments"] } oximeter-producer.workspace = true rustls = { workspace = true } +rustls-pemfile = { workspace = true } omicron-workspace-hack.workspace = true [dev-dependencies] diff --git a/nexus/db-model/src/external_ip.rs b/nexus/db-model/src/external_ip.rs index 6b3f8d5110..e95185658f 100644 --- a/nexus/db-model/src/external_ip.rs +++ b/nexus/db-model/src/external_ip.rs @@ -100,7 +100,9 @@ pub struct FloatingIp { pub project_id: Uuid, } -impl From<ExternalIp> for sled_agent_client::types::SourceNatConfig { +impl From<ExternalIp> + for omicron_common::api::internal::shared::SourceNatConfig +{ fn from(eip: ExternalIp) -> Self { Self { ip: eip.ip.ip(), diff --git a/nexus/db-model/src/inventory.rs b/nexus/db-model/src/inventory.rs index 4e3e5fad56..17d74be0aa 100644 --- a/nexus/db-model/src/inventory.rs +++ b/nexus/db-model/src/inventory.rs @@ -942,7 +942,7 @@ impl InvOmicronZone { let nic = match (self.nic_id, nic_row) { (Some(expected_id), Some(nic_row)) => { ensure!(expected_id == nic_row.id, "caller provided wrong NIC"); - Ok(nic_row.into_network_interface_for_zone(self.id)) + Ok(nic_row.into_network_interface_for_zone(self.id)?)
} (None, None) => Err(anyhow!( "expected zone to have an associated NIC, but it doesn't" )), @@ -1125,13 +1125,9 @@ impl InvOmicronZoneNic { id: nic.id, name: Name::from(nic.name.clone()), ip: IpNetwork::from(nic.ip), - mac: MacAddr::from( - omicron_common::api::external::MacAddr::from( - nic.mac.clone(), - ), - ), + mac: MacAddr::from(nic.mac), subnet: IpNetwork::from(nic.subnet.clone()), - vni: SqlU32::from(nic.vni.0), + vni: SqlU32::from(u32::from(nic.vni)), is_primary: nic.primary, slot: SqlU8::from(nic.slot), })) @@ -1143,19 +1139,20 @@ impl InvOmicronZoneNic { pub fn into_network_interface_for_zone( self, zone_id: Uuid, - ) -> nexus_types::inventory::NetworkInterface { - nexus_types::inventory::NetworkInterface { + ) -> Result<nexus_types::inventory::NetworkInterface, anyhow::Error> { + Ok(nexus_types::inventory::NetworkInterface { id: self.id, ip: self.ip.ip(), kind: nexus_types::inventory::NetworkInterfaceKind::Service( zone_id, ), - mac: (*self.mac).into(), + mac: *self.mac, name: self.name.into(), primary: self.is_primary, slot: *self.slot, - vni: nexus_types::inventory::Vni::from(*self.vni), + vni: omicron_common::api::external::Vni::try_from(*self.vni) + .context("parsing VNI")?, subnet: self.subnet.into(), - } + }) } } diff --git a/nexus/db-model/src/ipv4net.rs b/nexus/db-model/src/ipv4net.rs index cc4af0461e..abd5d6a3aa 100644 --- a/nexus/db-model/src/ipv4net.rs +++ b/nexus/db-model/src/ipv4net.rs @@ -2,6 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +use crate::vpc_subnet::RequestAddressError; use diesel::backend::Backend; use diesel::deserialize::{self, FromSql}; use diesel::pg::Pg; @@ -32,18 +33,30 @@ NewtypeDeref! { () pub struct Ipv4Net(external::Ipv4Net); } impl Ipv4Net { /// Check if an address is a valid user-requestable address for this subnet - pub fn check_requestable_addr(&self, addr: Ipv4Addr) -> bool { - self.contains(addr) - && ( - // First N addresses are reserved - self.iter() - .take(NUM_INITIAL_RESERVED_IP_ADDRESSES) - .all(|this| this != addr) - ) - && ( - // Last address in the subnet is reserved - addr != self.broadcast() - ) + pub fn check_requestable_addr( + &self, + addr: Ipv4Addr, + ) -> Result<(), RequestAddressError> { + if !self.contains(addr) { + return Err(RequestAddressError::OutsideSubnet( + addr.into(), + self.0 .0.into(), + )); + } + // Only the first N addresses are reserved + if self + .iter() + .take(NUM_INITIAL_RESERVED_IP_ADDRESSES) + .any(|this| this == addr) + { + return Err(RequestAddressError::Reserved); + } + // Last address in the subnet is reserved + if addr == self.broadcast() { + return Err(RequestAddressError::Broadcast); + } + + Ok(()) + } } diff --git a/nexus/db-model/src/ipv6net.rs b/nexus/db-model/src/ipv6net.rs index 1297844761..3954b4145e 100644 --- a/nexus/db-model/src/ipv6net.rs +++ b/nexus/db-model/src/ipv6net.rs @@ -15,6 +15,8 @@ use serde::Deserialize; use serde::Serialize; use std::net::Ipv6Addr; +use crate::RequestAddressError; + #[derive( Clone, Copy, @@ -83,13 +85,25 @@ impl Ipv6Net { } /// Check if an address is a valid user-requestable address for this subnet - pub fn check_requestable_addr(&self, addr: Ipv6Addr) -> bool { + pub fn check_requestable_addr( + &self, + addr: Ipv6Addr, + ) -> Result<(), RequestAddressError> { + if !self.contains(addr) { + return Err(RequestAddressError::OutsideSubnet( + addr.into(), + self.0 .0.into(), + )); + } // Only the first N addresses are reserved - self.contains(addr) - && self - .iter() - .take(NUM_INITIAL_RESERVED_IP_ADDRESSES) -
.all(|this| this != addr) + if self + .iter() + .take(NUM_INITIAL_RESERVED_IP_ADDRESSES) + .any(|this| this == addr) + { + return Err(RequestAddressError::Reserved); + } + Ok(()) } } diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index 6b89e5a270..8fdf05e876 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -407,10 +407,13 @@ impl DatabaseString for ProjectRole { #[cfg(test)] mod tests { + use crate::RequestAddressError; + use super::VpcSubnet; use ipnetwork::Ipv4Network; use ipnetwork::Ipv6Network; use omicron_common::api::external::IdentityMetadataCreateParams; + use omicron_common::api::external::IpNet; use omicron_common::api::external::Ipv4Net; use omicron_common::api::external::Ipv6Net; use std::net::IpAddr; @@ -515,18 +518,37 @@ mod tests { #[test] fn test_ip_subnet_check_requestable_address() { let subnet = super::Ipv4Net(Ipv4Net("192.168.0.0/16".parse().unwrap())); - assert!(subnet.check_requestable_addr("192.168.0.10".parse().unwrap())); - assert!(subnet.check_requestable_addr("192.168.1.0".parse().unwrap())); - assert!(!subnet.check_requestable_addr("192.168.0.0".parse().unwrap())); - assert!(subnet.check_requestable_addr("192.168.0.255".parse().unwrap())); - assert!( - !subnet.check_requestable_addr("192.168.255.255".parse().unwrap()) + subnet.check_requestable_addr("192.168.0.10".parse().unwrap()).unwrap(); + subnet.check_requestable_addr("192.168.1.0".parse().unwrap()).unwrap(); + let addr = "192.178.0.10".parse().unwrap(); + assert_eq!( + subnet.check_requestable_addr(addr), + Err(RequestAddressError::OutsideSubnet( + addr.into(), + IpNet::from(subnet.0).into() + )) + ); + assert_eq!( + subnet.check_requestable_addr("192.168.0.0".parse().unwrap()), + Err(RequestAddressError::Reserved) + ); + + subnet + .check_requestable_addr("192.168.0.255".parse().unwrap()) + .unwrap(); + + assert_eq!( + subnet.check_requestable_addr("192.168.255.255".parse().unwrap()), + Err(RequestAddressError::Broadcast) ); let subnet = super::Ipv6Net(Ipv6Net("fd00::/64".parse().unwrap())); - assert!(subnet.check_requestable_addr("fd00::a".parse().unwrap())); - assert!(!subnet.check_requestable_addr("fd00::1".parse().unwrap())); - assert!(subnet.check_requestable_addr("fd00::1:1".parse().unwrap())); + subnet.check_requestable_addr("fd00::a".parse().unwrap()).unwrap(); + assert_eq!( + subnet.check_requestable_addr("fd00::1".parse().unwrap()), + Err(RequestAddressError::Reserved) + ); + subnet.check_requestable_addr("fd00::1:1".parse().unwrap()).unwrap(); } /// Does some basic smoke checks on an impl of `DatabaseString` diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index ed819cba80..68991f1d75 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -938,6 +938,11 @@ table! { } } +allow_tables_to_appear_in_same_query! { + zpool, + physical_disk +} + table! 
{ dataset (id) { id -> Uuid, diff --git a/nexus/db-model/src/vpc_subnet.rs b/nexus/db-model/src/vpc_subnet.rs index 2cc74c177b..99f2c5e3ac 100644 --- a/nexus/db-model/src/vpc_subnet.rs +++ b/nexus/db-model/src/vpc_subnet.rs @@ -14,6 +14,7 @@ use nexus_types::external_api::params; use nexus_types::external_api::views; use nexus_types::identity::Resource; use omicron_common::api::external; +use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use serde::Deserialize; use serde::Serialize; use std::net::IpAddr; @@ -73,27 +74,27 @@ impl VpcSubnet { &self, addr: IpAddr, ) -> Result<(), external::Error> { - let subnet = match addr { - IpAddr::V4(addr) => { - if self.ipv4_block.check_requestable_addr(addr) { - return Ok(()); - } - ipnetwork::IpNetwork::V4(self.ipv4_block.0 .0) - } - IpAddr::V6(addr) => { - if self.ipv6_block.check_requestable_addr(addr) { - return Ok(()); - } - ipnetwork::IpNetwork::V6(self.ipv6_block.0 .0) - } - }; - Err(external::Error::invalid_request(&format!( - "Address '{}' not in subnet '{}' or is reserved for rack services", - addr, subnet, - ))) + match addr { + IpAddr::V4(addr) => self.ipv4_block.check_requestable_addr(addr), + IpAddr::V6(addr) => self.ipv6_block.check_requestable_addr(addr), + } + .map_err(|e| external::Error::invalid_request(e.to_string())) } } +#[derive(thiserror::Error, Debug, PartialEq)] +pub enum RequestAddressError { + #[error("{} is outside subnet {}", .0, .1)] + OutsideSubnet(IpAddr, ipnetwork::IpNetwork), + #[error( + "The first {} addresses of a subnet are reserved", + NUM_INITIAL_RESERVED_IP_ADDRESSES + )] + Reserved, + #[error("Cannot request a broadcast address")] + Broadcast, +} + impl From<VpcSubnet> for views::VpcSubnet { fn from(subnet: VpcSubnet) -> Self { Self { diff --git a/nexus/db-queries/src/authz/api_resources.rs b/nexus/db-queries/src/authz/api_resources.rs index 8485b8f11f..444a00d5ad 100644 --- a/nexus/db-queries/src/authz/api_resources.rs +++ b/nexus/db-queries/src/authz/api_resources.rs @@ -250,6 +250,57 @@ impl ApiResourceWithRolesType for Fleet { // TODO: refactor synthetic resources below +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct BlueprintConfig; + +pub const BLUEPRINT_CONFIG: BlueprintConfig = BlueprintConfig; + +impl oso::PolarClass for BlueprintConfig { + fn get_polar_class_builder() -> oso::ClassBuilder<BlueprintConfig> { + oso::Class::builder() + .with_equality_check() + .add_attribute_getter("fleet", |_: &BlueprintConfig| FLEET) + } +} + +impl AuthorizedResource for BlueprintConfig { + fn load_roles<'a, 'b, 'c, 'd, 'e, 'f>( + &'a self, + opctx: &'b OpContext, + datastore: &'c DataStore, + authn: &'d authn::Context, + roleset: &'e mut RoleSet, + ) -> futures::future::BoxFuture<'f, Result<(), Error>> + where + 'a: 'f, + 'b: 'f, + 'c: 'f, + 'd: 'f, + 'e: 'f, + { + // There are no roles on the BlueprintConfig, only permissions. But we + // still need to load the Fleet-related roles to verify that the actor + // has the "admin" role on the Fleet (possibly conferred from a Silo + // role). + load_roles_for_resource_tree(&FLEET, opctx, datastore, authn, roleset) + .boxed() + } + + fn on_unauthorized( + &self, + _: &Authz, + error: Error, + _: AnyActor, + _: Action, + ) -> Error { + error + } + + fn polar_class(&self) -> oso::Class { + Self::get_polar_class() + } +} + /// ConsoleSessionList is a synthetic resource used for modeling who has access /// to create sessions. #[derive(Clone, Copy, Debug, Eq, PartialEq)] @@ -842,6 +893,14 @@ authz_resource!
{ // Miscellaneous resources nested directly below "Fleet" +authz_resource! { + name = "Blueprint", + parent = "Fleet", + primary_key = Uuid, + roles_allowed = false, + polar_snippet = FleetChild, +} + authz_resource! { name = "ConsoleSession", parent = "Fleet", diff --git a/nexus/db-queries/src/authz/omicron.polar b/nexus/db-queries/src/authz/omicron.polar index 87fdf72f6a..f9382401fd 100644 --- a/nexus/db-queries/src/authz/omicron.polar +++ b/nexus/db-queries/src/authz/omicron.polar @@ -365,6 +365,24 @@ resource DnsConfig { has_relation(fleet: Fleet, "parent_fleet", dns_config: DnsConfig) if dns_config.fleet = fleet; +# Describes the policy for accessing blueprints +resource BlueprintConfig { + permissions = [ + "list_children", # list blueprints + "create_child", # create blueprint + "read", # read the current target + "modify", # change the current target + ]; + + relations = { parent_fleet: Fleet }; + "create_child" if "admin" on "parent_fleet"; + "modify" if "admin" on "parent_fleet"; + "list_children" if "viewer" on "parent_fleet"; + "read" if "viewer" on "parent_fleet"; +} +has_relation(fleet: Fleet, "parent_fleet", list: BlueprintConfig) + if list.fleet = fleet; + # Describes the policy for reading and modifying low-level inventory resource Inventory { permissions = [ "read", "modify" ]; diff --git a/nexus/db-queries/src/authz/oso_generic.rs b/nexus/db-queries/src/authz/oso_generic.rs index 6098379287..9b842216b4 100644 --- a/nexus/db-queries/src/authz/oso_generic.rs +++ b/nexus/db-queries/src/authz/oso_generic.rs @@ -103,6 +103,7 @@ pub fn make_omicron_oso(log: &slog::Logger) -> Result { Action::get_polar_class(), AnyActor::get_polar_class(), AuthenticatedActor::get_polar_class(), + BlueprintConfig::get_polar_class(), Database::get_polar_class(), DnsConfig::get_polar_class(), Fleet::get_polar_class(), @@ -137,6 +138,7 @@ pub fn make_omicron_oso(log: &slog::Logger) -> Result { SiloImage::init(), // Fleet-level resources AddressLot::init(), + Blueprint::init(), LoopbackAddress::init(), Certificate::init(), ConsoleSession::init(), diff --git a/nexus/db-queries/src/authz/policy_test/resource_builder.rs b/nexus/db-queries/src/authz/policy_test/resource_builder.rs index f10c969038..dc18b2e47f 100644 --- a/nexus/db-queries/src/authz/policy_test/resource_builder.rs +++ b/nexus/db-queries/src/authz/policy_test/resource_builder.rs @@ -243,6 +243,7 @@ macro_rules! 
impl_dyn_authorized_resource_for_global { } impl_dyn_authorized_resource_for_global!(authz::oso_generic::Database); +impl_dyn_authorized_resource_for_global!(authz::BlueprintConfig); impl_dyn_authorized_resource_for_global!(authz::ConsoleSessionList); impl_dyn_authorized_resource_for_global!(authz::DeviceAuthRequestList); impl_dyn_authorized_resource_for_global!(authz::DnsConfig); diff --git a/nexus/db-queries/src/authz/policy_test/resources.rs b/nexus/db-queries/src/authz/policy_test/resources.rs index 8bdd97923b..9cc4e28790 100644 --- a/nexus/db-queries/src/authz/policy_test/resources.rs +++ b/nexus/db-queries/src/authz/policy_test/resources.rs @@ -64,6 +64,7 @@ pub async fn make_resources( // Global resources builder.new_resource(authz::DATABASE); builder.new_resource_with_users(authz::FLEET).await; + builder.new_resource(authz::BLUEPRINT_CONFIG); builder.new_resource(authz::CONSOLE_SESSION_LIST); builder.new_resource(authz::DNS_CONFIG); builder.new_resource(authz::DEVICE_AUTH_REQUEST_LIST); @@ -118,6 +119,13 @@ pub async fn make_resources( LookupType::ByName(device_access_token), )); + let blueprint_id = "b9e923f6-caf3-4c83-96f9-8ffe8c627dd2".parse().unwrap(); + builder.new_resource(authz::Blueprint::new( + authz::FLEET, + blueprint_id, + LookupType::ById(blueprint_id), + )); + let system_update_id = "9c86d713-1bc2-4927-9892-ada3eb6f5f62".parse().unwrap(); builder.new_resource(authz::SystemUpdate::new( diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs index b7ff058234..bdacb0e7b9 100644 --- a/nexus/db-queries/src/db/datastore/inventory.rs +++ b/nexus/db-queries/src/db/datastore/inventory.rs @@ -1231,7 +1231,7 @@ impl DataStore { /// Attempt to read the given collection while limiting queries to `limit` /// records and returning nothing if `limit` is not large enough. - async fn inventory_collection_read_all_or_nothing( + pub async fn inventory_collection_read_all_or_nothing( &self, opctx: &OpContext, id: Uuid, diff --git a/nexus/db-queries/src/db/datastore/network_interface.rs b/nexus/db-queries/src/db/datastore/network_interface.rs index be12ea5231..d715bf3889 100644 --- a/nexus/db-queries/src/db/datastore/network_interface.rs +++ b/nexus/db-queries/src/db/datastore/network_interface.rs @@ -78,9 +78,9 @@ impl From for sled_client_types::NetworkInterface { kind, name: nic.name.into(), ip: nic.ip.ip(), - mac: sled_client_types::MacAddr::from(nic.mac.0), + mac: nic.mac.0, subnet: sled_client_types::IpNet::from(ip_subnet), - vni: sled_client_types::Vni::from(nic.vni.0), + vni: nic.vni.0, primary: nic.primary, slot: u8::try_from(nic.slot).unwrap(), } diff --git a/nexus/db-queries/src/db/datastore/zpool.rs b/nexus/db-queries/src/db/datastore/zpool.rs index 5d6c0844ef..79e5f5a55a 100644 --- a/nexus/db-queries/src/db/datastore/zpool.rs +++ b/nexus/db-queries/src/db/datastore/zpool.rs @@ -5,21 +5,29 @@ //! [`DataStore`] methods on [`Zpool`]s. 
use super::DataStore; +use crate::authz; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; +use crate::db::datastore::OpContext; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Asset; use crate::db::model::Sled; use crate::db::model::Zpool; +use crate::db::pagination::paginated; +use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use diesel::upsert::excluded; +use nexus_db_model::PhysicalDiskKind; use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; +use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; +use uuid::Uuid; impl DataStore { /// Stores a new zpool in the database. @@ -57,4 +65,29 @@ impl DataStore { ), }) } + + /// Paginates through all zpools on U.2 disks in all sleds + pub async fn zpool_list_all_external( + &self, + opctx: &OpContext, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec<Zpool> { + opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + + use db::schema::physical_disk::dsl as dsl_physical_disk; + use db::schema::zpool::dsl as dsl_zpool; + paginated(dsl_zpool::zpool, dsl_zpool::id, pagparams) + .filter(dsl_zpool::time_deleted.is_null()) + .inner_join( + db::schema::physical_disk::table.on( + dsl_zpool::physical_disk_id.eq(dsl_physical_disk::id).and( + dsl_physical_disk::variant.eq(PhysicalDiskKind::U2), + ), + ), + ) + .select(Zpool::as_select()) + .load_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } } diff --git a/nexus/db-queries/tests/output/authz-roles.out b/nexus/db-queries/tests/output/authz-roles.out index 54fb6481a9..26cc13fc6a 100644 --- a/nexus/db-queries/tests/output/authz-roles.out +++ b/nexus/db-queries/tests/output/authz-roles.out @@ -26,6 +26,20 @@ resource: Fleet id "001de000-1334-4000-8000-000000000000" silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! +resource: authz::BlueprintConfig + + USER Q R LC RP M MP CC D + fleet-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + fleet-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + unauthenticated ! ! ! ! ! ! ! ! + resource: ConsoleSessionList USER Q R LC RP M MP CC D @@ -922,6 +936,20 @@ resource: DeviceAccessToken "a-device-access-token" silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! +resource: Blueprint id "b9e923f6-caf3-4c83-96f9-8ffe8c627dd2" + + USER Q R LC RP M MP CC D + fleet-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + fleet-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + unauthenticated ! ! ! ! ! ! ! !
+ resource: SystemUpdate id "9c86d713-1bc2-4927-9892-ada3eb6f5f62" USER Q R LC RP M MP CC D diff --git a/nexus/deployment/Cargo.toml b/nexus/deployment/Cargo.toml new file mode 100644 index 0000000000..b166f947bf --- /dev/null +++ b/nexus/deployment/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "nexus-deployment" +version = "0.1.0" +edition = "2021" + +[dependencies] +anyhow.workspace = true +chrono.workspace = true +internal-dns.workspace = true +ipnet.workspace = true +ipnetwork.workspace = true +nexus-types.workspace = true +omicron-common.workspace = true +slog.workspace = true +thiserror.workspace = true +uuid.workspace = true + +omicron-workspace-hack.workspace = true + +[dev-dependencies] +nexus-inventory.workspace = true +omicron-test-utils.workspace = true +sled-agent-client.workspace = true diff --git a/nexus/deployment/src/blueprint_builder.rs b/nexus/deployment/src/blueprint_builder.rs new file mode 100644 index 0000000000..689e2d8e2c --- /dev/null +++ b/nexus/deployment/src/blueprint_builder.rs @@ -0,0 +1,683 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Low-level facility for generating Blueprints + +use crate::ip_allocator::IpAllocator; +use anyhow::anyhow; +use internal_dns::config::Host; +use internal_dns::config::ZoneVariant; +use ipnet::IpAdd; +use nexus_types::deployment::Blueprint; +use nexus_types::deployment::OmicronZoneConfig; +use nexus_types::deployment::OmicronZoneDataset; +use nexus_types::deployment::OmicronZoneType; +use nexus_types::deployment::OmicronZonesConfig; +use nexus_types::deployment::Policy; +use nexus_types::deployment::SledResources; +use nexus_types::deployment::ZpoolName; +use nexus_types::inventory::Collection; +use omicron_common::address::get_internal_dns_server_addresses; +use omicron_common::address::get_sled_address; +use omicron_common::address::get_switch_zone_address; +use omicron_common::address::CP_SERVICES_RESERVED_ADDRESSES; +use omicron_common::address::NTP_PORT; +use omicron_common::address::SLED_RESERVED_ADDRESSES; +use omicron_common::api::external::Generation; +use std::collections::BTreeMap; +use std::collections::BTreeSet; +use std::net::Ipv6Addr; +use std::net::SocketAddrV6; +use thiserror::Error; +use uuid::Uuid; + +/// Errors encountered while assembling blueprints +#[derive(Debug, Error)] +pub enum Error { + #[error("sled {sled_id}: ran out of available addresses for sled")] + OutOfAddresses { sled_id: Uuid }, + #[error("programming error in planner")] + Planner(#[from] anyhow::Error), +} + +/// Describes whether an idempotent "ensure" operation resulted in action taken +/// or no action was necessary +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum Ensure { + /// action was taken + Added, + /// no action was necessary + NotNeeded, +} + +/// Helper for assembling a blueprint +/// +/// There are two basic ways to assemble a new blueprint: +/// +/// 1. Build one directly from a collection. Such blueprints have no parent +/// blueprint. They are not customizable. Use +/// [`BlueprintBuilder::build_initial_from_collection`] for this. This would +/// generally only be used once in the lifetime of a rack, to assemble the +/// first blueprint. +/// +/// 2. Build one _from_ another blueprint, called the "parent", making changes +/// as desired. Use [`BlueprintBuilder::new_based_on`] for this. 
Once the +/// new blueprint is created, there is no dependency on the parent one. +/// However, the new blueprint can only be made the system's target if its +/// parent is the current target. +pub struct BlueprintBuilder<'a> { + /// previous blueprint, on which this one will be based + parent_blueprint: &'a Blueprint, + + // These fields are used to allocate resources from sleds. + policy: &'a Policy, + sled_ip_allocators: BTreeMap<Uuid, IpAllocator>, + + // These fields will become part of the final blueprint. See the + // corresponding fields in `Blueprint`. + omicron_zones: BTreeMap<Uuid, OmicronZonesConfig>, + zones_in_service: BTreeSet<Uuid>, + creator: String, + comments: Vec<String>, +} + +impl<'a> BlueprintBuilder<'a> { + /// Directly construct a `Blueprint` from the contents of a particular + /// collection (representing no changes from the collection state) + pub fn build_initial_from_collection( + collection: &'a Collection, + policy: &'a Policy, + creator: &str, + ) -> Result<Blueprint, Error> { + let omicron_zones = policy + .sleds + .keys() + .map(|sled_id| { + let zones = collection + .omicron_zones + .get(sled_id) + .map(|z| z.zones.clone()) + .ok_or_else(|| { + // We should not find a sled that's supposed to be + // in-service but is not part of the inventory. It's + // not that that can't ever happen. This could happen + // when a sled is first being added to the system. Of + // course it could also happen if this sled agent failed + // our inventory request. But this is the initial + // blueprint (so this shouldn't be the "add sled" case) + // and we want to get it right (so we don't want to + // leave out sleds whose sled agent happened to be down + // when we tried to do this). The operator (or, more + // likely, a support person) will have to sort out + // what's going on if this happens. + Error::Planner(anyhow!( + "building initial blueprint: sled {:?} is \ + supposed to be in service but has no zones \ + in inventory", + sled_id + )) + })?; + Ok((*sled_id, zones)) + }) + .collect::<Result<BTreeMap<_, _>, Error>>()?; + let zones_in_service = + collection.all_omicron_zones().map(|z| z.id).collect(); + Ok(Blueprint { + id: Uuid::new_v4(), + omicron_zones, + zones_in_service, + parent_blueprint_id: None, + time_created: chrono::Utc::now(), + creator: creator.to_owned(), + comment: format!("from collection {}", collection.id), + }) + } + + /// Construct a new `BlueprintBuilder` based on a previous blueprint, + /// starting with no changes from that state + pub fn new_based_on( + parent_blueprint: &'a Blueprint, + policy: &'a Policy, + creator: &str, + ) -> BlueprintBuilder<'a> { + BlueprintBuilder { + parent_blueprint, + policy, + sled_ip_allocators: BTreeMap::new(), + omicron_zones: BTreeMap::new(), + zones_in_service: parent_blueprint.zones_in_service.clone(), + creator: creator.to_owned(), + comments: Vec::new(), + } + } + + /// Assemble a final [`Blueprint`] based on the contents of the builder + pub fn build(mut self) -> Blueprint { + // Collect the Omicron zones config for each in-service sled. + let omicron_zones = self + .policy + .sleds + .keys() + .map(|sled_id| { + // Start with self.omicron_zones, which contains entries for any + // sled whose zones config is changing in this blueprint. + let zones = self + .omicron_zones + .remove(sled_id) + // If it's not there, use the config from the parent + // blueprint. + .or_else(|| { + self.parent_blueprint + .omicron_zones + .get(sled_id) + .cloned() + }) + // If it's not there either, then this must be a new sled + // and we haven't added any zones to it yet. Use the + // standard initial config.
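+ // (That standard initial config, built just below, is + // Generation::new(), i.e. generation 1, with an empty + // zone list.)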
+ .unwrap_or_else(|| OmicronZonesConfig { + generation: Generation::new(), + zones: vec![], + }); + (*sled_id, zones) + }) + .collect(); + Blueprint { + id: Uuid::new_v4(), + omicron_zones, + zones_in_service: self.zones_in_service, + parent_blueprint_id: Some(self.parent_blueprint.id), + time_created: chrono::Utc::now(), + creator: self.creator, + comment: self.comments.join(", "), + } + } + + /// Sets the blueprint's "comment" + /// + /// This is a short human-readable string summarizing the changes reflected + /// in the blueprint. This is only intended for debugging. + pub fn comment<S>(&mut self, comment: S) + where + String: From<S>, + { + self.comments.push(String::from(comment)); + } + + pub fn sled_ensure_zone_ntp( + &mut self, + sled_id: Uuid, + ) -> Result<Ensure, Error> { + // If there's already an NTP zone on this sled, do nothing. + let has_ntp = self + .parent_blueprint + .omicron_zones + .get(&sled_id) + .map(|found_zones| { + found_zones.zones.iter().any(|z| z.zone_type.is_ntp()) + }) + .unwrap_or(false); + if has_ntp { + return Ok(Ensure::NotNeeded); + } + + let sled_info = self.sled_resources(sled_id)?; + let sled_subnet = sled_info.subnet; + let ip = self.sled_alloc_ip(sled_id)?; + let ntp_address = SocketAddrV6::new(ip, NTP_PORT, 0, 0); + + // Construct the list of internal DNS servers. + // + // It'd be tempting to get this list from the other internal NTP + // servers but there may not be any of those. We could also + // construct this list manually from the set of internal DNS servers + // actually deployed. Instead, we take the same approach as RSS: + // these are at known, fixed addresses relative to the AZ subnet + // (which itself is a known-prefix parent subnet of the sled subnet). + let dns_servers = + get_internal_dns_server_addresses(sled_subnet.net().network()); + + // The list of boundary NTP servers is not necessarily stored + // anywhere (unless there happens to be another internal NTP zone + // lying around). Recompute it based on what boundary servers + // currently exist. + let ntp_servers = self + .parent_blueprint + .all_omicron_zones() + .filter_map(|(_, z)| { + if matches!(z.zone_type, OmicronZoneType::BoundaryNtp { .. }) { + Some(Host::for_zone(z.id, ZoneVariant::Other).fqdn()) + } else { + None + } + }) + .collect(); + + let zone = OmicronZoneConfig { + id: Uuid::new_v4(), + underlay_address: ip, + zone_type: OmicronZoneType::InternalNtp { + address: ntp_address.to_string(), + ntp_servers, + dns_servers, + domain: None, + }, + }; + + self.sled_add_zone(sled_id, zone)?; + Ok(Ensure::Added) + } + + pub fn sled_ensure_zone_crucible( + &mut self, + sled_id: Uuid, + pool_name: ZpoolName, + ) -> Result<Ensure, Error> { + // If this sled already has a Crucible zone on this pool, do nothing. + let has_crucible_on_this_pool = self + .parent_blueprint + .omicron_zones + .get(&sled_id) + .map(|found_zones| { + found_zones.zones.iter().any(|z| { + matches!( + &z.zone_type, + OmicronZoneType::Crucible { dataset, ..
} + if dataset.pool_name == pool_name + ) + }) + }) + .unwrap_or(false); + if has_crucible_on_this_pool { + return Ok(Ensure::NotNeeded); + } + + let sled_info = self.sled_resources(sled_id)?; + if !sled_info.zpools.contains(&pool_name) { + return Err(Error::Planner(anyhow!( + "adding crucible zone for sled {:?}: \ + attempted to use unknown zpool {:?}", + sled_id, + pool_name + ))); + } + + let ip = self.sled_alloc_ip(sled_id)?; + let port = omicron_common::address::CRUCIBLE_PORT; + let address = SocketAddrV6::new(ip, port, 0, 0).to_string(); + let zone = OmicronZoneConfig { + id: Uuid::new_v4(), + underlay_address: ip, + zone_type: OmicronZoneType::Crucible { + address, + dataset: OmicronZoneDataset { pool_name }, + }, + }; + self.sled_add_zone(sled_id, zone)?; + Ok(Ensure::Added) + } + + fn sled_add_zone( + &mut self, + sled_id: Uuid, + zone: OmicronZoneConfig, + ) -> Result<(), Error> { + // Check the sled id and return an appropriate error if it's invalid. + let _ = self.sled_resources(sled_id)?; + + if !self.zones_in_service.insert(zone.id) { + return Err(Error::Planner(anyhow!( + "attempted to add zone that already exists: {}", + zone.id + ))); + } + + let sled_zones = + self.omicron_zones.entry(sled_id).or_insert_with(|| { + if let Some(old_sled_zones) = + self.parent_blueprint.omicron_zones.get(&sled_id) + { + OmicronZonesConfig { + generation: old_sled_zones.generation.next(), + zones: old_sled_zones.zones.clone(), + } + } else { + // The first generation is reserved to mean the one + // containing no zones. See + // OMICRON_ZONES_CONFIG_INITIAL_GENERATION. So we start + // with the next one. + OmicronZonesConfig { + generation: Generation::new().next(), + zones: vec![], + } + } + }); + + sled_zones.zones.push(zone); + Ok(()) + } + + /// Returns a newly-allocated underlay address suitable for use by Omicron + /// zones + fn sled_alloc_ip(&mut self, sled_id: Uuid) -> Result<Ipv6Addr, Error> { + let sled_subnet = self.sled_resources(sled_id)?.subnet; + let allocator = + self.sled_ip_allocators.entry(sled_id).or_insert_with(|| { + let sled_subnet_addr = sled_subnet.net().network(); + let minimum = sled_subnet_addr + .saturating_add(u128::from(SLED_RESERVED_ADDRESSES)); + let maximum = sled_subnet_addr + .saturating_add(u128::from(CP_SERVICES_RESERVED_ADDRESSES)); + assert!(sled_subnet.net().contains(minimum)); + assert!(sled_subnet.net().contains(maximum)); + let mut allocator = IpAllocator::new(minimum, maximum); + + // We shouldn't need to explicitly reserve the sled's global + // zone and switch addresses because they should be out of our + // range, but we do so just to be sure. + let sled_gz_addr = *get_sled_address(sled_subnet).ip(); + assert!(sled_subnet.net().contains(sled_gz_addr)); + assert!(minimum > sled_gz_addr); + assert!(maximum > sled_gz_addr); + let switch_zone_addr = get_switch_zone_address(sled_subnet); + assert!(sled_subnet.net().contains(switch_zone_addr)); + assert!(minimum > switch_zone_addr); + assert!(maximum > switch_zone_addr); + + // Record each of the sled's zones' underlay addresses as + // allocated.
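For orientation, the bounds computed above work out to the following (a sketch only: the example subnet is made up, and the sizes of the reserved ranges are whatever the two `omicron_common` constants say):

```rust
use ipnet::IpAdd;
use omicron_common::address::{
    CP_SERVICES_RESERVED_ADDRESSES, SLED_RESERVED_ADDRESSES,
};
use std::net::Ipv6Addr;

// Hypothetical sled subnet base address; the real one comes from policy.
let sled_subnet_addr: Ipv6Addr = "fd00:1122:3344:0101::".parse().unwrap();

// Skip the low addresses reserved for the sled itself (global zone,
// switch zone, ...) and cap allocation at the top of the window set
// aside for control plane services.
let minimum =
    sled_subnet_addr.saturating_add(u128::from(SLED_RESERVED_ADDRESSES));
let maximum = sled_subnet_addr
    .saturating_add(u128::from(CP_SERVICES_RESERVED_ADDRESSES));

// IpAllocator then hands out addresses strictly between these bounds.
assert!(minimum < maximum);
```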
+ if let Some(sled_zones) = self.omicron_zones.get(&sled_id) { + for z in &sled_zones.zones { + allocator.reserve(z.underlay_address); + } + } + + allocator + }); + + allocator.alloc().ok_or_else(|| Error::OutOfAddresses { sled_id }) + } + + fn sled_resources(&self, sled_id: Uuid) -> Result<&SledResources, Error> { + self.policy.sleds.get(&sled_id).ok_or_else(|| { + Error::Planner(anyhow!( + "attempted to use sled that is not in service: {}", + sled_id + )) + }) + } +} + +#[cfg(test)] +pub mod test { + use super::BlueprintBuilder; + use ipnet::IpAdd; + use nexus_types::deployment::Policy; + use nexus_types::deployment::SledResources; + use nexus_types::deployment::ZpoolName; + use nexus_types::inventory::Collection; + use omicron_common::address::Ipv6Subnet; + use omicron_common::address::SLED_PREFIX; + use omicron_common::api::external::ByteCount; + use omicron_common::api::external::Generation; + use sled_agent_client::types::{ + Baseboard, Inventory, OmicronZoneConfig, OmicronZoneDataset, + OmicronZoneType, OmicronZonesConfig, SledRole, + }; + use std::collections::BTreeMap; + use std::collections::BTreeSet; + use std::net::Ipv6Addr; + use std::net::SocketAddrV6; + use std::str::FromStr; + use uuid::Uuid; + + /// Returns a collection and policy describing a pretty simple system + pub fn example() -> (Collection, Policy) { + let mut builder = nexus_inventory::CollectionBuilder::new("test-suite"); + + let sled_ids = [ + "72443b6c-b8bb-4ffa-ab3a-aeaa428ed79b", + "a5f3db3a-61aa-4f90-ad3e-02833c253bf5", + "0d168386-2551-44e8-98dd-ae7a7570f8a0", + ]; + let mut policy = Policy { sleds: BTreeMap::new() }; + for sled_id_str in sled_ids.iter() { + let sled_id: Uuid = sled_id_str.parse().unwrap(); + let sled_ip = policy_add_sled(&mut policy, sled_id); + let serial_number = format!("s{}", policy.sleds.len()); + builder + .found_sled_inventory( + "test-suite", + Inventory { + baseboard: Baseboard::Gimlet { + identifier: serial_number, + model: String::from("model1"), + revision: 0, + }, + reservoir_size: ByteCount::from(1024), + sled_role: SledRole::Gimlet, + sled_agent_address: SocketAddrV6::new( + sled_ip, 12345, 0, 0, + ) + .to_string(), + sled_id, + usable_hardware_threads: 10, + usable_physical_ram: ByteCount::from(1024 * 1024), + }, + ) + .unwrap(); + + let zpools = &policy.sleds.get(&sled_id).unwrap().zpools; + let ip1 = sled_ip.saturating_add(1); + let zones: Vec<_> = std::iter::once(OmicronZoneConfig { + id: Uuid::new_v4(), + underlay_address: sled_ip.saturating_add(1), + zone_type: OmicronZoneType::InternalNtp { + address: SocketAddrV6::new(ip1, 12345, 0, 0).to_string(), + dns_servers: vec![], + domain: None, + ntp_servers: vec![], + }, + }) + .chain(zpools.iter().enumerate().map(|(i, zpool_name)| { + let ip = sled_ip.saturating_add(u128::try_from(i + 2).unwrap()); + OmicronZoneConfig { + id: Uuid::new_v4(), + underlay_address: ip, + zone_type: OmicronZoneType::Crucible { + address: String::from("[::1]:12345"), + dataset: OmicronZoneDataset { + pool_name: zpool_name.clone(), + }, + }, + } + })) + .collect(); + + builder + .found_sled_omicron_zones( + "test-suite", + sled_id, + OmicronZonesConfig { + generation: Generation::new().next(), + zones, + }, + ) + .unwrap(); + } + + let collection = builder.build(); + + (collection, policy) + } + + pub fn policy_add_sled(policy: &mut Policy, sled_id: Uuid) -> Ipv6Addr { + let i = policy.sleds.len() + 1; + let sled_ip: Ipv6Addr = + format!("fd00:1122:3344:{}::1", i + 1).parse().unwrap(); + + let zpools: BTreeSet<ZpoolName> = [
"oxp_be776cf5-4cba-4b7d-8109-3dfd020f22ee", + "oxp_aee23a17-b2ce-43f2-9302-c738d92cca28", + "oxp_f7940a6b-c865-41cf-ad61-1b831d594286", + ] + .iter() + .map(|name_str| { + ZpoolName::from_str(name_str).expect("not a valid zpool name") + }) + .collect(); + + let subnet = Ipv6Subnet::::new(sled_ip); + policy.sleds.insert(sled_id, SledResources { zpools, subnet }); + sled_ip + } + + #[test] + fn test_initial() { + // Test creating a blueprint from a collection and verifying that it + // describes no changes. + let (collection, policy) = example(); + let blueprint_initial = + BlueprintBuilder::build_initial_from_collection( + &collection, + &policy, + "the_test", + ) + .expect("failed to create initial blueprint"); + + // Since collections don't include what was in service, we have to + // provide that ourselves. For our purposes though we don't care. + let zones_in_service = blueprint_initial.zones_in_service.clone(); + let diff = blueprint_initial + .diff_from_collection(&collection, &zones_in_service); + println!( + "collection -> initial blueprint (expected no changes):\n{}", + diff + ); + assert_eq!(diff.sleds_added().count(), 0); + assert_eq!(diff.sleds_removed().count(), 0); + assert_eq!(diff.sleds_changed().count(), 0); + + // Test a no-op blueprint. + let builder = BlueprintBuilder::new_based_on( + &blueprint_initial, + &policy, + "test_basic", + ); + let blueprint = builder.build(); + let diff = blueprint_initial.diff(&blueprint); + println!( + "initial blueprint -> next blueprint (expected no changes):\n{}", + diff + ); + assert_eq!(diff.sleds_added().count(), 0); + assert_eq!(diff.sleds_removed().count(), 0); + assert_eq!(diff.sleds_changed().count(), 0); + } + + #[test] + fn test_basic() { + let (collection, mut policy) = example(); + let blueprint1 = BlueprintBuilder::build_initial_from_collection( + &collection, + &policy, + "the_test", + ) + .expect("failed to create initial blueprint"); + + let mut builder = + BlueprintBuilder::new_based_on(&blueprint1, &policy, "test_basic"); + + // The initial blueprint should have internal NTP zones on all the + // existing sleds, plus Crucible zones on all pools. So if we ensure + // all these zones exist, we should see no change. + for (sled_id, sled_resources) in &policy.sleds { + builder.sled_ensure_zone_ntp(*sled_id).unwrap(); + for pool_name in &sled_resources.zpools { + builder + .sled_ensure_zone_crucible(*sled_id, pool_name.clone()) + .unwrap(); + } + } + + let blueprint2 = builder.build(); + let diff = blueprint1.diff(&blueprint2); + println!( + "initial blueprint -> next blueprint (expected no changes):\n{}", + diff + ); + assert_eq!(diff.sleds_added().count(), 0); + assert_eq!(diff.sleds_removed().count(), 0); + assert_eq!(diff.sleds_changed().count(), 0); + + // The next step is adding these zones to a new sled. + let new_sled_id = Uuid::new_v4(); + let _ = policy_add_sled(&mut policy, new_sled_id); + let mut builder = + BlueprintBuilder::new_based_on(&blueprint2, &policy, "test_basic"); + builder.sled_ensure_zone_ntp(new_sled_id).unwrap(); + let new_sled_resources = policy.sleds.get(&new_sled_id).unwrap(); + for pool_name in &new_sled_resources.zpools { + builder + .sled_ensure_zone_crucible(new_sled_id, pool_name.clone()) + .unwrap(); + } + + let blueprint3 = builder.build(); + let diff = blueprint2.diff(&blueprint3); + println!("expecting new NTP and Crucible zones:\n{}", diff); + + // No sleds were changed or removed. 
+ assert_eq!(diff.sleds_changed().count(), 0); + assert_eq!(diff.sleds_removed().count(), 0); + + // One sled was added. + let sleds: Vec<_> = diff.sleds_added().collect(); + assert_eq!(sleds.len(), 1); + let (sled_id, new_sled_zones) = sleds[0]; + assert_eq!(sled_id, new_sled_id); + // The generation number should be newer than the initial default. + assert!(new_sled_zones.generation > Generation::new()); + + // All zones' underlay addresses ought to be on the sled's subnet. + for z in &new_sled_zones.zones { + assert!(new_sled_resources + .subnet + .net() + .contains(z.underlay_address)); + } + + // Check for an NTP zone. Its sockaddr's IP should also be on the + // sled's subnet. + assert!(new_sled_zones.zones.iter().any(|z| { + if let OmicronZoneType::InternalNtp { address, .. } = &z.zone_type { + let sockaddr = address.parse::<SocketAddrV6>().unwrap(); + assert!(new_sled_resources + .subnet + .net() + .contains(*sockaddr.ip())); + true + } else { + false + } + })); + let crucible_pool_names = new_sled_zones + .zones + .iter() + .filter_map(|z| { + if let OmicronZoneType::Crucible { address, dataset } = + &z.zone_type + { + let sockaddr = address.parse::<SocketAddrV6>().unwrap(); + let ip = sockaddr.ip(); + assert!(new_sled_resources.subnet.net().contains(*ip)); + Some(dataset.pool_name.clone()) + } else { + None + } + }) + .collect::<BTreeSet<_>>(); + assert_eq!(crucible_pool_names, new_sled_resources.zpools); + } +} diff --git a/nexus/deployment/src/ip_allocator.rs b/nexus/deployment/src/ip_allocator.rs new file mode 100644 index 0000000000..a32fe936af --- /dev/null +++ b/nexus/deployment/src/ip_allocator.rs @@ -0,0 +1,120 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Very simple allocator for picking addresses from a sled's subnet + +use ipnet::IpAdd; +use std::net::Ipv6Addr; + +/// Very simple allocator for picking addresses from a sled's subnet +/// +/// The current implementation takes the max address seen so far and uses the +/// next one. This will never reuse old IPs. That avoids a bunch of +/// operational issues. It does mean we will eventually run out of IPs. But we +/// do have a big space right now (2^16). +// This overlaps with the bump allocator that's used in RSS. That one is not +// general enough to use here, though this one could potentially be used there. +pub struct IpAllocator { + last: Ipv6Addr, + maximum: Ipv6Addr, +} + +impl IpAllocator { + /// Make an allocator that allocates addresses from the range `(minimum, + /// maximum)` (exclusive). + pub fn new(minimum: Ipv6Addr, maximum: Ipv6Addr) -> IpAllocator { + IpAllocator { last: minimum, maximum } + } + + /// Mark the given address reserved so that it will never be returned by + /// `alloc()`. + /// + /// The given address can be outside the range provided to + /// `IpAllocator::new()`, in which case this reservation will be ignored. + pub fn reserve(&mut self, addr: Ipv6Addr) { + if addr < self.maximum && addr > self.last { + self.last = addr; + } + } + + /// Allocate an unused address from this allocator's range + pub fn alloc(&mut self) -> Option<Ipv6Addr> { + let next = self.last.saturating_add(1); + if next == self.last { + // We ran out of the entire IPv6 address space. + return None; + } + + if next >= self.maximum { + // We ran out of our allotted range.
+ return None; + } + + self.last = next; + Some(next) + } +} + +#[cfg(test)] +mod test { + use super::IpAllocator; + use std::collections::BTreeSet; + use std::net::Ipv6Addr; + + #[test] + fn test_basic() { + let range_start: Ipv6Addr = "fd00::d0".parse().unwrap(); + let range_end: Ipv6Addr = "fd00::e8".parse().unwrap(); + let reserved: BTreeSet<Ipv6Addr> = [ + // These first two are deliberately out of range. + "fd00::ff".parse().unwrap(), + "fd00::c0".parse().unwrap(), + "fd00::d3".parse().unwrap(), + "fd00::d7".parse().unwrap(), + ] + .iter() + .copied() + .collect(); + + let mut allocator = IpAllocator::new(range_start, range_end); + for r in &reserved { + allocator.reserve(*r); + } + + let mut allocated = BTreeSet::new(); + while let Some(addr) = allocator.alloc() { + println!("allocated: {}", addr); + assert!(!reserved.contains(&addr)); + assert!(!allocated.contains(&addr)); + allocated.insert(addr); + } + + assert_eq!( + allocated, + [ + // Because d7 was reserved, everything up to it is also skipped. + // It doesn't have to work that way, but it currently does. + "fd00::d8".parse().unwrap(), + "fd00::d9".parse().unwrap(), + "fd00::da".parse().unwrap(), + "fd00::db".parse().unwrap(), + "fd00::dc".parse().unwrap(), + "fd00::dd".parse().unwrap(), + "fd00::de".parse().unwrap(), + "fd00::df".parse().unwrap(), + "fd00::e0".parse().unwrap(), + "fd00::e1".parse().unwrap(), + "fd00::e2".parse().unwrap(), + "fd00::e3".parse().unwrap(), + "fd00::e4".parse().unwrap(), + "fd00::e5".parse().unwrap(), + "fd00::e6".parse().unwrap(), + "fd00::e7".parse().unwrap(), + ] + .iter() + .copied() + .collect() + ); + } +} diff --git a/nexus/deployment/src/lib.rs b/nexus/deployment/src/lib.rs new file mode 100644 index 0000000000..fd182ae613 --- /dev/null +++ b/nexus/deployment/src/lib.rs @@ -0,0 +1,120 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! # Omicron deployment management +//! +//! **This system is still under development. Some of what's below is +//! more aspirational than real.** +//! +//! ## Overview +//! +//! "Deployment management" here refers broadly to managing the lifecycle of +//! software components. That includes deployment, undeployment, upgrade, +//! bringing into service, and removing from service. It includes +//! dynamically-deployed components (like most Omicron zones, such as Nexus and +//! CockroachDB) as well as components that are tied to fixed physical hardware +//! (like the host operating system and device firmware). This system will +//! potentially manage configuration, too. See RFD 418 for background and a +//! survey of considerations here. +//! +//! The basic idea is that you have: +//! +//! * **fleet policy**: describes things like how many CockroachDB nodes there +//! should be, how many Nexus nodes there should be, the target system version +//! that all software should be running, which sleds are currently in service, +//! etc. +//! +//! * **inventory \[collections\]**: describe what software is currently +//! running on which hardware components, including versions and +//! configuration. This includes all control-plane-managed software, +//! including device firmware, host operating system, Omicron zones, etc. +//! +//! * **\[deployment\] blueprints**: describe what software _should_ be running +//! on which hardware components, including versions and configuration. Like +//!
inventory collections, the plan covers all control-plane-managed software +//! and configuration. Plans must be specific enough that multiple Nexus +//! instances can attempt to realize the same blueprint concurrently without +//! stomping on each other. (For example, it's not specific enough to say +//! "there should be one more CockroachDB node" or even "there should be six +//! CockroachDB nodes" because two Nexus instances might _both_ decide to +//! provision a new node and then we'd have too many.) Plans must also be +//! incremental enough that any execution of them should not break the system. +//! For example, if between two consecutive blueprints the version of every +//! CockroachDB node changed, then concurrent blueprint execution might try to +//! update them all at once, bringing the whole Cockroach cluster down. In +//! this case, we need to use a sequence of blueprints that each only updates +//! one node at a time to ensure that the system keeps working. +//! +//! At any given time, the system has exactly one _target_ blueprint. The +//! deployment system is always attempting to make reality match this +//! blueprint. The system can be aware of more than one deployment blueprint, +//! including past ones, later ones, those generated by Oxide support, etc. +//! +//! In terms of carrying it out, here's the basic idea: +//! +//! ```ignored +//! The Planner +//! +//! fleet policy (latest inventory) (latest blueprint) +//! \ | / +//! \ | / +//! +----------+ | +----------/ +//! | | | +//! v v v +//! +//! "planner" +//! (eventually a background task) +//! | +//! v no +//! is a new blueprint necessary? ------> done +//! | +//! | yes +//! v +//! generate a new blueprint +//! | +//! | +//! v +//! commit blueprint to database +//! | +//! | +//! v +//! done +//! +//! +//! The Executor (better name?) +//! +//! latest committed blueprint latest inventory +//! | | +//! | | +//! +----+ +----+ +//! | | +//! v v +//! +//! "executor" +//! (background task) +//! | +//! v +//! determine actions needed +//! take actions +//! ``` +//! +//! The **planner** evaluates whether the current (target) blueprint is +//! consistent with the current policy. If not, the task generates a new +//! blueprint that _is_ consistent with the current policy and attempts to make +//! that the new target. (Multiple Nexus instances could try to do this +//! concurrently. CockroachDB's strong consistency ensures that only one can +//! win. The other Nexus instances must go back to evaluating the winning +//! blueprint before trying to change it again -- otherwise two Nexus instances +//! might fight over two equivalent blueprints.) +//! +//! An **execution** task periodically evaluates whether the state reflected in +//! the latest inventory collection is consistent with the current target +//! blueprint. If not, it executes operations to bring reality into line with +//! the blueprint. This means provisioning new zones, removing old zones, +//! adding instances to DNS, removing instances from DNS, carrying out firmware +//! updates, etc. + +pub mod blueprint_builder; +mod ip_allocator; +pub mod planner; diff --git a/nexus/deployment/src/planner.rs b/nexus/deployment/src/planner.rs new file mode 100644 index 0000000000..f228a7a150 --- /dev/null +++ b/nexus/deployment/src/planner.rs @@ -0,0 +1,230 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
High-level facilities for generating Blueprints +//! +//! See crate-level documentation for details. + +use crate::blueprint_builder::BlueprintBuilder; +use crate::blueprint_builder::Ensure; +use crate::blueprint_builder::Error; +use nexus_types::deployment::Blueprint; +use nexus_types::deployment::Policy; +use slog::{info, Logger}; + +pub struct Planner<'a> { + log: Logger, + policy: &'a Policy, + blueprint: BlueprintBuilder<'a>, +} + +impl<'a> Planner<'a> { + pub fn new_based_on( + log: Logger, + parent_blueprint: &'a Blueprint, + policy: &'a Policy, + creator: &str, + ) -> Planner<'a> { + let blueprint = + BlueprintBuilder::new_based_on(parent_blueprint, policy, creator); + Planner { log, policy, blueprint } + } + + pub fn plan(mut self) -> Result<Blueprint, Error> { + self.do_plan()?; + Ok(self.blueprint.build()) + } + + fn do_plan(&mut self) -> Result<(), Error> { + // The only thing this planner currently knows how to do is add services + // to a sled that's missing them. So let's see if we're in that case. + + // Internal DNS is a prerequisite for bringing up all other zones. At + // this point, we assume that internal DNS (as a service) is already + // functioning. At some point, this function will have to grow the + // ability to determine whether more internal DNS zones need to be + // added and where they should go. And the blueprint builder will need + // to grow the ability to provision one. + + for (sled_id, sled_info) in &self.policy.sleds { + // Check for an NTP zone. Every sled should have one. If it's not + // there, all we can do is provision that one zone. We have to wait + // for that to succeed and synchronize the clock before we can + // provision anything else. + if self.blueprint.sled_ensure_zone_ntp(*sled_id)? == Ensure::Added { + info!( + &self.log, + "found sled missing NTP zone (will add one)"; + "sled_id" => ?sled_id + ); + self.blueprint + .comment(&format!("sled {}: add NTP zone", sled_id)); + // Don't make any other changes to this sled. However, this + // change is compatible with any other changes to other sleds, + // so we can "continue" here rather than "break". + continue; + } + + // Every zpool on the sled should have a Crucible zone on it. + let mut ncrucibles_added = 0; + for zpool_name in &sled_info.zpools { + if self + .blueprint + .sled_ensure_zone_crucible(*sled_id, zpool_name.clone())? + == Ensure::Added + { + info!( + &self.log, + "found sled zpool missing Crucible zone (will add one)"; + "sled_id" => ?sled_id, + "zpool_name" => ?zpool_name, + ); + ncrucibles_added += 1; + } + } + + if ncrucibles_added > 0 { + // Don't make any other changes to this sled. However, this + // change is compatible with any other changes to other sleds, + // so we can "continue" here rather than "break". + // (Yes, it's currently the last thing in the loop, but being + // explicit here means we won't forget to do this when more code + // is added below.)
+ self.blueprint.comment(&format!("sled {}: add zones", sled_id)); + continue; + } + } + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::Planner; + use crate::blueprint_builder::test::example; + use crate::blueprint_builder::test::policy_add_sled; + use crate::blueprint_builder::BlueprintBuilder; + use omicron_common::api::external::Generation; + use omicron_test_utils::dev::test_setup_log; + use sled_agent_client::types::OmicronZoneType; + + /// Runs through a basic sequence of blueprints for adding a sled + #[test] + fn test_basic_add_sled() { + let logctx = test_setup_log("planner_basic_add_sled"); + + // Use our example inventory collection. + let (collection, mut policy) = example(); + + // Build the initial blueprint. We don't bother verifying it here + // because there's a separate test for that. + let blueprint1 = BlueprintBuilder::build_initial_from_collection( + &collection, + &policy, + "the_test", + ) + .expect("failed to create initial blueprint"); + + // Now run the planner. It should do nothing because our initial + // system didn't have any issues that the planner currently knows how to + // fix. + let blueprint2 = Planner::new_based_on( + logctx.log.clone(), + &blueprint1, + &policy, + "no-op?", + ) + .plan() + .expect("failed to plan"); + + let diff = blueprint1.diff(&blueprint2); + println!("1 -> 2 (expected no changes):\n{}", diff); + assert_eq!(diff.sleds_added().count(), 0); + assert_eq!(diff.sleds_removed().count(), 0); + assert_eq!(diff.sleds_changed().count(), 0); + + // Now add a new sled. + let new_sled_id = + "7097f5b3-5896-4fff-bd97-63a9a69563a9".parse().unwrap(); + let _ = policy_add_sled(&mut policy, new_sled_id); + + // Check that the first step is to add an NTP zone + let blueprint3 = Planner::new_based_on( + logctx.log.clone(), + &blueprint2, + &policy, + "test: add NTP?", + ) + .plan() + .expect("failed to plan"); + + let diff = blueprint2.diff(&blueprint3); + println!("2 -> 3 (expect new NTP zone on new sled):\n{}", diff); + let sleds = diff.sleds_added().collect::<Vec<_>>(); + let (sled_id, sled_zones) = sleds[0]; + // We have defined elsewhere that the first generation contains no + // zones. So the first one with zones must be newer. See + // OMICRON_ZONES_CONFIG_INITIAL_GENERATION. + assert!(sled_zones.generation > Generation::new()); + assert_eq!(sled_id, new_sled_id); + assert_eq!(sled_zones.zones.len(), 1); + assert!(matches!( + sled_zones.zones[0].zone_type, + OmicronZoneType::InternalNtp { .. } + )); + assert_eq!(diff.sleds_removed().count(), 0); + assert_eq!(diff.sleds_changed().count(), 0); + + // Check that the next step is to add Crucible zones + let blueprint4 = Planner::new_based_on( + logctx.log.clone(), + &blueprint3, + &policy, + "test: add Crucible zones?", + ) + .plan() + .expect("failed to plan"); + + let diff = blueprint3.diff(&blueprint4); + println!("3 -> 4 (expect Crucible zones):\n{}", diff); + assert_eq!(diff.sleds_added().count(), 0); + assert_eq!(diff.sleds_removed().count(), 0); + let sleds = diff.sleds_changed().collect::<Vec<_>>(); + assert_eq!(sleds.len(), 1); + let (sled_id, sled_changes) = &sleds[0]; + assert_eq!( + sled_changes.generation_after, + sled_changes.generation_before.next() + ); + assert_eq!(*sled_id, new_sled_id); + assert_eq!(sled_changes.zones_removed().count(), 0); + assert_eq!(sled_changes.zones_changed().count(), 0); + let zones = sled_changes.zones_added().collect::<Vec<_>>(); + assert_eq!(zones.len(), 3); + for zone in &zones { + let OmicronZoneType::Crucible { ..
} = zone.zone_type else { + panic!("unexpectedly added a non-Crucible zone"); + }; + } + + // Check that there are no more steps + let blueprint5 = Planner::new_based_on( + logctx.log.clone(), + &blueprint4, + &policy, + "test: no-op?", + ) + .plan() + .expect("failed to plan"); + + let diff = blueprint4.diff(&blueprint5); + println!("4 -> 5 (expect no changes):\n{}", diff); + assert_eq!(diff.sleds_added().count(), 0); + assert_eq!(diff.sleds_removed().count(), 0); + assert_eq!(diff.sleds_changed().count(), 0); + + logctx.cleanup_successful(); + } +} diff --git a/nexus/inventory/src/collector.rs b/nexus/inventory/src/collector.rs index ab9af3f9e0..ad5ae7d024 100644 --- a/nexus/inventory/src/collector.rs +++ b/nexus/inventory/src/collector.rs @@ -526,8 +526,13 @@ mod test { zone_id: Uuid, ) -> sim::Server { // Start a simulated sled agent. - let config = - sim::Config::for_testing(sled_id, sim::SimMode::Auto, None, None); + let config = sim::Config::for_testing( + sled_id, + sim::SimMode::Auto, + None, + None, + Some(vec![]), + ); let agent = sim::Server::start(&config, &log, false).await.unwrap(); // Pretend to put some zones onto this sled. We don't need to test this diff --git a/nexus/src/app/deployment.rs b/nexus/src/app/deployment.rs new file mode 100644 index 0000000000..9439cdc6d5 --- /dev/null +++ b/nexus/src/app/deployment.rs @@ -0,0 +1,364 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Configuration of the deployment system + +use nexus_db_queries::authz; +use nexus_db_queries::authz::Action; +use nexus_db_queries::authz::ApiResource; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::pagination::Paginator; +use nexus_deployment::blueprint_builder::BlueprintBuilder; +use nexus_deployment::planner::Planner; +use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintTarget; +use nexus_types::deployment::BlueprintTargetSet; +use nexus_types::deployment::Policy; +use nexus_types::deployment::SledResources; +use nexus_types::deployment::ZpoolName; +use nexus_types::identity::Asset; +use omicron_common::address::Ipv6Subnet; +use omicron_common::address::SLED_PREFIX; +use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DataPageParams; +use omicron_common::api::external::DeleteResult; +use omicron_common::api::external::Error; +use omicron_common::api::external::ListResultVec; +use omicron_common::api::external::LookupResult; +use omicron_common::api::external::LookupType; +use omicron_common::api::external::ResourceType; +use slog_error_chain::InlineErrorChain; +use std::collections::BTreeMap; +use std::collections::BTreeSet; +use std::num::NonZeroU32; +use std::str::FromStr; +use uuid::Uuid; + +/// "limit" used in SQL queries that paginate through all sleds, zpools, etc. +// unsafe: `new_unchecked` is only unsound if the argument is 0. +const SQL_BATCH_SIZE: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1000) }; + +/// "limit" used in SQL queries that fetch inventory data. Unlike the batch +/// size above, this is a limit on the *total* number of records returned. If +/// it's too small, the whole operation will fail. See +/// oxidecomputer/omicron#4629. +// unsafe: `new_unchecked` is only unsound if the argument is 0. 
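An aside on the `unsafe` in these two constants: at the time, `Option::unwrap` could not be used in a `const` item, so `new_unchecked` plus a safety comment was the terse option. A safe alternative that also works in `const` context (a sketch; `SQL_BATCH_SIZE_SAFE` is a hypothetical name) relies on const `match` and const panics:

```rust
use std::num::NonZeroU32;

const SQL_BATCH_SIZE_SAFE: NonZeroU32 = match NonZeroU32::new(1000) {
    Some(n) => n,
    // 1000 is nonzero, so this arm can never be taken.
    None => panic!("batch size must be nonzero"),
};
```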
+const SQL_LIMIT_INVENTORY: NonZeroU32 = + unsafe { NonZeroU32::new_unchecked(1000) }; + +/// Temporary in-memory store of blueprints +/// +/// Blueprints eventually need to be stored in the database. That will obviate +/// the need for this structure. +pub struct Blueprints { + all_blueprints: BTreeMap<Uuid, Blueprint>, + target: BlueprintTarget, +} + +impl Blueprints { + pub fn new() -> Blueprints { + Blueprints { + all_blueprints: BTreeMap::new(), + target: BlueprintTarget { + target_id: None, + enabled: false, + time_set: chrono::Utc::now(), + }, + } + } +} + +/// Common structure for collecting information that the planner needs +struct PlanningContext { + policy: Policy, + creator: String, +} + +impl super::Nexus { + // Once we store blueprints in the database, this function will likely just + // delegate to a corresponding datastore function. + pub async fn blueprint_list( + &self, + opctx: &OpContext, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec<Blueprint> { + opctx.authorize(Action::ListChildren, &authz::BLUEPRINT_CONFIG).await?; + Ok(self + .blueprints + .lock() + .unwrap() + .all_blueprints + .values() + .filter_map(|f| match pagparams.marker { + None => Some(f.clone()), + Some(marker) if f.id > *marker => Some(f.clone()), + _ => None, + }) + .collect()) + } + + // Once we store blueprints in the database, this function will likely just + // delegate to a corresponding datastore function. + pub async fn blueprint_view( + &self, + opctx: &OpContext, + blueprint_id: Uuid, + ) -> LookupResult<Blueprint> { + let blueprint = authz::Blueprint::new( + authz::FLEET, + blueprint_id, + LookupType::ById(blueprint_id), + ); + opctx.authorize(Action::Read, &blueprint).await?; + self.blueprints + .lock() + .unwrap() + .all_blueprints + .get(&blueprint_id) + .cloned() + .ok_or_else(|| blueprint.not_found()) + } + + // Once we store blueprints in the database, this function will likely just + // delegate to a corresponding datastore function. + pub async fn blueprint_delete( + &self, + opctx: &OpContext, + blueprint_id: Uuid, + ) -> DeleteResult { + let blueprint = authz::Blueprint::new( + authz::FLEET, + blueprint_id, + LookupType::ById(blueprint_id), + ); + opctx.authorize(Action::Delete, &blueprint).await?; + + let mut blueprints = self.blueprints.lock().unwrap(); + if let Some(target_id) = blueprints.target.target_id { + if target_id == blueprint_id { + return Err(Error::conflict(format!( + "blueprint {} is the current target and cannot be deleted", + blueprint_id + ))); + } + } + + if blueprints.all_blueprints.remove(&blueprint_id).is_none() { + return Err(blueprint.not_found()); + } + + Ok(()) + } + + pub async fn blueprint_target_view( + &self, + opctx: &OpContext, + ) -> Result<BlueprintTarget, Error> { + self.blueprint_target(opctx).await.map(|(target, _)| target) + } + + // This is a stand-in for a datastore function that fetches the current + // target information and the target blueprint's contents. This helper + // exists to combine the authz check with the lookup, which is what the + // datastore function will eventually do.
+ async fn blueprint_target( + &self, + opctx: &OpContext, + ) -> Result<(BlueprintTarget, Option<Blueprint>), Error> { + opctx.authorize(Action::Read, &authz::BLUEPRINT_CONFIG).await?; + let blueprints = self.blueprints.lock().unwrap(); + Ok(( + blueprints.target.clone(), + blueprints.target.target_id.and_then(|target_id| { + blueprints.all_blueprints.get(&target_id).cloned() + }), + )) + } + + // Once we store blueprints in the database, this function will likely just + // delegate to a corresponding datastore function. + pub async fn blueprint_target_set( + &self, + opctx: &OpContext, + params: BlueprintTargetSet, + ) -> Result<BlueprintTarget, Error> { + opctx.authorize(Action::Modify, &authz::BLUEPRINT_CONFIG).await?; + let new_target_id = params.target_id; + let enabled = params.enabled; + let mut blueprints = self.blueprints.lock().unwrap(); + if let Some(blueprint) = blueprints.all_blueprints.get(&new_target_id) { + if blueprint.parent_blueprint_id != blueprints.target.target_id { + return Err(Error::conflict(&format!( + "blueprint {:?}: parent is {:?}, which is not the current \ + target {:?}", + new_target_id, + blueprint + .parent_blueprint_id + .map(|p| p.to_string()) + .unwrap_or_else(|| String::from("<none>")), + blueprints + .target + .target_id + .map(|p| p.to_string()) + .unwrap_or_else(|| String::from("<none>")), + ))); + } + blueprints.target = BlueprintTarget { + target_id: Some(new_target_id), + enabled, + time_set: chrono::Utc::now(), + }; + + // When we add a background task executing the target blueprint, + // this is the point where we'd signal it to update its target. + Ok(blueprints.target.clone()) + } else { + Err(Error::not_found_by_id(ResourceType::Blueprint, &new_target_id)) + } + } + + async fn blueprint_planning_context( + &self, + opctx: &OpContext, + ) -> Result<PlanningContext, Error> { + let creator = self.id.to_string(); + let datastore = self.datastore(); + + let sled_rows = { + let mut all_sleds = Vec::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = + datastore.sled_list(opctx, &p.current_pagparams()).await?; + paginator = + p.found_batch(&batch, &|s: &nexus_db_model::Sled| s.id()); + all_sleds.extend(batch); + } + all_sleds + }; + + let mut zpools_by_sled_id = { + let mut zpools = BTreeMap::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = datastore + .zpool_list_all_external(opctx, &p.current_pagparams()) + .await?; + paginator = + p.found_batch(&batch, &|z: &nexus_db_model::Zpool| z.id()); + for z in batch { + let sled_zpool_names = + zpools.entry(z.sled_id).or_insert_with(BTreeSet::new); + // It's unfortunate that Nexus knows how Sled Agent + // constructs zpool names, but there's not currently an + // alternative.
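Concretely, the convention the code below relies on is that Sled Agent names external zpools `oxp_<zpool-uuid>` (compare the test data earlier in this diff, e.g. `oxp_be776cf5-4cba-4b7d-8109-3dfd020f22ee`). A minimal illustration of round-tripping such a name, assuming only the types already imported here:

```rust
use std::str::FromStr;
use uuid::Uuid;

let zpool_id = Uuid::new_v4();
// Reconstruct the name Sled Agent would have used for this pool...
let generated = format!("oxp_{}", zpool_id);
// ...and parse it back into the nexus-types ZpoolName wrapper.
let zpool_name = ZpoolName::from_str(&generated).expect("valid zpool name");
```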
+ let zpool_name_generated = + illumos_utils::zpool::ZpoolName::new_external(z.id()) + .to_string(); + let zpool_name = ZpoolName::from_str(&zpool_name_generated) + .map_err(|e| { + Error::internal_error(&format!( + "unexpectedly failed to parse generated \ + zpool name: {}: {}", + zpool_name_generated, e + )) + })?; + sled_zpool_names.insert(zpool_name); + } + } + zpools + }; + + let sleds = sled_rows + .into_iter() + .map(|sled_row| { + let sled_id = sled_row.id(); + let subnet = Ipv6Subnet::<SLED_PREFIX>::new(sled_row.ip()); + let zpools = zpools_by_sled_id + .remove(&sled_id) + .unwrap_or_else(BTreeSet::new); + let sled_info = SledResources { subnet, zpools }; + (sled_id, sled_info) + }) + .collect(); + + Ok(PlanningContext { creator, policy: Policy { sleds } }) + } + + // Once we store blueprints in the database, this function will likely just + // delegate to a corresponding datastore function. + async fn blueprint_add( + &self, + opctx: &OpContext, + blueprint: Blueprint, + ) -> Result<(), Error> { + opctx.authorize(Action::Modify, &authz::BLUEPRINT_CONFIG).await?; + let mut blueprints = self.blueprints.lock().unwrap(); + assert!(blueprints + .all_blueprints + .insert(blueprint.id, blueprint) + .is_none()); + Ok(()) + } + + pub async fn blueprint_generate_from_collection( + &self, + opctx: &OpContext, + collection_id: Uuid, + ) -> CreateResult<Blueprint> { + let collection = self + .datastore() + .inventory_collection_read_all_or_nothing( + opctx, + collection_id, + SQL_LIMIT_INVENTORY, + ) + .await?; + let planning_context = self.blueprint_planning_context(opctx).await?; + let blueprint = BlueprintBuilder::build_initial_from_collection( + &collection, + &planning_context.policy, + &planning_context.creator, + ) + .map_err(|error| { + Error::internal_error(&format!( + "error generating initial blueprint from collection {}: {}", + collection_id, + InlineErrorChain::new(&error) + )) + })?; + + self.blueprint_add(&opctx, blueprint.clone()).await?; + Ok(blueprint) + } + + pub async fn blueprint_create_regenerate( + &self, + opctx: &OpContext, + ) -> CreateResult<Blueprint> { + let (_, maybe_parent) = self.blueprint_target(opctx).await?; + let Some(parent_blueprint) = maybe_parent else { + return Err(Error::conflict( + "cannot regenerate blueprint without existing target", + )); + }; + + let planning_context = self.blueprint_planning_context(opctx).await?; + let planner = Planner::new_based_on( + opctx.log.clone(), + &parent_blueprint, + &planning_context.policy, + &planning_context.creator, + ); + let blueprint = planner.plan().map_err(|error| { + Error::internal_error(&format!( + "error generating blueprint: {}", + InlineErrorChain::new(&error) + )) + })?; + + self.blueprint_add(&opctx, blueprint.clone()).await?; + Ok(blueprint) + } +} diff --git a/nexus/src/app/external_endpoints.rs b/nexus/src/app/external_endpoints.rs index 0a6dd41db6..bcfec667ce 100644 --- a/nexus/src/app/external_endpoints.rs +++ b/nexus/src/app/external_endpoints.rs @@ -429,19 +429,21 @@ impl TryFrom for TlsCertificate { // Assemble a rustls CertifiedKey with both the certificate and the key.
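The hunk below migrates this code from rustls 0.21 to 0.22. In isolation, the new-style key handling looks roughly like this (a sketch, assuming `pem_bytes: Vec<u8>` holds a PEM-encoded private key and the caller returns `anyhow::Result`):

```rust
use std::io::Cursor;

// rustls-pemfile 2.x parses the first private key of any supported kind
// (PKCS#1, PKCS#8, SEC1) out of a PEM stream.
let mut cursor = Cursor::new(pem_bytes);
let key_der = rustls_pemfile::private_key(&mut cursor)?
    .ok_or_else(|| anyhow::anyhow!("no private key found in PEM"))?;

// In rustls 0.22, signing-key construction moved under the crypto
// provider, here the ring-backed one.
let signing_key = rustls::crypto::ring::sign::any_supported_type(&key_der)?;
```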
let certified_key = { - let private_key_der = private_key - .private_key_to_der() - .context("serializing private key to DER")?; - let rustls_private_key = rustls::PrivateKey(private_key_der); + let mut cursor = std::io::Cursor::new(db_cert.key.clone()); + let rustls_private_key = rustls_pemfile::private_key(&mut cursor) + .expect("parsing private key PEM") + .expect("no private keys found"); let rustls_signing_key = - rustls::sign::any_supported_type(&rustls_private_key) - .context("parsing DER private key")?; + rustls::crypto::ring::sign::any_supported_type( + &rustls_private_key, + ) + .context("parsing DER private key")?; let rustls_certs = certs_pem .iter() .map(|x509| { x509.to_der() .context("serializing cert to DER") - .map(rustls::Certificate) + .map(rustls::pki_types::CertificateDer::from) }) .collect::<Result<Vec<_>, _>>()?; Arc::new(CertifiedKey::new(rustls_certs, rustls_signing_key)) @@ -563,6 +565,7 @@ pub(crate) async fn read_all_endpoints( /// session. /// /// See the module-level comment for more details. +#[derive(Debug)] pub struct NexusCertResolver { log: slog::Logger, config_rx: watch::Receiver<Option<ExternalEndpoints>>, diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 4045269878..778c5e2fe1 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -39,8 +39,8 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::NameOrId; use omicron_common::api::external::UpdateResult; -use omicron_common::api::external::Vni; use omicron_common::api::internal::nexus; +use omicron_common::api::internal::shared::SourceNatConfig; use propolis_client::support::tungstenite::protocol::frame::coding::CloseCode; use propolis_client::support::tungstenite::protocol::CloseFrame; use propolis_client::support::tungstenite::Message as WebSocketMessage; @@ -52,7 +52,6 @@ use sled_agent_client::types::InstanceMigrationTargetParams; use sled_agent_client::types::InstanceProperties; use sled_agent_client::types::InstancePutMigrationIdsBody; use sled_agent_client::types::InstancePutStateBody; -use sled_agent_client::types::SourceNatConfig; use std::matches; use std::net::SocketAddr; use std::sync::Arc; @@ -1089,7 +1088,7 @@ impl super::Nexus { // matter which one we use because all NICs must be in the // same VPC; see the check in project_create_instance.)
let firewall_rules = if let Some(nic) = nics.first() { - let vni = Vni::try_from(nic.vni.0)?; + let vni = nic.vni; let vpc = self .db_datastore .resolve_vni_to_vpc(opctx, db::model::Vni(vni)) diff --git a/nexus/src/app/instance_network.rs b/nexus/src/app/instance_network.rs index 3db749f43b..8f97642c88 100644 --- a/nexus/src/app/instance_network.rs +++ b/nexus/src/app/instance_network.rs @@ -136,9 +136,9 @@ impl super::Nexus { let nic_id = nic.id; let mapping = SetVirtualNetworkInterfaceHost { virtual_ip: nic.ip, - virtual_mac: nic.mac.clone(), + virtual_mac: nic.mac, physical_host_ip, - vni: nic.vni.clone(), + vni: nic.vni, }; let log = self.log.clone(); @@ -225,7 +225,7 @@ impl super::Nexus { let nic_id = nic.id; let mapping = DeleteVirtualNetworkInterfaceHost { virtual_ip: nic.ip, - vni: nic.vni.clone(), + vni: nic.vni, }; let log = self.log.clone(); @@ -404,7 +404,7 @@ impl super::Nexus { first_port: target_ip.first_port, last_port: target_ip.last_port, sled_address: sled_address.into(), - vni: DbVni(network_interface.vni.clone().into()), + vni: DbVni(network_interface.vni), mac: nexus_db_model::MacAddr( omicron_common::api::external::MacAddr(mac_address), ), diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 5af45985db..80bfd5ef22 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -37,6 +37,7 @@ mod address_lot; pub(crate) mod background; mod bgp; mod certificate; +mod deployment; mod device_auth; mod disk; mod external_dns; @@ -179,6 +180,10 @@ pub struct Nexus { /// Default Crucible region allocation strategy default_region_allocation_strategy: RegionAllocationStrategy, + + /// information about blueprints (deployment configurations) + // This will go away once these are stored in the database. + blueprints: std::sync::Mutex<deployment::Blueprints>, } impl Nexus { @@ -411,6 +416,7 @@ impl Nexus { .pkg .default_region_allocation_strategy .clone(), + blueprints: std::sync::Mutex::new(deployment::Blueprints::new()), }; // TODO-cleanup all the extra Arcs here seems wrong @@ -512,10 +518,6 @@ impl Nexus { } let mut rustls_cfg = rustls::ServerConfig::builder() - .with_safe_default_cipher_suites() - .with_safe_default_kx_groups() - .with_safe_default_protocol_versions() - .unwrap() .with_no_client_auth() .with_cert_resolver(Arc::new(NexusCertResolver::new( self.log.new(o!("component" => "NexusCertResolver")), diff --git a/nexus/src/app/vpc.rs b/nexus/src/app/vpc.rs index c47f499c41..3a6278053a 100644 --- a/nexus/src/app/vpc.rs +++ b/nexus/src/app/vpc.rs @@ -28,7 +28,6 @@ use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; use omicron_common::api::external::NameOrId; use omicron_common::api::external::UpdateResult; -use omicron_common::api::external::Vni; use omicron_common::api::external::VpcFirewallRuleUpdateParams; use omicron_common::api::internal::nexus::HostIdentifier; use sled_agent_client::types::NetworkInterface; @@ -259,7 +258,7 @@ impl super::Nexus { debug!(self.log, "resolved {} rules for sleds", rules_for_sled.len()); let sled_rules_request = sled_agent_client::types::VpcFirewallRulesEnsureBody { - vni: vpc.vni.0.into(), + vni: vpc.vni.0, rules: rules_for_sled, }; @@ -480,7 +479,7 @@ impl super::Nexus { let mut nics = HashSet::new(); let mut targets = Vec::with_capacity(rule.targets.len()); let mut push_target_nic = |nic: &NetworkInterface| { - if nics.insert((*nic.vni, (*nic.mac).clone())) { + if nics.insert((nic.vni, *nic.mac)) { targets.push(nic.clone()); } }; @@ -589,10 +588,8 @@ impl super::Nexus { .unwrap_or(&no_interfaces) {
host_addrs.push( - HostIdentifier::Vpc(Vni::try_from( - *interface.vni, - )?) - .into(), + HostIdentifier::Vpc(interface.vni) + .into(), ) } } diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs index 9a20911893..63578e360a 100644 --- a/nexus/src/internal_api/http_entrypoints.rs +++ b/nexus/src/internal_api/http_entrypoints.rs @@ -25,6 +25,8 @@ use dropshot::ResultsPage; use dropshot::TypedBody; use hyper::Body; use nexus_db_model::Ipv4NatEntryView; +use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintTargetSet; use nexus_types::internal_api::params::SwitchPutRequest; use nexus_types::internal_api::params::SwitchPutResponse; use nexus_types::internal_api::views::to_list; @@ -34,6 +36,7 @@ use omicron_common::api::external::http_pagination::data_page_params_for; use omicron_common::api::external::http_pagination::PaginatedById; use omicron_common::api::external::http_pagination::ScanById; use omicron_common::api::external::http_pagination::ScanParams; +use omicron_common::api::external::Error; use omicron_common::api::internal::nexus::DiskRuntimeState; use omicron_common::api::internal::nexus::ProducerEndpoint; use omicron_common::api::internal::nexus::SledInstanceState; @@ -42,6 +45,7 @@ use oximeter::types::ProducerResults; use oximeter_producer::{collect, ProducerIdPathParams}; use schemars::JsonSchema; use serde::Deserialize; +use serde::Serialize; use std::collections::BTreeMap; use std::sync::Arc; use uuid::Uuid; @@ -74,6 +78,14 @@ pub(crate) fn internal_api() -> NexusApiDescription { api.register(bgtask_list)?; api.register(bgtask_view)?; + api.register(blueprint_list)?; + api.register(blueprint_view)?; + api.register(blueprint_delete)?; + api.register(blueprint_target_view)?; + api.register(blueprint_target_set)?; + api.register(blueprint_generate_from_collection)?; + api.register(blueprint_regenerate)?; + Ok(()) } @@ -591,3 +603,196 @@ async fn ipv4_nat_changeset( }; apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await } + +// APIs for managing blueprints +// +// These are not (yet) intended for use by any other programs. Eventually, we +// will want this functionality part of the public API. But we don't want to +// commit to any of this yet. These properly belong in an RFD 399-style +// "Service and Support API". Absent that, we stick them here. 
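Before the endpoint definitions, a sketch of how a client might drive this API (hypothetical: the Nexus internal base URL, the use of `reqwest` with its `json` feature, and the `serde_json::Value` handling are all assumptions; `collection_id` must name a real inventory collection):

```rust
use anyhow::Result;
use uuid::Uuid;

async fn demo(nexus: &str, collection_id: Uuid) -> Result<()> {
    let client = reqwest::Client::new();

    // Generate an initial blueprint from an inventory collection.
    let blueprint: serde_json::Value = client
        .post(format!("{nexus}/deployment/blueprints/generate-from-collection"))
        .json(&serde_json::json!({ "collection_id": collection_id }))
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;

    // Make it the current target.
    client
        .post(format!("{nexus}/deployment/blueprints/target"))
        .json(&serde_json::json!({
            "target_id": blueprint["id"],
            "enabled": true,
        }))
        .send()
        .await?
        .error_for_status()?;

    // Ask the planner to generate (and register) a successor blueprint.
    client
        .post(format!("{nexus}/deployment/blueprints/regenerate"))
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}
```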
+ +/// Lists blueprints +#[endpoint { + method = GET, + path = "/deployment/blueprints/all", +}] +async fn blueprint_list( + rqctx: RequestContext<Arc<ServerContext>>, + query_params: Query<PaginatedById>, +) -> Result<HttpResponseOk<ResultsPage<Blueprint>>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; + let query = query_params.into_inner(); + let opctx = crate::context::op_context_for_internal_api(&rqctx).await; + let pagparams = data_page_params_for(&rqctx, &query)?; + let blueprints = nexus.blueprint_list(&opctx, &pagparams).await?; + Ok(HttpResponseOk(ScanById::results_page( + &query, + blueprints, + &|_, blueprint: &Blueprint| blueprint.id, + )?)) + }; + + apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Fetches one blueprint +#[endpoint { + method = GET, + path = "/deployment/blueprints/all/{blueprint_id}", +}] +async fn blueprint_view( + rqctx: RequestContext<Arc<ServerContext>>, + path_params: Path<nexus_types::external_api::params::BlueprintPath>, +) -> Result<HttpResponseOk<Blueprint>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let blueprint = nexus.blueprint_view(&opctx, path.blueprint_id).await?; + Ok(HttpResponseOk(blueprint)) + }; + apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Deletes one blueprint +#[endpoint { + method = DELETE, + path = "/deployment/blueprints/all/{blueprint_id}", +}] +async fn blueprint_delete( + rqctx: RequestContext<Arc<ServerContext>>, + path_params: Path<nexus_types::external_api::params::BlueprintPath>, +) -> Result<HttpResponseDeleted, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + nexus.blueprint_delete(&opctx, path.blueprint_id).await?; + Ok(HttpResponseDeleted()) + }; + apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +// Managing the current target blueprint + +/// Describes what blueprint, if any, the system is currently working toward +#[derive(Debug, Serialize, JsonSchema)] +pub struct BlueprintTarget { + /// id of the blueprint that the system is trying to make real + pub target_id: Uuid, + /// policy: should the system actively work towards this blueprint + /// + /// This should generally be left enabled.
+ pub enabled: bool, + /// when this blueprint was made the target + pub time_set: chrono::DateTime<chrono::Utc>, +} + +impl TryFrom<nexus_types::deployment::BlueprintTarget> for BlueprintTarget { + type Error = Error; + + fn try_from( + value: nexus_types::deployment::BlueprintTarget, + ) -> Result<Self, Self::Error> { + Ok(BlueprintTarget { + target_id: value.target_id.ok_or_else(|| { + Error::conflict("no target blueprint has been configured") + })?, + enabled: value.enabled, + time_set: value.time_set, + }) + } +} + +/// Fetches the current target blueprint, if any +#[endpoint { + method = GET, + path = "/deployment/blueprints/target", +}] +async fn blueprint_target_view( + rqctx: RequestContext<Arc<ServerContext>>, +) -> Result<HttpResponseOk<BlueprintTarget>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let target = nexus.blueprint_target_view(&opctx).await?; + Ok(HttpResponseOk(BlueprintTarget::try_from(target)?)) + }; + apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Make the specified blueprint the new target +#[endpoint { + method = POST, + path = "/deployment/blueprints/target", +}] +async fn blueprint_target_set( + rqctx: RequestContext<Arc<ServerContext>>, + target: TypedBody<BlueprintTargetSet>, +) -> Result<HttpResponseOk<BlueprintTarget>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let target = target.into_inner(); + let result = nexus.blueprint_target_set(&opctx, target).await?; + Ok(HttpResponseOk( + BlueprintTarget::try_from(result) + .map_err(|e| Error::conflict(e.to_string()))?, + )) + }; + apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +// Generating blueprints + +#[derive(Debug, Deserialize, JsonSchema)] +struct CollectionId { + collection_id: Uuid, +} + +/// Generates a new blueprint matching the specified inventory collection +#[endpoint { + method = POST, + path = "/deployment/blueprints/generate-from-collection", +}] +async fn blueprint_generate_from_collection( + rqctx: RequestContext<Arc<ServerContext>>, + params: TypedBody<CollectionId>, +) -> Result<HttpResponseOk<Blueprint>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let collection_id = params.into_inner().collection_id; + let result = nexus + .blueprint_generate_from_collection(&opctx, collection_id) + .await?; + Ok(HttpResponseOk(result)) + }; + apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Generates a new blueprint for the current system, re-evaluating anything +/// that's changed since the last one was generated +#[endpoint { + method = POST, + path = "/deployment/blueprints/regenerate", +}] +async fn blueprint_regenerate( + rqctx: RequestContext<Arc<ServerContext>>, +) -> Result<HttpResponseOk<Blueprint>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let result = nexus.blueprint_create_regenerate(&opctx).await?; + Ok(HttpResponseOk(result)) + }; + apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await +} diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 19d5f747d8..da21602cb1 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -1107,6 +1107,7 @@ pub async fn start_sled_agent( sim_mode, Some(nexus_address), Some(update_directory), + None, ); let server = sim::Server::start(&config, &log, true) .await diff
--git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 99ef165188..044f87f7c1 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -608,12 +608,7 @@ async fn test_instance_start_creates_networking_state( // TODO(#3107) Remove this bifurcation when Nexus programs all mappings // itself. if agent.id != sled_id { - assert_sled_v2p_mappings( - agent, - &nics[0], - guest_nics[0].vni.clone().into(), - ) - .await; + assert_sled_v2p_mappings(agent, &nics[0], guest_nics[0].vni).await; } else { assert!(agent.v2p_mappings.lock().await.is_empty()); } @@ -807,12 +802,8 @@ async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { // all mappings explicitly (without skipping the instance's current // sled) this bifurcation should be removed. if sled_agent.id != original_sled_id { - assert_sled_v2p_mappings( - sled_agent, - &nics[0], - guest_nics[0].vni.clone().into(), - ) - .await; + assert_sled_v2p_mappings(sled_agent, &nics[0], guest_nics[0].vni) + .await; } else { assert!(sled_agent.v2p_mappings.lock().await.is_empty()); } @@ -860,12 +851,8 @@ async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { // agent will have updated any mappings there. Remove this bifurcation // when Nexus programs all mappings explicitly. if sled_agent.id != dst_sled_id { - assert_sled_v2p_mappings( - sled_agent, - &nics[0], - guest_nics[0].vni.clone().into(), - ) - .await; + assert_sled_v2p_mappings(sled_agent, &nics[0], guest_nics[0].vni) + .await; } } } @@ -4248,12 +4235,8 @@ async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { // TODO(#3107) Remove this bifurcation when Nexus programs all mappings // itself. if sled_agent.id != sled_id { - assert_sled_v2p_mappings( - sled_agent, - &nics[0], - guest_nics[0].vni.clone().into(), - ) - .await; + assert_sled_v2p_mappings(sled_agent, &nics[0], guest_nics[0].vni) + .await; } else { assert!(sled_agent.v2p_mappings.lock().await.is_empty()); } diff --git a/nexus/tests/integration_tests/sleds.rs b/nexus/tests/integration_tests/sleds.rs index 5e399cbe84..b551cf51b5 100644 --- a/nexus/tests/integration_tests/sleds.rs +++ b/nexus/tests/integration_tests/sleds.rs @@ -101,10 +101,10 @@ async fn test_physical_disk_create_list_delete( let sleds_url = "/v1/system/hardware/sleds"; assert_eq!(sleds_list(&external_client, &sleds_url).await.len(), 1); - // Verify that there are no disks. + // The test framework may set up some disks initially. 
let disks_url = format!("/v1/system/hardware/sleds/{SLED_AGENT_UUID}/disks"); - assert!(physical_disks_list(&external_client, &disks_url).await.is_empty()); + let disks_initial = physical_disks_list(&external_client, &disks_url).await; // Insert a new disk using the internal API, observe it in the external API let sled_id = Uuid::from_str(&SLED_AGENT_UUID).unwrap(); @@ -118,14 +118,22 @@ async fn test_physical_disk_create_list_delete( ) .await; let disks = physical_disks_list(&external_client, &disks_url).await; - assert_eq!(disks.len(), 1); - assert_eq!(disks[0].vendor, "v"); - assert_eq!(disks[0].serial, "s"); - assert_eq!(disks[0].model, "m"); + assert_eq!(disks.len(), disks_initial.len() + 1); + let _new_disk = disks + .iter() + .find(|found_disk| { + found_disk.vendor == "v" + && found_disk.serial == "s" + && found_disk.model == "m" + }) + .expect("did not find the new disk"); // Delete that disk using the internal API, observe it in the external API delete_physical_disk(&internal_client, "v", "s", "m", sled_id).await; - assert!(physical_disks_list(&external_client, &disks_url).await.is_empty()); + assert_eq!( + physical_disks_list(&external_client, &disks_url).await, + disks_initial + ); } #[nexus_test] diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index 90ec67c0e6..dff0f73be7 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -16,6 +16,7 @@ serde.workspace = true serde_json.workspace = true steno.workspace = true strum.workspace = true +thiserror.workspace = true uuid.workspace = true api_identity.workspace = true diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs new file mode 100644 index 0000000000..95404a2c17 --- /dev/null +++ b/nexus/types/src/deployment.rs @@ -0,0 +1,564 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Types representing deployed software and configuration +//! +//! For more on this, see the crate-level documentation for `nexus/deployment`. +//! +//! This lives in nexus/types because it's used by both nexus/db-model and +//! nexus/deployment. (It could as well just live in nexus/db-model, but +//! nexus/deployment does not currently know about nexus/db-model and it's +//! convenient to separate these concerns.) + +use crate::inventory::Collection; +pub use crate::inventory::OmicronZoneConfig; +pub use crate::inventory::OmicronZoneDataset; +pub use crate::inventory::OmicronZoneType; +pub use crate::inventory::OmicronZonesConfig; +pub use crate::inventory::ZpoolName; +use omicron_common::address::Ipv6Subnet; +use omicron_common::address::SLED_PREFIX; +use omicron_common::api::external::Generation; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use std::collections::BTreeMap; +use std::collections::BTreeSet; +use uuid::Uuid; + +/// Fleet-wide deployment policy +/// +/// The **policy** represents the deployment controls that people (operators and +/// support engineers) can modify directly under normal operation. In the +/// limit, this would include things like: which sleds are supposed to be part +/// of the system, how many CockroachDB nodes should be part of the cluster, +/// what system version the system should be running, etc. 
It would _not_
+/// include things like which services should be running on which sleds or which
+/// host OS version should be on each sled because that's up to the control
+/// plane to decide. (To be clear, the intent is that for extenuating
+/// circumstances, people could exercise control over such things, but that
+/// would not be part of normal operation.)
+///
+/// The current policy is pretty limited. It's aimed primarily at supporting
+/// the add/remove sled use case.
+pub struct Policy {
+    /// set of sleds that are supposed to be part of the control plane, along
+    /// with information about resources available to the planner
+    pub sleds: BTreeMap<Uuid, SledResources>,
+}
+
+/// Describes the resources available on each sled for the planner
+pub struct SledResources {
+    /// zpools on this sled
+    ///
+    /// (used to allocate storage for control plane zones with persistent
+    /// storage)
+    pub zpools: BTreeSet<ZpoolName>,
+
+    /// the IPv6 subnet of this sled on the underlay network
+    ///
+    /// (implicitly specifies the whole range of addresses that the planner can
+    /// use for control plane components)
+    pub subnet: Ipv6Subnet<SLED_PREFIX>,
+}
+
+/// Describes a complete set of software and configuration for the system
+// Blueprints are a fundamental part of how the system modifies itself. Each
+// blueprint completely describes all of the software and configuration
+// that the control plane manages. See the nexus/deployment crate-level
+// documentation for details.
+//
+// Blueprints are different from policy. Policy describes the things that an
+// operator would generally want to control. The blueprint describes the
+// details of implementing that policy that an operator shouldn't have to deal
+// with. For example, the operator might write policy that says "I want
+// 5 external DNS zones". The system could then generate a blueprint that
+// _has_ 5 external DNS zones on 5 specific sleds. The blueprint includes all
+// the details needed to achieve that, including which image these zones should
+// run, which zpools their persistent data should be stored on, their public and
+// private IP addresses, their internal DNS names, etc.
+//
+// It must be possible for multiple Nexus instances to execute the same
+// blueprint concurrently and converge to the same thing. Thus, these _cannot_
+// be how a blueprint works:
+//
+// - "add a Nexus zone" -- two Nexus instances operating concurrently would
+//   add _two_ Nexus zones (which is wrong)
+// - "ensure that there is a Nexus zone on this sled with this id" -- the IP
+//   addresses and images are left unspecified. Two Nexus instances could pick
+//   different IPs or images for the zone.
+//
+// This is why blueprints must be so detailed. The key principle here is that
+// **all the work of ensuring that the system does the right thing happens in one
+// process (the update planner in one Nexus instance). Once a blueprint has
+// been committed, everyone is on the same page about how to execute it.** The
+// intent is that this makes both planning and executing a lot easier. In
+// particular, by the time we get to execution, all the hard choices have
+// already been made.
+//
+// Currently, blueprints are limited to describing only the set of Omicron
+// zones deployed on each host and some supporting configuration (e.g., DNS).
+// This is aimed at supporting add/remove sleds. The plan is to grow this to
+// include more of the system as we support more use cases.
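+// As an illustrative sketch only (not part of this change): given two
+// blueprints `bp1` and `bp2`, the diff machinery defined below could be
+// used roughly like this:
+//
+//     let diff = bp1.diff(&bp2);
+//     println!("{}", diff); // diff(1)-like rendering; see Display impl below
+//     for (sled_id, sled_changes) in diff.sleds_changed() {
+//         for z in sled_changes.zones_changed() {
+//             println!("sled {}: zone {} changed", sled_id, z.zone_before.id);
+//         }
+//     }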
+#[derive(Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize)]
+pub struct Blueprint {
+    /// unique identifier for this blueprint
+    pub id: Uuid,
+
+    /// mapping: sled id -> zones deployed on each sled
+    /// A sled is considered part of the control plane cluster iff it has an
+    /// entry in this map.
+    pub omicron_zones: BTreeMap<Uuid, OmicronZonesConfig>,
+
+    /// Omicron zones considered in-service (which generally means that they
+    /// should appear in DNS)
+    pub zones_in_service: BTreeSet<Uuid>,
+
+    /// which blueprint this blueprint is based on
+    pub parent_blueprint_id: Option<Uuid>,
+
+    /// when this blueprint was generated (for debugging)
+    pub time_created: chrono::DateTime<chrono::Utc>,
+    /// identity of the component that generated the blueprint (for debugging)
+    /// This would generally be the Uuid of a Nexus instance.
+    pub creator: String,
+    /// human-readable string describing why this blueprint was created
+    /// (for debugging)
+    pub comment: String,
+}
+
+impl Blueprint {
+    /// Iterate over all the Omicron zones in the blueprint, along with
+    /// associated sled id
+    pub fn all_omicron_zones(
+        &self,
+    ) -> impl Iterator<Item = (Uuid, &OmicronZoneConfig)> {
+        self.omicron_zones
+            .iter()
+            .flat_map(|(sled_id, z)| z.zones.iter().map(|z| (*sled_id, z)))
+    }
+
+    /// Iterate over the ids of all sleds in the blueprint
+    pub fn sleds(&self) -> impl Iterator<Item = Uuid> + '_ {
+        self.omicron_zones.keys().copied()
+    }
+
+    /// Summarize the difference between two blueprints
+    pub fn diff<'a>(&'a self, other: &'a Blueprint) -> OmicronZonesDiff<'a> {
+        OmicronZonesDiff {
+            before_label: format!("blueprint {}", self.id),
+            before_zones: self.omicron_zones.clone(),
+            before_zones_in_service: &self.zones_in_service,
+            after_label: format!("blueprint {}", other.id),
+            after_zones: &other.omicron_zones,
+            after_zones_in_service: &other.zones_in_service,
+        }
+    }
+
+    /// Summarize the difference between a collection and a blueprint
+    ///
+    /// This gives an idea about what would change about a running system if one
+    /// were to execute the blueprint.
+    ///
+    /// Note that collections do not currently include information about what
+    /// zones are in-service, so the caller must provide that information.
+    pub fn diff_from_collection<'a>(
+        &'a self,
+        collection: &'a Collection,
+        before_zones_in_service: &'a BTreeSet<Uuid>,
+    ) -> OmicronZonesDiff<'a> {
+        let before_zones = collection
+            .omicron_zones
+            .iter()
+            .map(|(sled_id, zones_found)| (*sled_id, zones_found.zones.clone()))
+            .collect();
+        OmicronZonesDiff {
+            before_label: format!("collection {}", collection.id),
+            before_zones,
+            before_zones_in_service,
+            after_label: format!("blueprint {}", self.id),
+            after_zones: &self.omicron_zones,
+            after_zones_in_service: &self.zones_in_service,
+        }
+    }
+}
+
+/// Describes which blueprint the system is currently trying to make real
+// This is analogous to the db model type until we have that.
+#[derive(Debug, Clone)]
+pub struct BlueprintTarget {
+    pub target_id: Option<Uuid>,
+    pub enabled: bool,
+    pub time_set: chrono::DateTime<chrono::Utc>,
+}
+
+/// Specifies what blueprint, if any, the system should be working toward
+#[derive(Deserialize, JsonSchema)]
+pub struct BlueprintTargetSet {
+    pub target_id: Uuid,
+    pub enabled: bool,
+}
+
+/// Summarizes the differences between two blueprints
+pub struct OmicronZonesDiff<'a> {
+    before_label: String,
+    // We store an owned copy of "before_zones" to make it easier to support
+    // collections here, where we need to assemble this map ourselves.
+    before_zones: BTreeMap<Uuid, OmicronZonesConfig>,
+    before_zones_in_service: &'a BTreeSet<Uuid>,
+    after_label: String,
+    after_zones: &'a BTreeMap<Uuid, OmicronZonesConfig>,
+    after_zones_in_service: &'a BTreeSet<Uuid>,
+}
+
+/// Describes a sled that appeared on both sides of a diff (possibly changed)
+pub struct DiffSledCommon<'a> {
+    /// id of the sled
+    pub sled_id: Uuid,
+    /// generation of the "zones" configuration on the left side
+    pub generation_before: Generation,
+    /// generation of the "zones" configuration on the right side
+    pub generation_after: Generation,
+    zones_added: Vec<&'a OmicronZoneConfig>,
+    zones_removed: Vec<&'a OmicronZoneConfig>,
+    zones_common: Vec<DiffZoneCommon<'a>>,
+}
+
+impl<'a> DiffSledCommon<'a> {
+    /// Iterate over zones added between the blueprints
+    pub fn zones_added(
+        &self,
+    ) -> impl Iterator<Item = &'a OmicronZoneConfig> + '_ {
+        self.zones_added.iter().copied()
+    }
+
+    /// Iterate over zones removed between the blueprints
+    pub fn zones_removed(
+        &self,
+    ) -> impl Iterator<Item = &'a OmicronZoneConfig> + '_ {
+        self.zones_removed.iter().copied()
+    }
+
+    /// Iterate over zones that are common to both blueprints
+    pub fn zones_in_common(
+        &self,
+    ) -> impl Iterator<Item = DiffZoneCommon<'a>> + '_ {
+        self.zones_common.iter().copied()
+    }
+
+    /// Iterate over zones that changed between the blueprints
+    pub fn zones_changed(
+        &self,
+    ) -> impl Iterator<Item = DiffZoneCommon<'a>> + '_ {
+        self.zones_in_common()
+            .filter(|z| z.changed_how != DiffZoneChangedHow::NoChanges)
+    }
+}
+
+/// Describes a zone that was common to both sides of a diff
+#[derive(Debug, Copy, Clone)]
+pub struct DiffZoneCommon<'a> {
+    /// full zone configuration before
+    pub zone_before: &'a OmicronZoneConfig,
+    /// full zone configuration after
+    pub zone_after: &'a OmicronZoneConfig,
+    /// summary of what changed, if anything
+    pub changed_how: DiffZoneChangedHow,
+}
+
+/// Describes how a zone changed across two blueprints, if at all
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub enum DiffZoneChangedHow {
+    /// the zone did not change between these two blueprints
+    NoChanges,
+    /// the zone details are the same, but it was brought into service
+    AddedToService,
+    /// the zone details are the same, but it was removed from service
+    RemovedFromService,
+    /// the zone's details (i.e., configuration) changed
+    DetailsChanged,
+}
+
+impl<'a> OmicronZonesDiff<'a> {
+    fn sleds_before(&self) -> BTreeSet<Uuid> {
+        self.before_zones.keys().copied().collect()
+    }
+
+    fn sleds_after(&self) -> BTreeSet<Uuid> {
+        self.after_zones.keys().copied().collect()
+    }
+
+    /// Iterate over sleds only present in the second blueprint of a diff
+    pub fn sleds_added(
+        &self,
+    ) -> impl Iterator<Item = (Uuid, &OmicronZonesConfig)> + '_ {
+        let sled_ids = self
+            .sleds_after()
+            .difference(&self.sleds_before())
+            .copied()
+            .collect::<BTreeSet<_>>();
+
+        sled_ids
+            .into_iter()
+            .map(|sled_id| (sled_id, self.after_zones.get(&sled_id).unwrap()))
+    }
+
+    /// Iterate over sleds only present in the first blueprint of a diff
+    pub fn sleds_removed(
+        &self,
+    ) -> impl Iterator<Item = (Uuid, &OmicronZonesConfig)> + '_ {
+        let sled_ids = self
+            .sleds_before()
+            .difference(&self.sleds_after())
+            .copied()
+            .collect::<BTreeSet<_>>();
+        sled_ids
+            .into_iter()
+            .map(|sled_id| (sled_id, self.before_zones.get(&sled_id).unwrap()))
+    }
+
+    /// Iterate over sleds present in both blueprints in a diff
+    pub fn sleds_in_common(
+        &'a self,
+    ) -> impl Iterator<Item = (Uuid, DiffSledCommon<'a>)> + '_ {
+        let sled_ids = self
+            .sleds_before()
+            .intersection(&self.sleds_after())
+            .copied()
+            .collect::<BTreeSet<_>>();
+        sled_ids.into_iter().map(|sled_id| {
+            let b1sledzones = self.before_zones.get(&sled_id).unwrap();
+            let b2sledzones = self.after_zones.get(&sled_id).unwrap();
+
+            // Assemble separate summaries of the zones, indexed by zone id.
+            #[derive(Debug)]
+            struct ZoneInfo<'a> {
+                zone: &'a OmicronZoneConfig,
+                in_service: bool,
+            }
+
+            let b1zones: BTreeMap<Uuid, ZoneInfo> = b1sledzones
+                .zones
+                .iter()
+                .map(|zone| {
+                    (
+                        zone.id,
+                        ZoneInfo {
+                            zone,
+                            in_service: self
+                                .before_zones_in_service
+                                .contains(&zone.id),
+                        },
+                    )
+                })
+                .collect();
+            let mut b2zones: BTreeMap<Uuid, ZoneInfo> = b2sledzones
+                .zones
+                .iter()
+                .map(|zone| {
+                    (
+                        zone.id,
+                        ZoneInfo {
+                            zone,
+                            in_service: self
+                                .after_zones_in_service
+                                .contains(&zone.id),
+                        },
+                    )
+                })
+                .collect();
+            let mut zones_removed = vec![];
+            let mut zones_changed = vec![];
+
+            // Now go through each zone and compare them.
+            for (zone_id, b1z_info) in &b1zones {
+                if let Some(b2z_info) = b2zones.remove(zone_id) {
+                    let changed_how = if b1z_info.zone != b2z_info.zone {
+                        DiffZoneChangedHow::DetailsChanged
+                    } else if b1z_info.in_service && !b2z_info.in_service {
+                        DiffZoneChangedHow::RemovedFromService
+                    } else if !b1z_info.in_service && b2z_info.in_service {
+                        DiffZoneChangedHow::AddedToService
+                    } else {
+                        DiffZoneChangedHow::NoChanges
+                    };
+                    zones_changed.push(DiffZoneCommon {
+                        zone_before: b1z_info.zone,
+                        zone_after: b2z_info.zone,
+                        changed_how,
+                    });
+                } else {
+                    zones_removed.push(b1z_info.zone);
+                }
+            }
+
+            // Since we removed common zones above, anything else exists only in
+            // b2 and was therefore added.
+            let zones_added =
+                b2zones.into_values().map(|b2z_info| b2z_info.zone).collect();
+
+            (
+                sled_id,
+                DiffSledCommon {
+                    sled_id,
+                    generation_before: b1sledzones.generation,
+                    generation_after: b2sledzones.generation,
+                    zones_added,
+                    zones_removed,
+                    zones_common: zones_changed,
+                },
+            )
+        })
+    }
+
+    pub fn sleds_changed(
+        &'a self,
+    ) -> impl Iterator<Item = (Uuid, DiffSledCommon<'a>)> + '_ {
+        self.sleds_in_common().filter(|(_, sled_changes)| {
+            sled_changes.zones_added().next().is_some()
+                || sled_changes.zones_removed().next().is_some()
+                || sled_changes.zones_changed().next().is_some()
+        })
+    }
+
+    fn print_whole_sled(
+        &self,
+        f: &mut std::fmt::Formatter<'_>,
+        prefix: char,
+        label: &str,
+        bbsledzones: &OmicronZonesConfig,
+        sled_id: Uuid,
+    ) -> std::fmt::Result {
+        writeln!(f, "{} sled {} ({})", prefix, sled_id, label)?;
+        writeln!(
+            f,
+            "{} zone config generation {}",
+            prefix, bbsledzones.generation
+        )?;
+        for z in &bbsledzones.zones {
+            writeln!(
+                f,
+                "{} zone {} type {} ({})",
+                prefix,
+                z.id,
+                z.zone_type.label(),
+                label
+            )?;
+        }
+
+        Ok(())
+    }
+}
+
+/// Implements diff(1)-like output for diff'ing two blueprints
+impl<'a> std::fmt::Display for OmicronZonesDiff<'a> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        writeln!(f, "diff {} {}", self.before_label, self.after_label)?;
+        writeln!(f, "--- {}", self.before_label)?;
+        writeln!(f, "+++ {}", self.after_label)?;
+
+        for (sled_id, sled_zones) in self.sleds_removed() {
+            self.print_whole_sled(f, '-', "removed", sled_zones, sled_id)?;
+        }
+
+        for (sled_id, sled_changes) in self.sleds_in_common() {
+            // Print a line about the sled itself and zone config generation,
+            // regardless of whether anything has changed.
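+            // The rendered output mirrors diff(1): unchanged lines are
+            // prefixed with a space, removals with '-', and additions
+            // with '+'.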
+ writeln!(f, " sled {}", sled_id)?; + if sled_changes.generation_before != sled_changes.generation_after { + writeln!( + f, + "- zone config generation {}", + sled_changes.generation_before + )?; + writeln!( + f, + "+ zone config generation {}", + sled_changes.generation_after + )?; + } else { + writeln!( + f, + " zone config generation {}", + sled_changes.generation_before + )?; + } + + for zone in sled_changes.zones_removed() { + writeln!( + f, + "- zone {} type {} (removed)", + zone.id, + zone.zone_type.label(), + )?; + } + + for zone_changes in sled_changes.zones_in_common() { + let zone_id = zone_changes.zone_before.id; + let zone_type = zone_changes.zone_before.zone_type.label(); + let zone2_type = zone_changes.zone_after.zone_type.label(); + match zone_changes.changed_how { + DiffZoneChangedHow::DetailsChanged => { + writeln!( + f, + "- zone {} type {} (changed)", + zone_id, zone_type, + )?; + writeln!( + f, + "+ zone {} type {} (changed)", + zone_id, zone2_type, + )?; + } + DiffZoneChangedHow::RemovedFromService => { + writeln!( + f, + "- zone {} type {} (in service)", + zone_id, zone_type, + )?; + writeln!( + f, + "+ zone {} type {} (removed from service)", + zone_id, zone2_type, + )?; + } + DiffZoneChangedHow::AddedToService => { + writeln!( + f, + "- zone {} type {} (not in service)", + zone_id, zone_type, + )?; + writeln!( + f, + "+ zone {} type {} (added to service)", + zone_id, zone2_type, + )?; + } + DiffZoneChangedHow::NoChanges => { + writeln!( + f, + " zone {} type {} (unchanged)", + zone_id, zone_type, + )?; + } + } + } + + for zone in sled_changes.zones_added() { + writeln!( + f, + "+ zone {} type {} (added)", + zone.id, + zone.zone_type.label(), + )?; + } + } + + for (sled_id, sled_zones) in self.sleds_added() { + self.print_whole_sled(f, '+', "added", sled_zones, sled_id)?; + } + + Ok(()) + } +} diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index d3f269ef5d..a33bc0b8bb 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -88,6 +88,9 @@ id_path_param!(GroupPath, group_id, "group"); id_path_param!(SledPath, sled_id, "sled"); id_path_param!(SwitchPath, switch_id, "switch"); +// Internal API parameters +id_path_param!(BlueprintPath, blueprint_id, "blueprint"); + pub struct SledSelector { /// ID of the sled pub sled: Uuid, diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index c85597e94c..cf312d3b82 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -428,7 +428,7 @@ pub struct Switch { /// /// Physical disks reside in a particular sled and are used to store both /// Instance Disk data as well as internal metadata. 
-#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, JsonSchema)]
 pub struct PhysicalDisk {
     #[serde(flatten)]
     pub identity: AssetIdentityMetadata,
diff --git a/nexus/types/src/identity.rs b/nexus/types/src/identity.rs
index 7837ed5bd9..ededb926df 100644
--- a/nexus/types/src/identity.rs
+++ b/nexus/types/src/identity.rs
@@ -43,7 +43,7 @@ pub trait Resource {
 /// Identity-related metadata that's included in "asset" public API objects
 /// (which generally have no name or description)
-#[derive(Clone, Debug, Deserialize, PartialEq, Serialize, JsonSchema)]
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, JsonSchema)]
 pub struct AssetIdentityMetadata {
     /// unique, immutable, system-controlled identifier for each resource
     pub id: Uuid,
diff --git a/nexus/types/src/inventory.rs b/nexus/types/src/inventory.rs
index b27d7277ba..c99e51af4f 100644
--- a/nexus/types/src/inventory.rs
+++ b/nexus/types/src/inventory.rs
@@ -17,6 +17,7 @@ pub use gateway_client::types::PowerState;
 pub use gateway_client::types::RotSlot;
 pub use gateway_client::types::SpType;
 use omicron_common::api::external::ByteCount;
+pub use omicron_common::api::internal::shared::SourceNatConfig;
 pub use sled_agent_client::types::NetworkInterface;
 pub use sled_agent_client::types::NetworkInterfaceKind;
 pub use sled_agent_client::types::OmicronZoneConfig;
@@ -24,8 +25,6 @@ pub use sled_agent_client::types::OmicronZoneDataset;
 pub use sled_agent_client::types::OmicronZoneType;
 pub use sled_agent_client::types::OmicronZonesConfig;
 pub use sled_agent_client::types::SledRole;
-pub use sled_agent_client::types::SourceNatConfig;
-pub use sled_agent_client::types::Vni;
 pub use sled_agent_client::types::ZpoolName;
 use std::collections::BTreeMap;
 use std::collections::BTreeSet;
@@ -128,6 +127,13 @@ impl Collection {
             .get(&which)
             .and_then(|by_bb| by_bb.get(baseboard_id))
     }
+
+    /// Iterate over all the Omicron zones in the collection
+    pub fn all_omicron_zones(
+        &self,
+    ) -> impl Iterator<Item = &OmicronZoneConfig> {
+        self.omicron_zones.values().flat_map(|z| z.zones.zones.iter())
+    }
 }
 
 /// A unique baseboard id found during a collection
diff --git a/nexus/types/src/lib.rs b/nexus/types/src/lib.rs
index a48c4d3b00..494573e834 100644
--- a/nexus/types/src/lib.rs
+++ b/nexus/types/src/lib.rs
@@ -29,6 +29,7 @@
 //! rules, so our model layer knows about our views. That seems to be a
 //! relatively minor offense, so it's the way we leave things for now.
+pub mod deployment; pub mod external_api; pub mod identity; pub mod internal_api; diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index a1d70d838b..b5cbb25c66 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -125,6 +125,240 @@ } } }, + "/deployment/blueprints/all": { + "get": { + "summary": "Lists blueprints", + "operationId": "blueprint_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlueprintResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/deployment/blueprints/all/{blueprint_id}": { + "get": { + "summary": "Fetches one blueprint", + "operationId": "blueprint_view", + "parameters": [ + { + "in": "path", + "name": "blueprint_id", + "description": "ID of the blueprint", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Blueprint" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "summary": "Deletes one blueprint", + "operationId": "blueprint_delete", + "parameters": [ + { + "in": "path", + "name": "blueprint_id", + "description": "ID of the blueprint", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/deployment/blueprints/generate-from-collection": { + "post": { + "summary": "Generates a new blueprint matching the specified inventory collection", + "operationId": "blueprint_generate_from_collection", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CollectionId" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Blueprint" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/deployment/blueprints/regenerate": { + "post": { + "summary": "Generates a new blueprint for the current system, re-evaluating anything", + "description": "that's changed since the last one was generated", + "operationId": "blueprint_regenerate", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Blueprint" + } + } + } + }, + "4XX": { + "$ref": 
"#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/deployment/blueprints/target": { + "get": { + "summary": "Fetches the current target blueprint, if any", + "operationId": "blueprint_target_view", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlueprintTarget" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "post": { + "summary": "Make the specified blueprint the new target", + "operationId": "blueprint_target_set", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlueprintTargetSet" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlueprintTarget" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/disk/{disk_id}/remove-read-only-parent": { "post": { "summary": "Request removal of a read_only_parent from a disk", @@ -1844,42 +2078,171 @@ "range" ] }, - "ByteCount": { - "description": "Byte count to express memory or storage capacity.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "Certificate": { + "Blueprint": { + "description": "Describes a complete set of software and configuration for the system", "type": "object", "properties": { - "cert": { + "comment": { + "description": "human-readable string describing why this blueprint was created (for debugging)", "type": "string" }, - "key": { + "creator": { + "description": "identity of the component that generated the blueprint (for debugging) This would generally be the Uuid of a Nexus instance.", "type": "string" + }, + "id": { + "description": "unique identifier for this blueprint", + "type": "string", + "format": "uuid" + }, + "omicron_zones": { + "description": "mapping: sled id -> zones deployed on each sled A sled is considered part of the control plane cluster iff it has an entry in this map.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/OmicronZonesConfig" + } + }, + "parent_blueprint_id": { + "nullable": true, + "description": "which blueprint this blueprint is based on", + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "when this blueprint was generated (for debugging)", + "type": "string", + "format": "date-time" + }, + "zones_in_service": { + "description": "Omicron zones considered in-service (which generally means that they should appear in DNS)", + "type": "array", + "items": { + "type": "string", + "format": "uuid" + }, + "uniqueItems": true } }, "required": [ - "cert", - "key" + "comment", + "creator", + "id", + "omicron_zones", + "time_created", + "zones_in_service" ] }, - "Cumulativedouble": { - "description": "A cumulative or counter data type.", + "BlueprintResultsPage": { + "description": "A single page of results", "type": "object", "properties": { - "start_time": { - "type": "string", - "format": "date-time" + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Blueprint" + } }, - "value": { - "type": "number", - "format": "double" + "next_page": { + "nullable": true, + "description": "token used to fetch the 
next page of results (if any)", + "type": "string" } }, "required": [ - "start_time", + "items" + ] + }, + "BlueprintTarget": { + "description": "Describes what blueprint, if any, the system is currently working toward", + "type": "object", + "properties": { + "enabled": { + "description": "policy: should the system actively work towards this blueprint\n\nThis should generally be left enabled.", + "type": "boolean" + }, + "target_id": { + "description": "id of the blueprint that the system is trying to make real", + "type": "string", + "format": "uuid" + }, + "time_set": { + "description": "when this blueprint was made the target", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "enabled", + "target_id", + "time_set" + ] + }, + "BlueprintTargetSet": { + "description": "Specifies what blueprint, if any, the system should be working toward", + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "target_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "enabled", + "target_id" + ] + }, + "ByteCount": { + "description": "Byte count to express memory or storage capacity.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "Certificate": { + "type": "object", + "properties": { + "cert": { + "type": "string" + }, + "key": { + "type": "string" + } + }, + "required": [ + "cert", + "key" + ] + }, + "CollectionId": { + "type": "object", + "properties": { + "collection_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "collection_id" + ] + }, + "Cumulativedouble": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "number", + "format": "double" + } + }, + "required": [ + "start_time", "value" ] }, @@ -3825,6 +4188,16 @@ } ] }, + "IpNet": { + "anyOf": [ + { + "$ref": "#/components/schemas/Ipv4Net" + }, + { + "$ref": "#/components/schemas/Ipv6Net" + } + ] + }, "IpNetwork": { "oneOf": [ { @@ -3912,6 +4285,10 @@ "vni" ] }, + "Ipv4Net": { + "description": "An IPv4 subnet, including prefix and subnet mask", + "type": "string" + }, "Ipv4Network": { "type": "string", "pattern": "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\/(3[0-2]|[0-2]?[0-9])$" @@ -3934,6 +4311,10 @@ "last" ] }, + "Ipv6Net": { + "description": "An IPv6 subnet, including prefix and subnet mask", + "type": "string" + }, "Ipv6Network": { "type": "string", "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\")[/](12[0-8]|1[0-1][0-9]|[0-9]?[0-9])$" @@ -4272,6 +4653,99 @@ "minLength": 1, "maxLength": 63 }, + "NetworkInterface": { + "description": "Information required to construct a virtual network interface", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "ip": { + "type": "string", + "format": "ip" + }, + 
"kind": { + "$ref": "#/components/schemas/NetworkInterfaceKind" + }, + "mac": { + "$ref": "#/components/schemas/MacAddr" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "primary": { + "type": "boolean" + }, + "slot": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "subnet": { + "$ref": "#/components/schemas/IpNet" + }, + "vni": { + "$ref": "#/components/schemas/Vni" + } + }, + "required": [ + "id", + "ip", + "kind", + "mac", + "name", + "primary", + "slot", + "subnet", + "vni" + ] + }, + "NetworkInterfaceKind": { + "description": "The type of network interface", + "oneOf": [ + { + "description": "A vNIC attached to a guest instance", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "instance" + ] + } + }, + "required": [ + "id", + "type" + ] + }, + { + "description": "A vNIC associated with an internal service", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "service" + ] + } + }, + "required": [ + "id", + "type" + ] + } + ] + }, "NewPasswordHash": { "title": "A password hash in PHC string format", "description": "Password hashes must be in PHC (Password Hashing Competition) string format. Passwords must be hashed with Argon2id. Password hashes may be rejected if the parameters appear not to be secure enough.", @@ -4281,6 +4755,410 @@ "description": "Unique name for a saga [`Node`]\n\nEach node requires a string name that's unique within its DAG. The name is used to identify its output. Nodes that depend on a given node (either directly or indirectly) can access the node's output using its name.", "type": "string" }, + "OmicronZoneConfig": { + "description": "Describes one Omicron-managed zone running on a sled", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "underlay_address": { + "type": "string", + "format": "ipv6" + }, + "zone_type": { + "$ref": "#/components/schemas/OmicronZoneType" + } + }, + "required": [ + "id", + "underlay_address", + "zone_type" + ] + }, + "OmicronZoneDataset": { + "description": "Describes a persistent ZFS dataset associated with an Omicron zone", + "type": "object", + "properties": { + "pool_name": { + "$ref": "#/components/schemas/ZpoolName" + } + }, + "required": [ + "pool_name" + ] + }, + "OmicronZoneType": { + "description": "Describes what kind of zone this is (i.e., what component is running in it) as well as any type-specific configuration", + "oneOf": [ + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dns_servers": { + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "domain": { + "nullable": true, + "type": "string" + }, + "nic": { + "description": "The service vNIC providing outbound connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "snat_cfg": { + "description": "The SNAT configuration for outbound connections.", + "allOf": [ + { + "$ref": "#/components/schemas/SourceNatConfig" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "boundary_ntp" + ] + } + }, + "required": [ + "address", + "dns_servers", + "nic", + "ntp_servers", + "snat_cfg", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": 
"#/components/schemas/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse" + ] + } + }, + "required": [ + "address", + "dataset", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse_keeper" + ] + } + }, + "required": [ + "address", + "dataset", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "cockroach_db" + ] + } + }, + "required": [ + "address", + "dataset", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "crucible" + ] + } + }, + "required": [ + "address", + "dataset", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "crucible_pantry" + ] + } + }, + "required": [ + "address", + "type" + ] + }, + { + "type": "object", + "properties": { + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "dns_address": { + "description": "The address at which the external DNS server is reachable.", + "type": "string" + }, + "http_address": { + "description": "The address at which the external DNS server API is reachable.", + "type": "string" + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "external_dns" + ] + } + }, + "required": [ + "dataset", + "dns_address", + "http_address", + "nic", + "type" + ] + }, + { + "type": "object", + "properties": { + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "dns_address": { + "type": "string" + }, + "gz_address": { + "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", + "type": "string", + "format": "ipv6" + }, + "gz_address_index": { + "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "http_address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "internal_dns" + ] + } + }, + "required": [ + "dataset", + "dns_address", + "gz_address", + "gz_address_index", + "http_address", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dns_servers": { + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "domain": { + "nullable": true, + "type": "string" + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "type": { + "type": "string", + "enum": [ + "internal_ntp" + ] + } + }, + "required": [ + "address", + "dns_servers", + "ntp_servers", + "type" + ] + }, + { + "type": "object", + "properties": { + "external_dns_servers": { + "description": "External DNS servers Nexus can use to resolve external hosts.", + "type": "array", + "items": { + "type": "string", + 
"format": "ip" + } + }, + "external_ip": { + "description": "The address at which the external nexus server is reachable.", + "type": "string", + "format": "ip" + }, + "external_tls": { + "description": "Whether Nexus's external endpoint should use TLS", + "type": "boolean" + }, + "internal_address": { + "description": "The address at which the internal nexus server is reachable.", + "type": "string" + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "nexus" + ] + } + }, + "required": [ + "external_dns_servers", + "external_ip", + "external_tls", + "internal_address", + "nic", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "oximeter" + ] + } + }, + "required": [ + "address", + "type" + ] + } + ] + }, + "OmicronZonesConfig": { + "description": "Describes the set of Omicron-managed zones running on a sled", + "type": "object", + "properties": { + "generation": { + "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "zones": { + "description": "list of running zones", + "type": "array", + "items": { + "$ref": "#/components/schemas/OmicronZoneConfig" + } + } + }, + "required": [ + "generation", + "zones" + ] + }, "OximeterInfo": { "description": "Message used to notify Nexus that this oximeter instance is up and running.", "type": "object", @@ -5620,6 +6498,10 @@ "format": "uint32", "minimum": 0 }, + "ZpoolName": { + "description": "Zpool names are of the format ox{i,p}_. 
They are either Internal or External, and should be unique", + "type": "string" + }, "ZpoolPutRequest": { "description": "Sent by a sled agent on startup to Nexus to request further instruction", "type": "object", diff --git a/oximeter/db/src/sql/mod.rs b/oximeter/db/src/sql/mod.rs index 1f84e208d2..5d9685d19f 100644 --- a/oximeter/db/src/sql/mod.rs +++ b/oximeter/db/src/sql/mod.rs @@ -40,6 +40,7 @@ use sqlparser::ast::BinaryOperator; use sqlparser::ast::Cte; use sqlparser::ast::Distinct; use sqlparser::ast::Expr; +use sqlparser::ast::GroupByExpr; use sqlparser::ast::Ident; use sqlparser::ast::Join; use sqlparser::ast::JoinConstraint; @@ -554,7 +555,7 @@ impl RestrictedQuery { from: vec![cte_from], lateral_views: vec![], selection: None, - group_by: vec![], + group_by: GroupByExpr::Expressions(vec![]), cluster_by: vec![], distribute_by: vec![], sort_by: vec![], @@ -601,9 +602,11 @@ impl RestrictedQuery { body: Box::new(SetExpr::Select(Box::new(select))), order_by: vec![], limit: None, + limit_by: vec![], offset: None, fetch: None, locks: vec![], + for_clause: None, }) } @@ -633,6 +636,8 @@ impl RestrictedQuery { alias: None, args: None, with_hints: vec![], + version: None, + partitions: vec![], }, joins: vec![], }; @@ -678,7 +683,7 @@ impl RestrictedQuery { from: vec![from], lateral_views: vec![], selection, - group_by: vec![], + group_by: GroupByExpr::Expressions(vec![]), cluster_by: vec![], distribute_by: vec![], sort_by: vec![], @@ -714,6 +719,8 @@ impl RestrictedQuery { alias: None, args: None, with_hints: vec![], + version: None, + partitions: vec![], }, joins: vec![], }; @@ -746,7 +753,7 @@ impl RestrictedQuery { from: vec![from], lateral_views: vec![], selection, - group_by: vec![], + group_by: GroupByExpr::Expressions(vec![]), cluster_by: vec![], distribute_by: vec![], sort_by: vec![], diff --git a/package-manifest.toml b/package-manifest.toml index b722218fd3..fa6bba7a96 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -348,6 +348,7 @@ source.paths = [ { from = "smf/switch_zone_setup/manifest.xml", to = "/var/svc/manifest/site/switch_zone_setup/manifest.xml" }, { from = "smf/switch_zone_setup/switch_zone_setup", to = "/opt/oxide/bin/switch_zone_setup" }, { from = "smf/switch_zone_setup/support_authorized_keys", to = "/opt/oxide/support/authorized_keys" }, + { from = "/opt/ooce/pgsql-13/lib/amd64", to = "/opt/ooce/pgsql-13/lib/amd64" }, ] output.type = "zone" output.intermediate_only = true @@ -566,7 +567,8 @@ source.packages = [ "mg-ddm.tar.gz", "mgd.tar.gz", "switch_zone_setup.tar.gz", - "xcvradm.tar.gz" + "xcvradm.tar.gz", + "omicron-omdb.tar.gz" ] output.type = "zone" @@ -588,7 +590,8 @@ source.packages = [ "mg-ddm.tar.gz", "mgd.tar.gz", "switch_zone_setup.tar.gz", - "sp-sim-stub.tar.gz" + "sp-sim-stub.tar.gz", + "omicron-omdb.tar.gz" ] output.type = "zone" @@ -610,7 +613,8 @@ source.packages = [ "mg-ddm.tar.gz", "mgd.tar.gz", "switch_zone_setup.tar.gz", - "sp-sim-softnpu.tar.gz" + "sp-sim-softnpu.tar.gz", + "omicron-omdb.tar.gz" ] output.type = "zone" @@ -626,6 +630,15 @@ source.paths = [ output.type = "zone" output.intermediate_only = true +[package.omicron-omdb] +service_name = "omdb" +only_for_targets.image = "standard" +source.type = "local" +source.rust.binary_names = ["omdb"] +source.rust.release = true +output.type = "zone" +output.intermediate_only = true + [package.oxlog] service_name = "oxlog" only_for_targets.image = "standard" diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index b734248f32..5bd205b32e 100644 --- a/sled-agent/Cargo.toml 
+++ b/sled-agent/Cargo.toml
@@ -53,11 +53,11 @@ propolis-client.workspace = true
 propolis-mock-server.workspace = true # Only used by the simulated sled agent
 rand = { workspace = true, features = ["getrandom"] }
 reqwest = { workspace = true, features = ["rustls-tls", "stream"] }
-schemars = { workspace = true, features = [ "chrono", "uuid1" ] }
+schemars = { workspace = true, features = ["chrono", "uuid1"] }
 semver.workspace = true
 serde.workspace = true
 serde_human_bytes.workspace = true
-serde_json = {workspace = true, features = ["raw_value"]}
+serde_json = { workspace = true, features = ["raw_value"] }
 sha3.workspace = true
 sled-agent-client.workspace = true
 sled-hardware.workspace = true
@@ -70,7 +70,7 @@ smf.workspace = true
 tar.workspace = true
 thiserror.workspace = true
 tofino.workspace = true
-tokio = { workspace = true, features = [ "full" ] }
+tokio = { workspace = true, features = ["full"] }
 toml.workspace = true
 usdt.workspace = true
 uuid.workspace = true
diff --git a/sled-agent/src/bin/sled-agent-sim.rs b/sled-agent/src/bin/sled-agent-sim.rs
index 4b3bc9e432..8de9a3c423 100644
--- a/sled-agent/src/bin/sled-agent-sim.rs
+++ b/sled-agent/src/bin/sled-agent-sim.rs
@@ -122,6 +122,7 @@ async fn do_run() -> Result<(), CmdError> {
             args.sim_mode,
             Some(args.nexus_addr),
             Some(tmp.path()),
+            None,
         )
     };
diff --git a/sled-agent/src/params.rs b/sled-agent/src/params.rs
index 8417546e3b..9120bafa9a 100644
--- a/sled-agent/src/params.rs
+++ b/sled-agent/src/params.rs
@@ -702,7 +702,7 @@ impl From<OmicronZoneType> for sled_agent_client::types::OmicronZoneType {
                 dns_servers,
                 domain,
                 ntp_servers,
-                snat_cfg: snat_cfg.into(),
+                snat_cfg,
                 nic: nic.into(),
             },
             OmicronZoneType::Clickhouse { address, dataset } => {
diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs
index 441c7fd842..bed82a7a01 100644
--- a/sled-agent/src/rack_setup/plan/service.rs
+++ b/sled-agent/src/rack_setup/plan/service.rs
@@ -10,7 +10,8 @@ use crate::rack_setup::config::SetupServiceConfig as Config;
 use camino::Utf8PathBuf;
 use dns_service_client::types::DnsConfigParams;
 use illumos_utils::zpool::ZpoolName;
-use internal_dns::{ServiceName, DNS_ZONE};
+use internal_dns::config::{Host, ZoneVariant};
+use internal_dns::ServiceName;
 use omicron_common::address::{
     get_sled_address, get_switch_zone_address, Ipv6Subnet, ReservedRackSubnet,
     DENDRITE_PORT, DNS_HTTP_PORT, DNS_PORT, DNS_REDUNDANCY, MAX_DNS_REDUNDANCY,
@@ -659,7 +660,8 @@ impl Plan {
             let ntp_address = SocketAddrV6::new(address, NTP_PORT, 0, 0);
 
             let (zone_type, svcname) = if idx < BOUNDARY_NTP_COUNT {
-                boundary_ntp_servers.push(format!("{}.host.{}", id, DNS_ZONE));
+                boundary_ntp_servers
+                    .push(Host::for_zone(id, ZoneVariant::Other).fqdn());
                 let (nic, snat_cfg) = svc_port_builder.next_snat(id)?;
                 (
                     OmicronZoneType::BoundaryNtp {
diff --git a/sled-agent/src/sim/config.rs b/sled-agent/src/sim/config.rs
index 81e11dc1c2..7a20dd5709 100644
--- a/sled-agent/src/sim/config.rs
+++ b/sled-agent/src/sim/config.rs
@@ -84,6 +84,7 @@ impl Config {
         sim_mode: SimMode,
         nexus_address: Option<SocketAddr>,
         update_directory: Option<&Utf8Path>,
+        zpools: Option<Vec<ConfigZpool>>,
     ) -> Config {
         // This IP range is guaranteed by RFC 6666 to discard traffic.
         // For tests that don't use a Nexus, we use this address to simulate a
@@ -94,6 +95,10 @@ impl Config {
         // updates, make up a path that doesn't exist.
         let update_directory =
             update_directory.unwrap_or_else(|| "/nonexistent".into());
+        let zpools = zpools.unwrap_or_else(|| {
+            // By default, create 10 "virtual" U.2s, with 1 TB of storage.
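+            // (1 << 40 bytes = 1 TiB per zpool; ten simulated U.2s matches
+            // a fully-populated Gimlet sled.)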
+            vec![ConfigZpool { size: 1 << 40 }; 10]
+        });
         Config {
             id,
             sim_mode,
@@ -104,7 +109,7 @@ impl Config {
                 ..Default::default()
             },
             storage: ConfigStorage {
-                zpools: vec![],
+                zpools,
                 ip: IpAddr::from(Ipv6Addr::LOCALHOST),
             },
             updates: ConfigUpdates {
diff --git a/smf/profile/profile b/smf/profile/profile
index 8f613d4d56..73256cd6fd 100644
--- a/smf/profile/profile
+++ b/smf/profile/profile
@@ -4,7 +4,7 @@ PATH+=:/opt/ooce/bin
 case "$HOSTNAME" in
     oxz_switch)
         # Add tools like xcvradm, swadm & ddmadm to the PATH by default
-        PATH+=:/opt/oxide/bin:/opt/oxide/dendrite/bin:/opt/oxide/mg-ddm/bin
+        PATH+=:/opt/oxide/bin:/opt/oxide/dendrite/bin:/opt/oxide/mg-ddm/bin:/opt/oxide/omdb/bin
        ;;
    oxz_cockroachdb*)
        PATH+=:/opt/oxide/cockroachdb/bin
diff --git a/test-utils/src/certificates.rs b/test-utils/src/certificates.rs
index 54da013e0c..aac50a2ca8 100644
--- a/test-utils/src/certificates.rs
+++ b/test-utils/src/certificates.rs
@@ -5,14 +5,14 @@
 //! Utilities for tests that need certificates.
 
 // Utility structure for making a test certificate
-pub struct CertificateChain {
-    root_cert: rustls::Certificate,
-    intermediate_cert: rustls::Certificate,
-    end_cert: rustls::Certificate,
+pub struct CertificateChain<'a> {
+    root_cert: rustls::pki_types::CertificateDer<'a>,
+    intermediate_cert: rustls::pki_types::CertificateDer<'a>,
+    end_cert: rustls::pki_types::CertificateDer<'a>,
     end_keypair: rcgen::Certificate,
 }
 
-impl CertificateChain {
+impl<'a> CertificateChain<'a> {
     pub fn new<S: Into<String>>(subject_alt_name: S) -> Self {
         let params =
             rcgen::CertificateParams::new(vec![subject_alt_name.into()]);
@@ -36,17 +36,17 @@ impl CertificateChain {
         let end_keypair = rcgen::Certificate::from_params(params)
             .expect("failed to generate end-entity keys");
 
-        let root_cert = rustls::Certificate(
+        let root_cert = rustls::pki_types::CertificateDer::from(
            root_keypair
                .serialize_der()
                .expect("failed to serialize root cert"),
        );
-        let intermediate_cert = rustls::Certificate(
+        let intermediate_cert = rustls::pki_types::CertificateDer::from(
            intermediate_keypair
                .serialize_der_with_signer(&root_keypair)
                .expect("failed to serialize intermediate cert"),
        );
-        let end_cert = rustls::Certificate(
+        let end_cert = rustls::pki_types::CertificateDer::from(
            end_keypair
                .serialize_der_with_signer(&intermediate_keypair)
                .expect("failed to serialize end-entity cert"),
@@ -63,7 +63,7 @@ impl CertificateChain {
         self.end_keypair.serialize_private_key_pem()
     }
 
-    fn cert_chain(&self) -> Vec<rustls::Certificate> {
+    fn cert_chain(&self) -> Vec<rustls::pki_types::CertificateDer<'a>> {
         vec![
             self.end_cert.clone(),
             self.intermediate_cert.clone(),
@@ -76,12 +76,12 @@ impl CertificateChain {
     }
 }
 
-fn tls_cert_to_pem(certs: &Vec<rustls::Certificate>) -> String {
+fn tls_cert_to_pem(certs: &Vec<rustls::pki_types::CertificateDer>) -> String {
     let mut serialized_certs = String::new();
     for cert in certs {
         let encoded_cert = pem::encode(&pem::Pem::new(
             "CERTIFICATE".to_string(),
-            cert.0.clone(),
+            cert.to_vec(),
         ));
 
         serialized_certs.push_str(&encoded_cert);
diff --git a/tools/hubris_checksums b/tools/hubris_checksums
index 707c67fe0c..478d8f192e 100644
--- a/tools/hubris_checksums
+++ b/tools/hubris_checksums
@@ -1,7 +1,7 @@
-09f0342eed777495ac0a852f219d2dec45fdc1b860f938f95736851b1627cad7 build-gimlet-c-image-default-v1.0.4.zip
-aef9279ba6d1d0ffa64586d71cdf5933eddbe048ce1a10f5f611128a84b53642 build-gimlet-d-image-default-v1.0.4.zip
-989f89f0060239b77d92fe068ceae1be406591c997224256c617d77b2ccbf1b0 build-gimlet-e-image-default-v1.0.4.zip
-8e41a139bc62ff86b8343989889491739bb90eb46e1a02585252adf3ee540db9 build-psc-b-image-default-v1.0.4.zip
-76e35e71714921a1ca5f7f8314fc596e3b5fe1dfd422c59fdc9a62c1ebfeec0e build-psc-c-image-default-v1.0.4.zip
-a406045b1d545fd063bb989c84a774e4d09a445618d4a8889ce232a3b45884a7 build-sidecar-b-image-default-v1.0.4.zip
-69ba3ac372388058f8a6e58230e7e2964990609f18c0960357d17bfc16f25bae build-sidecar-c-image-default-v1.0.4.zip
+6567a0775d5f0b7ff09d97f149532a627222971eadd89ea0dac186c9a825846d build-gimlet-c-image-default-v1.0.5.zip
+1190b27246d8c8c20837d957266ac9e90e32934841b9acc2990d2762a3b53a16 build-gimlet-d-image-default-v1.0.5.zip
+79e644ffbbd7195ff2699c90ee26f277edac40b385fc5bb8e7821a4611ad7c11 build-gimlet-e-image-default-v1.0.5.zip
+bf83e0311e18fc716dd5a315106aa965d278c4f481892fe124bc376b2e23581e build-psc-b-image-default-v1.0.5.zip
+0dd1de9c3d3c686e8a05525fbed48c6532b608b34c77214b7fe15a8f54b0f3cb build-psc-c-image-default-v1.0.5.zip
+c024d5546288d0d953735b3a0221ee0e218cc27ed1e26eede5c91c9a8137c592 build-sidecar-b-image-default-v1.0.5.zip
+de79320022718be94c81dc7d44b5229ce0956aff9c1ffa11e8c3ff8961af49bb build-sidecar-c-image-default-v1.0.5.zip
diff --git a/tools/hubris_version b/tools/hubris_version
index 0cce8d745a..37e565d060 100644
--- a/tools/hubris_version
+++ b/tools/hubris_version
@@ -1 +1 @@
-TAGS=(gimlet-v1.0.4 psc-v1.0.4 sidecar-v1.0.4)
+TAGS=(gimlet-v1.0.5 psc-v1.0.5 sidecar-v1.0.5)
diff --git a/update-common/src/artifacts/artifacts_with_plan.rs b/update-common/src/artifacts/artifacts_with_plan.rs
index 94c7294d48..9b579af29a 100644
--- a/update-common/src/artifacts/artifacts_with_plan.rs
+++ b/update-common/src/artifacts/artifacts_with_plan.rs
@@ -6,6 +6,7 @@ use super::ExtractedArtifactDataHandle;
 use super::UpdatePlan;
 use super::UpdatePlanBuilder;
 use crate::errors::RepositoryError;
+use anyhow::anyhow;
 use camino_tempfile::Utf8TempDir;
 use debug_ignore::DebugIgnore;
 use omicron_common::update::ArtifactHash;
@@ -55,10 +56,29 @@ impl ArtifactsWithPlan {
         log: &Logger,
     ) -> Result<Self, RepositoryError>
     where
-        T: io::Read + io::Seek,
+        T: io::Read + io::Seek + Send + 'static,
     {
         // Create a temporary directory to hold the extracted TUF repository.
-        let dir = unzip_into_tempdir(zip_data, log)?;
+        let dir = {
+            let log = log.clone();
+            tokio::task::spawn_blocking(move || {
+                // This is an expensive synchronous method, so run it on the
+                // blocking thread pool.
+                //
+                // TODO: at the moment we don't restrict the size of the
+                // extracted contents or its memory usage, making it
+                // susceptible to zip bombs and other related attacks.
+                // https://github.com/zip-rs/zip/issues/228. We need to think
+                // about this at some point.
+                unzip_into_tempdir(zip_data, &log)
+            })
+            .await
+            .map_err(|join_error| {
+                RepositoryError::Extract(
+                    anyhow!(join_error).context("unzip_into_tempdir panicked"),
+                )
+            })??
+        };
 
         // Time is unavailable during initial setup, so ignore expiration.
Even // if time were available, we might want to be able to load older diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index fad2603240..c34ba58372 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -226,67 +226,59 @@ zip = { version = "0.6.6", default-features = false, features = ["bzip2", "defla [target.x86_64-unknown-linux-gnu.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0", features = ["unstable"] } -rustix = { version = "0.38.25", features = ["fs", "termios"] } +rustix = { version = "0.38.30", features = ["fs", "termios"] } [target.x86_64-unknown-linux-gnu.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0", features = ["unstable"] } -rustix = { version = "0.38.25", features = ["fs", "termios"] } +rustix = { version = "0.38.30", features = ["fs", "termios"] } [target.x86_64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -errno = { version = "0.3.2", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24.2" } +errno = { version = "0.3.8", default-features = false, features = ["std"] } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0", features = ["unstable"] } -rustix = { version = "0.38.25", features = ["fs", "termios"] } +rustix = { version = "0.38.30", features = ["fs", "termios"] } [target.x86_64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -errno = { version = "0.3.2", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24.2" } +errno = { version = "0.3.8", default-features = false, features = ["std"] } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0", features = ["unstable"] } -rustix = { version = "0.38.25", features = ["fs", "termios"] } +rustix = { version = "0.38.30", features = ["fs", "termios"] } [target.aarch64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -errno = { version = "0.3.2", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24.2" } +errno = { version = "0.3.8", default-features = false, features = ["std"] } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0", features = ["unstable"] } -rustix = { version = "0.38.25", features = ["fs", "termios"] } +rustix = { version = "0.38.30", features = ["fs", "termios"] } [target.aarch64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -errno = { version = "0.3.2", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24.2" } +errno = { version = "0.3.8", default-features = false, features = ["std"] } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0", features = ["unstable"] } -rustix = { version = 
"0.38.25", features = ["fs", "termios"] } +rustix = { version = "0.38.30", features = ["fs", "termios"] } [target.x86_64-unknown-illumos.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -errno = { version = "0.3.2", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24.2" } +errno = { version = "0.3.8", default-features = false, features = ["std"] } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0", features = ["unstable"] } -rustix = { version = "0.38.25", features = ["fs", "termios"] } +rustix = { version = "0.38.30", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } [target.x86_64-unknown-illumos.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } -errno = { version = "0.3.2", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24.2" } +errno = { version = "0.3.8", default-features = false, features = ["std"] } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.19.0", features = ["unstable"] } -rustix = { version = "0.38.25", features = ["fs", "termios"] } +rustix = { version = "0.38.30", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] }