diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 6e8b323e1f..2cdfa158ad 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@4820827bd312afaf667a328f1d0fe0fb4f6751b1 # v2 + uses: taiki-e/install-action@a94d7ba8955e0861119ed8d3fddb8823ef7a97a8 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date diff --git a/Cargo.lock b/Cargo.lock index 8b72b1e179..9eeb22632e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -101,15 +101,6 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - [[package]] name = "anstream" version = "0.6.11" @@ -482,9 +473,9 @@ dependencies = [ [[package]] name = "bhyve_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361#8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361" +source = "git+https://github.com/oxidecomputer/propolis?rev=dd788a311a382b09ce1d3e35f7777b378e09fdf7#dd788a311a382b09ce1d3e35f7777b378e09fdf7" dependencies = [ - "bhyve_api_sys 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361)", + "bhyve_api_sys 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=dd788a311a382b09ce1d3e35f7777b378e09fdf7)", "libc", "strum", ] @@ -501,7 +492,7 @@ dependencies = [ [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361#8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361" +source = "git+https://github.com/oxidecomputer/propolis?rev=dd788a311a382b09ce1d3e35f7777b378e09fdf7#dd788a311a382b09ce1d3e35f7777b378e09fdf7" dependencies = [ "libc", "strum", @@ -1025,21 +1016,6 @@ dependencies = [ "libloading", ] -[[package]] -name = "clap" -version = "2.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "ansi_term", - "atty", - "bitflags 1.3.2", - "strsim 0.8.0", - "textwrap 0.11.0", - "unicode-width", - "vec_map", -] - [[package]] name = "clap" version = "4.5.4" @@ -1314,7 +1290,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.4", + "clap", "criterion-plot", "futures", "is-terminal", @@ -1415,7 +1391,7 @@ dependencies = [ [[package]] name = "crucible-agent-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=5677c7be81b60d9ba9c30991d10376f279a1d3b7#5677c7be81b60d9ba9c30991d10376f279a1d3b7" +source = "git+https://github.com/oxidecomputer/crucible?rev=1ef72f3c935e7dc936bf43310c04668fb60d7a20#1ef72f3c935e7dc936bf43310c04668fb60d7a20" dependencies = [ "anyhow", "chrono", @@ -1431,7 +1407,7 @@ dependencies = [ [[package]] name = "crucible-pantry-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=5677c7be81b60d9ba9c30991d10376f279a1d3b7#5677c7be81b60d9ba9c30991d10376f279a1d3b7" +source = "git+https://github.com/oxidecomputer/crucible?rev=1ef72f3c935e7dc936bf43310c04668fb60d7a20#1ef72f3c935e7dc936bf43310c04668fb60d7a20" dependencies = [ "anyhow", "chrono", @@ -1448,7 
+1424,7 @@ dependencies = [ [[package]] name = "crucible-smf" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=5677c7be81b60d9ba9c30991d10376f279a1d3b7#5677c7be81b60d9ba9c30991d10376f279a1d3b7" +source = "git+https://github.com/oxidecomputer/crucible?rev=1ef72f3c935e7dc936bf43310c04668fb60d7a20#1ef72f3c935e7dc936bf43310c04668fb60d7a20" dependencies = [ "crucible-workspace-hack", "libc", @@ -1805,9 +1781,9 @@ checksum = "a7993efb860416547839c115490d4951c6d0f8ec04a3594d9dd99d50ed7ec170" [[package]] name = "diesel" -version = "2.1.5" +version = "2.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03fc05c17098f21b89bc7d98fe1dd3cce2c11c2ad8e145f2a44fe08ed28eb559" +checksum = "ff236accb9a5069572099f0b350a92e9560e8e63a9b8d546162f4a5e03026bb2" dependencies = [ "bitflags 2.4.2", "byteorder", @@ -1957,7 +1933,7 @@ dependencies = [ "camino", "camino-tempfile", "chrono", - "clap 4.5.4", + "clap", "dns-service-client", "dropshot", "expectorate", @@ -2089,7 +2065,7 @@ dependencies = [ "paste", "percent-encoding", "proc-macro2", - "rustls 0.22.2", + "rustls 0.22.4", "rustls-pemfile 2.1.2", "schemars", "serde", @@ -2264,7 +2240,7 @@ dependencies = [ "async-trait", "base64 0.22.0", "chrono", - "clap 4.5.4", + "clap", "colored", "dhcproto", "http 0.2.12", @@ -2686,7 +2662,7 @@ name = "gateway-cli" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.4", + "clap", "futures", "gateway-client", "gateway-messages", @@ -3358,7 +3334,7 @@ dependencies = [ "hyper 1.1.0", "hyper-util", "log", - "rustls 0.22.2", + "rustls 0.22.4", "rustls-native-certs", "rustls-pki-types", "tokio", @@ -3499,7 +3475,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "bhyve_api 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361)", + "bhyve_api 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=dd788a311a382b09ce1d3e35f7777b378e09fdf7)", "byteorder", "camino", "camino-tempfile", @@ -3609,7 +3585,7 @@ dependencies = [ "bytes", "camino", "cancel-safe-futures", - "clap 4.5.4", + "clap", "display-error-chain", "futures", "hex", @@ -3670,7 +3646,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "clap 4.5.4", + "clap", "dropshot", "expectorate", "hyper 0.14.28", @@ -3752,7 +3728,7 @@ name = "internal-dns-cli" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.4", + "clap", "dropshot", "internal-dns", "omicron-common", @@ -3993,7 +3969,7 @@ dependencies = [ "anstyle", "anyhow", "camino", - "clap 4.5.4", + "clap", "colored", "futures", "libc", @@ -4103,7 +4079,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fefdf21230d6143476a28adbee3d930e2b68a3d56443c777cae3fe9340eebff9" dependencies = [ - "clap 4.5.4", + "clap", "escape8259", "termcolor", "threadpool", @@ -4160,7 +4136,7 @@ version = "0.2.4" source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" dependencies = [ "bitfield", - "clap 4.5.4", + "clap", "packed_struct", "serde", ] @@ -4554,6 +4530,8 @@ dependencies = [ "serde", "serde_json", "sled-agent-client", + "slog", + "slog-error-chain", "steno", "strum", "thiserror", @@ -4622,7 +4600,7 @@ dependencies = [ "rcgen", "ref-cast", "regex", - "rustls 0.22.2", + "rustls 0.22.4", "samael", "schemars", "semver 1.0.22", @@ -4781,6 +4759,7 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", + "debug-ignore", "expectorate", "gateway-client", "illumos-utils", @@ -4788,6 +4767,7 @@ 
dependencies = [ "internal-dns", "ipnet", "ipnetwork", + "maplit", "nexus-config", "nexus-inventory", "nexus-types", @@ -4795,9 +4775,11 @@ dependencies = [ "omicron-test-utils", "omicron-uuid-kinds", "omicron-workspace-hack", + "proptest", "rand 0.8.5", "sled-agent-client", "slog", + "test-strategy", "thiserror", "typed-rng", "uuid 1.8.0", @@ -5281,7 +5263,7 @@ dependencies = [ "anyhow", "camino", "camino-tempfile", - "clap 4.5.4", + "clap", "dropshot", "expectorate", "futures", @@ -5315,7 +5297,7 @@ dependencies = [ "anyhow", "base64 0.22.0", "camino", - "clap 4.5.4", + "clap", "dropshot", "expectorate", "futures", @@ -5366,7 +5348,7 @@ dependencies = [ "camino-tempfile", "cancel-safe-futures", "chrono", - "clap 4.5.4", + "clap", "criterion", "crucible-agent-client", "crucible-pantry-client", @@ -5434,14 +5416,14 @@ dependencies = [ "pq-sys", "pretty_assertions", "progenitor-client", - "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361)", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=dd788a311a382b09ce1d3e35f7777b378e09fdf7)", "rand 0.8.5", "rcgen", "ref-cast", "regex", "reqwest", "ring 0.17.8", - "rustls 0.22.2", + "rustls 0.22.4", "rustls-pemfile 2.1.2", "samael", "schemars", @@ -5483,7 +5465,7 @@ dependencies = [ "camino", "camino-tempfile", "chrono", - "clap 4.5.4", + "clap", "crossterm", "crucible-agent-client", "csv", @@ -5525,7 +5507,7 @@ dependencies = [ "strum", "subprocess", "tabled", - "textwrap 0.16.1", + "textwrap", "tokio", "unicode-width", "uuid 1.8.0", @@ -5537,7 +5519,7 @@ version = "0.1.0" dependencies = [ "anyhow", "camino", - "clap 4.5.4", + "clap", "expectorate", "futures", "hex", @@ -5604,7 +5586,7 @@ dependencies = [ "cancel-safe-futures", "cfg-if", "chrono", - "clap 4.5.4", + "clap", "crucible-agent-client", "derive_more", "display-error-chain", @@ -5648,7 +5630,7 @@ dependencies = [ "oximeter-instruments", "oximeter-producer", "pretty_assertions", - "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361)", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=dd788a311a382b09ce1d3e35f7777b378e09fdf7)", "propolis-mock-server", "rand 0.8.5", "rcgen", @@ -5711,7 +5693,7 @@ dependencies = [ "regex", "reqwest", "ring 0.17.8", - "rustls 0.22.2", + "rustls 0.22.4", "slog", "subprocess", "tar", @@ -5752,7 +5734,7 @@ dependencies = [ "bytes", "chrono", "cipher", - "clap 4.5.4", + "clap", "clap_builder", "console", "const-oid", @@ -6131,7 +6113,7 @@ dependencies = [ "anyhow", "camino", "chrono", - "clap 4.5.4", + "clap", "dropshot", "expectorate", "futures", @@ -6175,7 +6157,7 @@ dependencies = [ "bytes", "camino", "chrono", - "clap 4.5.4", + "clap", "crossterm", "dropshot", "expectorate", @@ -6248,7 +6230,7 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", - "clap 4.5.4", + "clap", "dropshot", "nexus-client", "omicron-common", @@ -6270,7 +6252,7 @@ dependencies = [ "anyhow", "camino", "chrono", - "clap 4.5.4", + "clap", "omicron-workspace-hack", "sigpipe", "uuid 1.8.0", @@ -7092,7 +7074,7 @@ dependencies = [ [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361#8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361" +source = "git+https://github.com/oxidecomputer/propolis?rev=dd788a311a382b09ce1d3e35f7777b378e09fdf7#dd788a311a382b09ce1d3e35f7777b378e09fdf7" dependencies = [ "async-trait", "base64 
0.21.7", @@ -7113,17 +7095,17 @@ dependencies = [ [[package]] name = "propolis-mock-server" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361#8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361" +source = "git+https://github.com/oxidecomputer/propolis?rev=dd788a311a382b09ce1d3e35f7777b378e09fdf7#dd788a311a382b09ce1d3e35f7777b378e09fdf7" dependencies = [ "anyhow", "atty", "base64 0.21.7", - "clap 4.5.4", + "clap", "dropshot", "futures", "hyper 0.14.28", "progenitor", - "propolis_types 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361)", + "propolis_types 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=dd788a311a382b09ce1d3e35f7777b378e09fdf7)", "rand 0.8.5", "reqwest", "schemars", @@ -7164,7 +7146,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361#8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361" +source = "git+https://github.com/oxidecomputer/propolis?rev=dd788a311a382b09ce1d3e35f7777b378e09fdf7#dd788a311a382b09ce1d3e35f7777b378e09fdf7" dependencies = [ "schemars", "serde", @@ -7407,7 +7389,7 @@ dependencies = [ "assert_matches", "camino", "camino-tempfile", - "clap 4.5.4", + "clap", "dns-service-client", "dropshot", "expectorate", @@ -7950,9 +7932,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.2" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", "ring 0.17.8", @@ -8968,7 +8950,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "clap 4.5.4", + "clap", "dropshot", "futures", "gateway-messages", @@ -9146,12 +9128,6 @@ dependencies = [ "vte", ] -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - [[package]] name = "strsim" version = "0.10.0" @@ -9210,30 +9186,6 @@ dependencies = [ "syn 2.0.59", ] -[[package]] -name = "structopt" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10" -dependencies = [ - "clap 2.34.0", - "lazy_static", - "structopt-derive", -] - -[[package]] -name = "structopt-derive" -version = "0.4.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" -dependencies = [ - "heck 0.3.3", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "strum" version = "0.26.2" @@ -9517,15 +9469,6 @@ dependencies = [ "syn 2.0.59", ] -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - [[package]] name = "textwrap" version = "0.16.1" @@ -9708,13 +9651,11 @@ dependencies = [ [[package]] name = "tofino" version = "0.1.0" -source = "git+http://github.com/oxidecomputer/tofino?branch=main#8283f8021068f055484b653f0cc6b4d5c0979dc1" +source = "git+http://github.com/oxidecomputer/tofino?branch=main#1b66b89c3727d2191082df057b068ec52560e334" dependencies = [ 
"anyhow", "cc", - "chrono", "illumos-devinfo", - "structopt", ] [[package]] @@ -9799,7 +9740,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" dependencies = [ - "rustls 0.22.2", + "rustls 0.22.4", "rustls-pki-types", "tokio", ] @@ -10150,7 +10091,7 @@ dependencies = [ "assert_cmd", "camino", "chrono", - "clap 4.5.4", + "clap", "console", "datatest-stable", "fs-err", @@ -10427,7 +10368,7 @@ dependencies = [ "camino", "camino-tempfile", "chrono", - "clap 4.5.4", + "clap", "debug-ignore", "display-error-chain", "dropshot", @@ -10458,7 +10399,7 @@ dependencies = [ "camino", "camino-tempfile", "cancel-safe-futures", - "clap 4.5.4", + "clap", "debug-ignore", "derive-where", "either", @@ -10654,12 +10595,6 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" -[[package]] -name = "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - [[package]] name = "version_check" version = "0.9.4" @@ -10902,7 +10837,7 @@ dependencies = [ "buf-list", "camino", "ciborium", - "clap 4.5.4", + "clap", "crossterm", "futures", "humantime", @@ -10927,7 +10862,7 @@ dependencies = [ "slog-term", "supports-color", "tempfile", - "textwrap 0.16.1", + "textwrap", "tokio", "tokio-util", "toml 0.8.12", @@ -10963,7 +10898,7 @@ dependencies = [ "bytes", "camino", "ciborium", - "clap 4.5.4", + "clap", "crossterm", "omicron-workspace-hack", "reedline", @@ -10988,7 +10923,7 @@ dependencies = [ "bytes", "camino", "camino-tempfile", - "clap 4.5.4", + "clap", "debug-ignore", "display-error-chain", "dpd-client", @@ -11314,7 +11249,7 @@ dependencies = [ "camino", "cargo_metadata", "cargo_toml", - "clap 4.5.4", + "clap", "fs-err", "macaddr", "serde", @@ -11462,7 +11397,7 @@ name = "zone-network-setup" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.4", + "clap", "dropshot", "illumos-utils", "omicron-common", diff --git a/Cargo.toml b/Cargo.toml index a22d0a0827..fa1f548b56 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -197,9 +197,9 @@ cookie = "0.18" criterion = { version = "0.5.1", features = [ "async_tokio" ] } crossbeam = "0.8" crossterm = { version = "0.27.0", features = ["event-stream"] } -crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "5677c7be81b60d9ba9c30991d10376f279a1d3b7" } -crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "5677c7be81b60d9ba9c30991d10376f279a1d3b7" } -crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "5677c7be81b60d9ba9c30991d10376f279a1d3b7" } +crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "1ef72f3c935e7dc936bf43310c04668fb60d7a20" } +crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "1ef72f3c935e7dc936bf43310c04668fb60d7a20" } +crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "1ef72f3c935e7dc936bf43310c04668fb60d7a20" } csv = "1.3.0" curve25519-dalek = "4" datatest-stable = "0.2.6" @@ -209,7 +209,7 @@ db-macros = { path = "nexus/db-macros" } debug-ignore = "1.0.5" derive_more = "0.99.17" derive-where = "1.2.7" -diesel = { version = "2.1.5", features = ["postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] } +diesel = { version = "2.1.6", features = 
["postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] } diesel-dtrace = { git = "https://github.com/oxidecomputer/diesel-dtrace", branch = "main" } dns-server = { path = "dns-server" } dns-service-client = { path = "clients/dns-service-client" } @@ -339,9 +339,9 @@ prettyplease = { version = "0.2.19", features = ["verbatim"] } proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } -bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361" } -propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361" } -propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361" } +bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "dd788a311a382b09ce1d3e35f7777b378e09fdf7" } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "dd788a311a382b09ce1d3e35f7777b378e09fdf7" } +propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "dd788a311a382b09ce1d3e35f7777b378e09fdf7" } proptest = "1.4.0" quote = "1.0" rand = "0.8.5" diff --git a/clients/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs index cd04b8233f..685c83f80c 100644 --- a/clients/nexus-client/src/lib.rs +++ b/clients/nexus-client/src/lib.rs @@ -290,7 +290,8 @@ impl From<&omicron_common::api::internal::shared::SourceNatConfig> fn from( r: &omicron_common::api::internal::shared::SourceNatConfig, ) -> Self { - Self { ip: r.ip, first_port: r.first_port, last_port: r.last_port } + let (first_port, last_port) = r.port_range_raw(); + Self { ip: r.ip, first_port, last_port } } } diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index c123e1f9c8..bc7a2d76ba 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -4,7 +4,10 @@ //! Types shared between Nexus and Sled Agent. -use crate::api::external::{self, BfdMode, Name}; +use crate::{ + address::NUM_SOURCE_NAT_PORTS, + api::external::{self, BfdMode, Name}, +}; use ipnetwork::{IpNetwork, Ipv4Network, Ipv6Network}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -58,16 +61,95 @@ pub struct NetworkInterface { /// An IP address and port range used for source NAT, i.e., making /// outbound network connections from guests or services. -#[derive( - Debug, Clone, Copy, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, -)] +// Note that `Deserialize` is manually implemented; if you make any changes to +// the fields of this structure, you must make them to that implementation too. +#[derive(Debug, Clone, Copy, Serialize, JsonSchema, PartialEq, Eq, Hash)] pub struct SourceNatConfig { /// The external address provided to the instance or service. pub ip: IpAddr, /// The first port used for source NAT, inclusive. - pub first_port: u16, + first_port: u16, /// The last port used for source NAT, also inclusive. - pub last_port: u16, + last_port: u16, +} + +// We implement `Deserialize` manually to add validity checking on the port +// range. +impl<'de> Deserialize<'de> for SourceNatConfig { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + use serde::de::Error; + + // The fields of `SourceNatConfigShadow` should exactly match the fields + // of `SourceNatConfig`. 
We're not really using serde's remote derive, + // but by adding the attribute we get compile-time checking that all the + // field names and types match. (It doesn't check the _order_, but that + // should be fine as long as we're using JSON or similar formats.) + #[derive(Deserialize)] + #[serde(remote = "SourceNatConfig")] + struct SourceNatConfigShadow { + ip: IpAddr, + first_port: u16, + last_port: u16, + } + + let shadow = SourceNatConfigShadow::deserialize(deserializer)?; + SourceNatConfig::new(shadow.ip, shadow.first_port, shadow.last_port) + .map_err(D::Error::custom) + } +} + +impl SourceNatConfig { + /// Construct a `SourceNatConfig` with the given port range, both inclusive. + /// + /// # Errors + /// + /// Fails if `(first_port, last_port)` is not aligned to + /// [`NUM_SOURCE_NAT_PORTS`]. + pub fn new( + ip: IpAddr, + first_port: u16, + last_port: u16, + ) -> Result<Self, SourceNatConfigError> { + if first_port % NUM_SOURCE_NAT_PORTS == 0 + && last_port + .checked_sub(first_port) + .and_then(|diff| diff.checked_add(1)) + == Some(NUM_SOURCE_NAT_PORTS) + { + Ok(Self { ip, first_port, last_port }) + } else { + Err(SourceNatConfigError::UnalignedPortPair { + first_port, + last_port, + }) + } + } + + /// Get the port range. + /// + /// Guaranteed to be aligned to [`NUM_SOURCE_NAT_PORTS`]. + pub fn port_range(&self) -> std::ops::RangeInclusive<u16> { + self.first_port..=self.last_port + } + + /// Get the port range as a raw tuple; both values are inclusive. + /// + /// Guaranteed to be aligned to [`NUM_SOURCE_NAT_PORTS`]. + pub fn port_range_raw(&self) -> (u16, u16) { + self.port_range().into_inner() + } +} + +#[derive(Debug, thiserror::Error)] +pub enum SourceNatConfigError { + #[error( + "snat port range is not aligned to {NUM_SOURCE_NAT_PORTS}: \ + ({first_port}, {last_port})" + )] + UnalignedPortPair { first_port: u16, last_port: u16 }, } // We alias [`RackNetworkConfig`] to the current version of the protocol, so diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs index a7fcc6badc..67b91e0280 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus.rs @@ -97,8 +97,6 @@ enum BlueprintsCommands { Delete(BlueprintIdArgs), /// Interact with the current target blueprint Target(BlueprintsTargetArgs), - /// Generate an initial blueprint from a specific inventory collection - GenerateFromCollection(CollectionIdArgs), /// Generate a new blueprint Regenerate, /// Import a blueprint @@ -361,15 +359,6 @@ impl NexusArgs { let token = omdb.check_allow_destructive()?; cmd_nexus_blueprints_regenerate(&client, token).await } - NexusCommands::Blueprints(BlueprintsArgs { - command: BlueprintsCommands::GenerateFromCollection(args), - }) => { - let token = omdb.check_allow_destructive()?; - cmd_nexus_blueprints_generate_from_collection( - &client, args, token, - ) - .await - } NexusCommands::Blueprints(BlueprintsArgs { command: BlueprintsCommands::Import(args), }) => { @@ -1134,26 +1123,6 @@ async fn cmd_nexus_blueprints_target_set_enabled( Ok(()) } -async fn cmd_nexus_blueprints_generate_from_collection( - client: &nexus_client::Client, - args: &CollectionIdArgs, - _destruction_token: DestructiveOperationToken, -) -> Result<(), anyhow::Error> { - let blueprint = client - .blueprint_generate_from_collection( - &nexus_client::types::CollectionId { - collection_id: args.collection_id, - }, - ) - .await - .context("creating blueprint from collection id")?; - eprintln!( - "created blueprint {} from collection id {}", - blueprint.id, args.collection_id - ); - Ok(())
-} - async fn cmd_nexus_blueprints_regenerate( client: &nexus_client::Client, _destruction_token: DestructiveOperationToken, diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index f09a2715a9..17668d002f 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -457,10 +457,10 @@ parent: zone type zone ID disposition underlay IP ----------------------------------------------------------------------------------------- - sled .....................: zones at generation 2 + sled .....................: blueprint zones at generation 2 (no zones) - sled .....................: zones at generation 2 + sled .....................: blueprint zones at generation 2 clickhouse ..................... in service ::1 cockroach_db ..................... in service ::1 crucible_pantry ..................... in service ::1 @@ -490,10 +490,10 @@ parent: zone type zone ID disposition underlay IP ----------------------------------------------------------------------------------------- - sled .....................: zones at generation 2 + sled .....................: blueprint zones at generation 2 (no zones) - sled .....................: zones at generation 2 + sled .....................: blueprint zones at generation 2 clickhouse ..................... in service ::1 cockroach_db ..................... in service ::1 crucible_pantry ..................... in service ::1 @@ -525,7 +525,7 @@ to: blueprint ............. UNCHANGED SLEDS: - sled .....................: zones at generation 2 + sled .....................: blueprint zones at generation 2 clickhouse ..................... in service ::1 cockroach_db ..................... in service ::1 crucible_pantry ..................... in service ::1 diff --git a/dev-tools/reconfigurator-cli/src/main.rs b/dev-tools/reconfigurator-cli/src/main.rs index abf8cf4441..ae4a6bd648 100644 --- a/dev-tools/reconfigurator-cli/src/main.rs +++ b/dev-tools/reconfigurator-cli/src/main.rs @@ -22,6 +22,7 @@ use nexus_reconfigurator_planning::system::{ }; use nexus_types::deployment::BlueprintZoneFilter; use nexus_types::deployment::OmicronZoneExternalIp; +use nexus_types::deployment::OmicronZoneExternalIpKind; use nexus_types::deployment::OmicronZoneNic; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledFilter; @@ -158,7 +159,9 @@ impl ReconfiguratorSim { .borrow_mut() .entry(ip) .or_insert_with(ExternalIpUuid::new_v4), - ip, + // TODO-cleanup This is potentially wrong; + // zone_type should tell us the IP kind. 
+ kind: OmicronZoneExternalIpKind::Floating(ip), }; builder .add_omicron_zone_external_ip(zone.id, external_ip) @@ -168,7 +171,7 @@ impl ReconfiguratorSim { let nic = OmicronZoneNic { id: nic.id, mac: nic.mac, - ip: nic.ip.into(), + ip: nic.ip, slot: nic.slot, primary: nic.primary, }; @@ -311,9 +314,6 @@ fn process_entry(sim: &mut ReconfiguratorSim, entry: String) -> LoopResult { Commands::InventoryList => cmd_inventory_list(sim), Commands::InventoryGenerate => cmd_inventory_generate(sim), Commands::BlueprintList => cmd_blueprint_list(sim), - Commands::BlueprintFromInventory(args) => { - cmd_blueprint_from_inventory(sim, args) - } Commands::BlueprintEdit(args) => cmd_blueprint_edit(sim, args), Commands::BlueprintPlan(args) => cmd_blueprint_plan(sim, args), Commands::BlueprintShow(args) => cmd_blueprint_show(sim, args), @@ -371,8 +371,6 @@ enum Commands { /// list all blueprints BlueprintList, - /// generate a blueprint that represents the contents of an inventory - BlueprintFromInventory(InventoryArgs), /// run planner to generate a new blueprint BlueprintPlan(BlueprintPlanArgs), /// edit contents of a blueprint directly @@ -715,38 +713,6 @@ fn cmd_blueprint_list( Ok(Some(table)) } -fn cmd_blueprint_from_inventory( - sim: &mut ReconfiguratorSim, - args: InventoryArgs, -) -> anyhow::Result<Option<String>> { - let collection_id = args.collection_id; - let collection = sim - .collections - .get(&collection_id) - .ok_or_else(|| anyhow!("no such collection: {}", collection_id))?; - let dns_version = Generation::new(); - let planning_input = sim - .system - .to_planning_input_builder() - .context("generating planning_input builder")? - .build(); - let creator = "reconfigurator-sim"; - let blueprint = BlueprintBuilder::build_initial_from_collection( - collection, - dns_version, - dns_version, - planning_input.all_sled_ids(SledFilter::All), - creator, - ) - .context("building collection")?; - let rv = format!( - "generated blueprint {} from inventory collection {}", - blueprint.id, collection_id - ); - sim.blueprint_insert_new(blueprint); - Ok(Some(rv)) -} - fn cmd_blueprint_plan( sim: &mut ReconfiguratorSim, args: BlueprintPlanArgs, diff --git a/dev-tools/xtask/src/virtual_hardware.rs b/dev-tools/xtask/src/virtual_hardware.rs index c98d350c73..95190ebfde 100644 --- a/dev-tools/xtask/src/virtual_hardware.rs +++ b/dev-tools/xtask/src/virtual_hardware.rs @@ -210,7 +210,7 @@ fn demount_backingfs() -> Result<()> { const BACKED_SERVICES: &str = "svc:/system/fmd:default"; println!("Disabling {BACKED_SERVICES}"); svcadm_temporary_toggle(BACKED_SERVICES, false)?; - for dataset in zfs_list_internal("yes", "noauto")? { + for dataset in zfs_list_internal("noauto", "yes")?
{ println!("unmounting: {dataset}"); zfs_umount(&dataset)?; } diff --git a/illumos-utils/src/opte/port_manager.rs b/illumos-utils/src/opte/port_manager.rs index 2b2f622070..03c51c321d 100644 --- a/illumos-utils/src/opte/port_manager.rs +++ b/illumos-utils/src/opte/port_manager.rs @@ -141,7 +141,7 @@ impl PortManager { ); return Err(Error::InvalidPortIpConfig); }; - let ports = snat.first_port..=snat.last_port; + let ports = snat.port_range(); Some($snat_t { external_ip: snat_ip.into(), ports }) } None => None, @@ -428,7 +428,7 @@ impl PortManager { ); return Err(Error::InvalidPortIpConfig); }; - let ports = snat.first_port..=snat.last_port; + let ports = snat.port_range(); Some($snat_t { external_ip: snat_ip.into(), ports }) } None => None, diff --git a/nexus/db-model/Cargo.toml b/nexus/db-model/Cargo.toml index 45a086a5b3..bfe75377c5 100644 --- a/nexus/db-model/Cargo.toml +++ b/nexus/db-model/Cargo.toml @@ -28,6 +28,8 @@ schemars = { workspace = true, features = ["chrono", "uuid1"] } semver.workspace = true serde.workspace = true serde_json.workspace = true +slog.workspace = true +slog-error-chain.workspace = true steno.workspace = true strum.workspace = true thiserror.workspace = true diff --git a/nexus/db-model/src/external_ip.rs b/nexus/db-model/src/external_ip.rs index 93af08fdee..5031b12546 100644 --- a/nexus/db-model/src/external_ip.rs +++ b/nexus/db-model/src/external_ip.rs @@ -9,6 +9,7 @@ use crate::impl_enum_type; use crate::schema::external_ip; use crate::schema::floating_ip; use crate::Name; +use crate::ServiceNetworkInterface; use crate::SqlU16; use chrono::DateTime; use chrono::Utc; @@ -16,17 +17,24 @@ use db_macros::Resource; use diesel::Queryable; use diesel::Selectable; use ipnetwork::IpNetwork; +use nexus_types::deployment::OmicronZoneExternalIp; +use nexus_types::deployment::OmicronZoneExternalIpKind; use nexus_types::external_api::params; use nexus_types::external_api::shared; use nexus_types::external_api::views; -use omicron_common::address::NUM_SOURCE_NAT_PORTS; +use nexus_types::inventory::SourceNatConfig; use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadata; +use omicron_common::api::internal::shared::SourceNatConfigError; +use omicron_uuid_kinds::ExternalIpUuid; use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::OmicronZoneUuid; use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; use sled_agent_client::types::InstanceExternalIpBody; +use sled_agent_client::ZoneKind; +use slog_error_chain::SlogInlineError; use std::convert::TryFrom; use std::net::IpAddr; use uuid::Uuid; @@ -131,6 +139,46 @@ pub struct ExternalIp { pub is_probe: bool, } +#[derive(Debug, thiserror::Error, SlogInlineError)] +pub enum OmicronZoneExternalIpError { + #[error("database IP is for an instance")] + IpIsForInstance, + #[error("invalid SNAT configuration")] + InvalidSnatConfig(#[from] SourceNatConfigError), + #[error( + "database IP is ephemeral; currently unsupported for Omicron zones" + )] + EphemeralIp, +} + +impl TryFrom<&'_ ExternalIp> for OmicronZoneExternalIp { + type Error = OmicronZoneExternalIpError; + + fn try_from(row: &ExternalIp) -> Result<Self, Self::Error> { + if !row.is_service { + return Err(OmicronZoneExternalIpError::IpIsForInstance); + } + + let kind = match row.kind { + IpKind::SNat => { + OmicronZoneExternalIpKind::Snat(SourceNatConfig::new( + row.ip.ip(), + row.first_port.0, + row.last_port.0, + )?)
+ } + IpKind::Floating => { + OmicronZoneExternalIpKind::Floating(row.ip.ip()) + } + IpKind::Ephemeral => { + return Err(OmicronZoneExternalIpError::EphemeralIp) + } + }; + + Ok(Self { id: ExternalIpUuid::from_untyped_uuid(row.id), kind }) + } +} + /// A view type constructed from `ExternalIp` used to represent Floating IP /// objects in user-facing APIs. /// @@ -153,15 +201,13 @@ pub struct FloatingIp { pub project_id: Uuid, } -impl From<ExternalIp> +impl TryFrom<ExternalIp> for omicron_common::api::internal::shared::SourceNatConfig { - fn from(eip: ExternalIp) -> Self { - Self { - ip: eip.ip.ip(), - first_port: eip.first_port.0, - last_port: eip.last_port.0, - } + type Error = SourceNatConfigError; + + fn try_from(eip: ExternalIp) -> Result<Self, Self::Error> { + Self::new(eip.ip.ip(), eip.first_port.0, eip.last_port.0) } } @@ -303,104 +349,65 @@ impl IncompleteExternalIp { } } - pub fn for_service_explicit( - id: Uuid, - name: &Name, - description: &str, - service_id: Uuid, - pool_id: Uuid, - address: IpAddr, - ) -> Self { - Self { - id, - name: Some(name.clone()), - description: Some(description.to_string()), - time_created: Utc::now(), - kind: IpKind::Floating, - is_service: true, - is_probe: false, - parent_id: Some(service_id), - pool_id, - project_id: None, - explicit_ip: Some(IpNetwork::from(address)), - explicit_port_range: None, - state: IpAttachState::Attached, - } - } - - pub fn for_service_explicit_snat( - id: Uuid, - service_id: Uuid, + pub fn for_omicron_zone( pool_id: Uuid, - address: IpAddr, - (first_port, last_port): (u16, u16), + external_ip: OmicronZoneExternalIp, + zone_id: OmicronZoneUuid, + zone_kind: ZoneKind, ) -> Self { - assert!( - (first_port % NUM_SOURCE_NAT_PORTS == 0) - && (last_port - first_port + 1) == NUM_SOURCE_NAT_PORTS, - "explicit port range must be aligned to {}", - NUM_SOURCE_NAT_PORTS, - ); - let explicit_port_range = Some((first_port.into(), last_port.into())); - let kind = IpKind::SNat; - Self { - id, - name: None, - description: None, - time_created: Utc::now(), - kind, - is_service: true, - is_probe: false, - parent_id: Some(service_id), - pool_id, - project_id: None, - explicit_ip: Some(IpNetwork::from(address)), - explicit_port_range, - state: kind.initial_state(), - } - } - - pub fn for_service( - id: Uuid, - name: &Name, - description: &str, - service_id: Uuid, - pool_id: Uuid, - ) -> Self { - let kind = IpKind::Floating; - Self { - id, - name: Some(name.clone()), - description: Some(description.to_string()), - time_created: Utc::now(), - kind, - is_service: true, - is_probe: false, - parent_id: Some(service_id), - pool_id, - project_id: None, - explicit_ip: None, - explicit_port_range: None, - state: IpAttachState::Attached, - } - } + let (kind, ip, port_range, name, description, state) = match external_ip + .kind + { + OmicronZoneExternalIpKind::Floating(ip) => { + // We'll name this external IP the same as we'll name the NIC + // associated with this zone. + let name = ServiceNetworkInterface::name(zone_id, zone_kind); + + // Using `IpAttachState::Attached` preserves existing behavior; + // `IpKind::Floating.initial_state()` is `::Detached`. If/when + // we do more to unify IPs between services and instances, this + // probably needs to be addressed.
+ let state = IpAttachState::Attached; + + ( + IpKind::Floating, + ip, + None, + Some(name), + Some(zone_kind.to_string()), + state, + ) + } + OmicronZoneExternalIpKind::Snat(snat_cfg) => { + let (first_port, last_port) = snat_cfg.port_range_raw(); + let kind = IpKind::SNat; + ( + kind, + snat_cfg.ip, + Some((first_port.into(), last_port.into())), + // Only floating IPs are allowed to have names and + // descriptions. + None, + None, + kind.initial_state(), + ) + } + }; - pub fn for_service_snat(id: Uuid, service_id: Uuid, pool_id: Uuid) -> Self { - let kind = IpKind::SNat; Self { - id, - name: None, - description: None, + id: external_ip.id.into_untyped_uuid(), + name, + description, time_created: Utc::now(), kind, is_service: true, is_probe: false, - parent_id: Some(service_id), + parent_id: Some(zone_id.into_untyped_uuid()), pool_id, project_id: None, - explicit_ip: None, - explicit_port_range: None, - state: kind.initial_state(), + explicit_ip: Some(IpNetwork::from(ip)), + explicit_port_range: port_range, + state, } } diff --git a/nexus/db-model/src/network_interface.rs b/nexus/db-model/src/network_interface.rs index 108232275d..ff774699d6 100644 --- a/nexus/db-model/src/network_interface.rs +++ b/nexus/db-model/src/network_interface.rs @@ -13,9 +13,12 @@ use chrono::DateTime; use chrono::Utc; use db_macros::Resource; use diesel::AsChangeset; +use ipnetwork::NetworkSize; use nexus_types::external_api::params; use nexus_types::identity::Resource; use omicron_common::api::{external, internal}; +use omicron_uuid_kinds::OmicronZoneUuid; +use sled_agent_client::ZoneKind; use uuid::Uuid; /// The max number of interfaces that may be associated with a resource, @@ -146,15 +149,70 @@ pub struct ServiceNetworkInterface { pub primary: bool, } -impl From<ServiceNetworkInterface> for nexus_types::deployment::OmicronZoneNic { - fn from(nic: ServiceNetworkInterface) -> Self { - Self { +impl ServiceNetworkInterface { + /// Generate a suitable [`Name`] for the given Omicron zone ID and kind. + pub fn name(zone_id: OmicronZoneUuid, zone_kind: ZoneKind) -> Name { + // Ideally we'd use `zone_kind.to_string()` here, but that uses + // underscores as separators which aren't allowed in `Name`s. We also + // preserve some existing naming behavior where NTP external networking + // is just called "ntp", not "boundary-ntp". + // + // Most of these zone kinds do not get external networking and therefore + // we don't need to be able to generate names for them, but it's simpler + // to give them valid names than worry about error handling here. + let prefix = match zone_kind { + ZoneKind::BoundaryNtp | ZoneKind::InternalNtp => "ntp", + ZoneKind::Clickhouse => "clickhouse", + ZoneKind::ClickhouseKeeper => "clickhouse-keeper", + ZoneKind::CockroachDb => "cockroach", + ZoneKind::Crucible => "crucible", + ZoneKind::CruciblePantry => "crucible-pantry", + ZoneKind::ExternalDns => "external-dns", + ZoneKind::InternalDns => "internal-dns", + ZoneKind::Nexus => "nexus", + ZoneKind::Oximeter => "oximeter", + }; + + // Now that we have a valid prefix, we know this format string + // always produces a valid `Name`, so we'll unwrap here.
+ let name = format!("{prefix}-{zone_id}") + .parse() + .expect("valid name failed to parse"); + + Name(name) + } +} + +#[derive(Debug, thiserror::Error)] +#[error("Service NIC {nic_id} has a range of IPs ({ip}); only a single IP is supported")] +pub struct ServiceNicNotSingleIpError { + pub nic_id: Uuid, + pub ip: ipnetwork::IpNetwork, +} + +impl TryFrom<&'_ ServiceNetworkInterface> + for nexus_types::deployment::OmicronZoneNic +{ + type Error = ServiceNicNotSingleIpError; + + fn try_from(nic: &ServiceNetworkInterface) -> Result<Self, Self::Error> { + let size = match nic.ip.size() { + NetworkSize::V4(n) => u128::from(n), + NetworkSize::V6(n) => n, + }; + if size != 1 { + return Err(ServiceNicNotSingleIpError { + nic_id: nic.id(), + ip: nic.ip, + }); + } + Ok(Self { id: nic.id(), mac: *nic.mac, - ip: nic.ip, + ip: nic.ip.ip(), slot: *nic.slot, primary: nic.primary, - } + }) } } diff --git a/nexus/db-model/src/omicron_zone_config.rs b/nexus/db-model/src/omicron_zone_config.rs index 1310d553d2..f6d272a1cd 100644 --- a/nexus/db-model/src/omicron_zone_config.rs +++ b/nexus/db-model/src/omicron_zone_config.rs @@ -81,12 +81,13 @@ impl OmicronZone { nic, snat_cfg, } => { + let (first_port, last_port) = snat_cfg.port_range_raw(); ntp_ntp_servers = Some(ntp_servers.clone()); ntp_dns_servers = Some(dns_servers.clone()); ntp_ntp_domain = domain.clone(); snat_ip = Some(IpNetwork::from(snat_cfg.ip)); - snat_first_port = Some(SqlU16::from(snat_cfg.first_port)); - snat_last_port = Some(SqlU16::from(snat_cfg.last_port)); + snat_first_port = Some(SqlU16::from(first_port)); + snat_last_port = Some(SqlU16::from(last_port)); nic_id = Some(nic.id); (ZoneType::BoundaryNtp, address, None) } @@ -304,11 +305,12 @@ impl OmicronZone { self.snat_last_port, ) { (Some(ip), Some(first_port), Some(last_port)) => { - nexus_types::inventory::SourceNatConfig { - ip: ip.ip(), - first_port: *first_port, - last_port: *last_port, - } + nexus_types::inventory::SourceNatConfig::new( + ip.ip(), + *first_port, + *last_port, + ) + .context("bad SNAT config for boundary NTP")?
} _ => bail!( "expected non-NULL snat properties, \ diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 4d5b753c7f..5a17b39fdd 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -1263,12 +1263,12 @@ mod tests { use nexus_inventory::now_db_precision; use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::blueprint_builder::Ensure; + use nexus_reconfigurator_planning::example::example; use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZoneFilter; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::PlanningInputBuilder; - use nexus_types::deployment::Policy; use nexus_types::deployment::SledDetails; use nexus_types::deployment::SledDisk; use nexus_types::deployment::SledFilter; @@ -1279,7 +1279,6 @@ mod tests { use nexus_types::external_api::views::SledState; use nexus_types::inventory::Collection; use omicron_common::address::Ipv6Subnet; - use omicron_common::api::external::Generation; use omicron_common::disk::DiskIdentity; use omicron_test_utils::dev; use omicron_uuid_kinds::PhysicalDiskUuid; @@ -1288,6 +1287,7 @@ mod tests { use pretty_assertions::assert_eq; use rand::thread_rng; use rand::Rng; + use slog::Logger; use std::mem; use std::net::Ipv6Addr; @@ -1359,65 +1359,32 @@ mod tests { } } - // Create a `Policy` that contains all the sleds found in `collection` - fn policy_from_collection(collection: &Collection) -> Policy { - Policy { - service_ip_pool_ranges: Vec::new(), - target_nexus_zone_count: collection - .all_omicron_zones() - .filter(|z| z.zone_type.is_nexus()) - .count(), - } - } + fn representative( + log: &Logger, + test_name: &str, + ) -> (Collection, PlanningInput, Blueprint) { + // We'll start with an example system. + let (mut base_collection, planning_input, mut blueprint) = + example(log, test_name, 3); - fn representative() -> (Collection, PlanningInput, Blueprint) { - // We'll start with a representative collection... + // Take a more thorough representative collection (includes SPs, + // etc.)... let mut collection = nexus_inventory::examples::representative().builder.build(); - // ...and then mutate it such that the omicron zones it reports match - // the sled agent IDs it reports. Steal the sled agent info and drop the - // fake sled-agent IDs: - let mut empty_map = BTreeMap::new(); - mem::swap(&mut empty_map, &mut collection.sled_agents); - let mut sled_agents = empty_map.into_values().collect::<Vec<_>>(); - - // Now reinsert them with IDs pulled from the omicron zones. This - // assumes we have more fake sled agents than omicron zones, which is - // currently true for the representative collection. - for &sled_id in collection.omicron_zones.keys() { - let some_sled_agent = sled_agents.pop().expect( - "fewer representative sled agents than \ - representative omicron zones sleds", - ); - collection.sled_agents.insert(sled_id, some_sled_agent); - } + // ... and replace its sled agents and Omicron zones with those from our + // example system.
+ mem::swap( + &mut collection.sled_agents, + &mut base_collection.sled_agents, + ); + mem::swap( + &mut collection.omicron_zones, + &mut base_collection.omicron_zones, + ); - let policy = policy_from_collection(&collection); - let planning_input = { - let mut builder = PlanningInputBuilder::new( - policy, - Generation::new(), - Generation::new(), - ); - for (sled_id, agent) in &collection.sled_agents { - builder - .add_sled( - *sled_id, - fake_sled_details(Some(*agent.sled_agent_address.ip())), - ) - .expect("failed to add sled to representative"); - } - builder.build() - }; - let blueprint = BlueprintBuilder::build_initial_from_collection( - &collection, - Generation::new(), - Generation::new(), - planning_input.all_sled_ids(SledFilter::All), - "test", - ) - .unwrap(); + // Treat this blueprint as the initial blueprint for the system. + blueprint.parent_blueprint_id = None; (collection, planning_input, blueprint) } @@ -1442,17 +1409,11 @@ mod tests { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - // Create an empty collection and a blueprint from it - let collection = - nexus_inventory::CollectionBuilder::new("test").build(); - let blueprint1 = BlueprintBuilder::build_initial_from_collection( - &collection, - Generation::new(), - Generation::new(), + // Create an empty blueprint from it + let blueprint1 = BlueprintBuilder::build_empty_with_sleds( std::iter::empty(), "test", - ) - .unwrap(); + ); let authz_blueprint = authz_blueprint_from_id(blueprint1.id); // Trying to read it from the database should fail with the relevant @@ -1471,7 +1432,7 @@ mod tests { let blueprint_read = datastore .blueprint_read(&opctx, &authz_blueprint) .await - .expect("failed to read collection back"); + .expect("failed to read blueprint back"); assert_eq!(blueprint1, blueprint_read); assert_eq!( blueprint_list_all_ids(&opctx, &datastore).await, @@ -1501,13 +1462,15 @@ mod tests { #[tokio::test] async fn test_representative_blueprint() { + const TEST_NAME: &str = "test_representative_blueprint"; // Setup - let logctx = dev::test_setup_log("test_representative_blueprint"); + let logctx = dev::test_setup_log(TEST_NAME); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; // Create a cohesive representative collection/policy/blueprint - let (collection, planning_input, blueprint1) = representative(); + let (collection, planning_input, blueprint1) = + representative(&logctx.log, TEST_NAME); let authz_blueprint1 = authz_blueprint_from_id(blueprint1.id); // Write it to the database and read it back. @@ -1632,10 +1595,23 @@ mod tests { let blueprint2 = builder.build(); let authz_blueprint2 = authz_blueprint_from_id(blueprint2.id); + let diff = blueprint2.diff_since_blueprint(&blueprint1).unwrap(); + println!("b1 -> b2: {}", diff.display()); + println!("b1 disks: {:?}", blueprint1.blueprint_disks); + println!("b2 disks: {:?}", blueprint2.blueprint_disks); // Check that we added the new sled, as well as its disks and zones. 
assert_eq!( - blueprint1.blueprint_disks.len() + new_sled_zpools.len(), - blueprint2.blueprint_disks.len(), + blueprint1 + .blueprint_disks + .values() + .map(|c| c.disks.len()) + .sum::<usize>() + + new_sled_zpools.len(), + blueprint2 + .blueprint_disks + .values() + .map(|c| c.disks.len()) + .sum::<usize>() ); assert_eq!( blueprint1.blueprint_zones.len() + 1, blueprint2.blueprint_zones.len(), ); @@ -1757,16 +1733,10 @@ // Create three blueprints: // * `blueprint1` has no parent // * `blueprint2` and `blueprint3` both have `blueprint1` as parent - let collection = - nexus_inventory::CollectionBuilder::new("test").build(); - let blueprint1 = BlueprintBuilder::build_initial_from_collection( - &collection, - Generation::new(), - Generation::new(), + let blueprint1 = BlueprintBuilder::build_empty_with_sleds( std::iter::empty(), "test1", - ) - .unwrap(); + ); let blueprint2 = BlueprintBuilder::new_based_on( &logctx.log, &blueprint1, @@ -1911,16 +1881,10 @@ let (opctx, datastore) = datastore_test(&logctx, &db).await; // Create an initial blueprint and a child. - let collection = - nexus_inventory::CollectionBuilder::new("test").build(); - let blueprint1 = BlueprintBuilder::build_initial_from_collection( - &collection, - Generation::new(), - Generation::new(), + let blueprint1 = BlueprintBuilder::build_empty_with_sleds( std::iter::empty(), "test1", - ) - .unwrap(); + ); let blueprint2 = BlueprintBuilder::new_based_on( &logctx.log, &blueprint1, diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 28fc5de884..8c54ccd27d 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -40,6 +40,7 @@ use diesel::prelude::*; use nexus_db_model::FloatingIpUpdate; use nexus_db_model::Instance; use nexus_db_model::IpAttachState; +use nexus_types::deployment::OmicronZoneExternalIp; use nexus_types::identity::Resource; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; @@ -52,7 +53,9 @@ use omicron_common::api::external::LookupResult; use omicron_common::api::external::NameOrId; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; +use omicron_uuid_kinds::OmicronZoneUuid; use ref_cast::RefCast; +use sled_agent_client::ZoneKind; use std::net::IpAddr; use uuid::Uuid; @@ -225,44 +228,6 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } - /// Allocates an IP address for internal service usage. - pub async fn external_ip_allocate_service( - &self, - opctx: &OpContext, - ip_id: Uuid, - name: &Name, - description: &str, - service_id: Uuid, - ) -> CreateResult<ExternalIp> { - let (.., pool) = self.ip_pools_service_lookup(opctx).await?; - - let data = IncompleteExternalIp::for_service( - ip_id, - name, - description, - service_id, - pool.id(), - ); - self.allocate_external_ip(opctx, data).await - } - - /// Allocates an SNAT IP address for internal service usage. - pub async fn external_ip_allocate_service_snat( - &self, - opctx: &OpContext, - ip_id: Uuid, - service_id: Uuid, - ) -> CreateResult<ExternalIp> { - let (.., pool) = self.ip_pools_service_lookup(opctx).await?; - - let data = IncompleteExternalIp::for_service_snat( - ip_id, - service_id, - pool.id(), - ); - self.allocate_external_ip(opctx, data).await - } - - /// Allocates a floating IP address for instance usage.
pub async fn allocate_floating_ip( &self, @@ -383,52 +348,21 @@ impl DataStore { }) } - /// Allocates an explicit Floating IP address for an internal service. - /// - /// Unlike the other IP allocation requests, this does not search for an - /// available IP address, it asks for one explicitly. - pub async fn external_ip_allocate_service_explicit( + /// Allocates an explicit IP address for an Omicron zone. + pub async fn external_ip_allocate_omicron_zone( &self, opctx: &OpContext, - ip_id: Uuid, - name: &Name, - description: &str, - service_id: Uuid, - ip: IpAddr, + zone_id: OmicronZoneUuid, + zone_kind: ZoneKind, + external_ip: OmicronZoneExternalIp, ) -> CreateResult<ExternalIp> { let (authz_pool, pool) = self.ip_pools_service_lookup(opctx).await?; opctx.authorize(authz::Action::CreateChild, &authz_pool).await?; - let data = IncompleteExternalIp::for_service_explicit( - ip_id, - name, - description, - service_id, + let data = IncompleteExternalIp::for_omicron_zone( pool.id(), - ip, - ); - self.allocate_external_ip(opctx, data).await - } - - /// Allocates an explicit SNAT IP address for an internal service. - /// - /// Unlike the other IP allocation requests, this does not search for an - /// available IP address, it asks for one explicitly. - pub async fn external_ip_allocate_service_explicit_snat( - &self, - opctx: &OpContext, - ip_id: Uuid, - service_id: Uuid, - ip: IpAddr, - port_range: (u16, u16), - ) -> CreateResult<ExternalIp> { - let (authz_pool, pool) = self.ip_pools_service_lookup(opctx).await?; - opctx.authorize(authz::Action::CreateChild, &authz_pool).await?; - let data = IncompleteExternalIp::for_service_explicit_snat( - ip_id, - service_id, - pool.id(), - ip, - port_range, + external_ip, + zone_id, + zone_kind, ); self.allocate_external_ip(opctx, data).await } @@ -1216,9 +1150,12 @@ mod tests { use super::*; use crate::db::datastore::test_utils::datastore_test; use nexus_test_utils::db::test_setup_database; + use nexus_types::deployment::OmicronZoneExternalIpKind; use nexus_types::external_api::shared::IpRange; + use nexus_types::inventory::SourceNatConfig; use omicron_common::address::NUM_SOURCE_NAT_PORTS; use omicron_test_utils::dev; + use omicron_uuid_kinds::ExternalIpUuid; use std::collections::BTreeSet; use std::net::Ipv4Addr; @@ -1267,32 +1204,27 @@ mod tests { // Allocate a bunch of fake service IPs.
let mut external_ips = Vec::new(); let mut allocate_snat = false; // flip-flop between regular and snat - for (i, ip) in ip_range.iter().enumerate() { - let name = format!("service-ip-{i}"); - let external_ip = if allocate_snat { - datastore - .external_ip_allocate_service_explicit_snat( - &opctx, - Uuid::new_v4(), - Uuid::new_v4(), - ip, - (0, NUM_SOURCE_NAT_PORTS - 1), - ) - .await - .expect("failed to allocate service IP") + for ip in ip_range.iter() { + let external_ip_kind = if allocate_snat { + OmicronZoneExternalIpKind::Snat( + SourceNatConfig::new(ip, 0, NUM_SOURCE_NAT_PORTS - 1) + .unwrap(), + ) } else { - datastore - .external_ip_allocate_service_explicit( - &opctx, - Uuid::new_v4(), - &Name(name.parse().unwrap()), - &name, - Uuid::new_v4(), - ip, - ) - .await - .expect("failed to allocate service IP") + OmicronZoneExternalIpKind::Floating(ip) }; + let external_ip = datastore + .external_ip_allocate_omicron_zone( + &opctx, + OmicronZoneUuid::new_v4(), + ZoneKind::Nexus, + OmicronZoneExternalIp { + id: ExternalIpUuid::new_v4(), + kind: external_ip_kind, + }, + ) + .await + .expect("failed to allocate service IP"); external_ips.push(external_ip); allocate_snat = !allocate_snat; } diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 225499c0bf..0f4b1b245e 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -47,6 +47,8 @@ use nexus_types::deployment::BlueprintTarget; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneFilter; use nexus_types::deployment::BlueprintZoneType; +use nexus_types::deployment::OmicronZoneExternalIp; +use nexus_types::deployment::OmicronZoneExternalIpKind; use nexus_types::external_api::params as external_params; use nexus_types::external_api::shared; use nexus_types::external_api::shared::IdentityType; @@ -61,6 +63,7 @@ use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use omicron_common::bail_unless; +use omicron_uuid_kinds::ExternalIpUuid; use omicron_uuid_kinds::GenericUuid; use slog_error_chain::InlineErrorChain; use std::sync::{Arc, OnceLock}; @@ -475,70 +478,62 @@ impl DataStore { BlueprintZoneType::ExternalDns( blueprint_zone_type::ExternalDns { nic, dns_address, .. }, ) => { - let external_ip = dns_address.ip(); - let service_kind = format!("{}", zone_type.kind()); - let db_ip = IncompleteExternalIp::for_service_explicit( - Uuid::new_v4(), - &db::model::Name(nic.name.clone()), - &service_kind, - zone_config.id.into_untyped_uuid(), - service_pool.id(), - external_ip, - ); + let external_ip = OmicronZoneExternalIp { + id: ExternalIpUuid::new_v4(), + kind: OmicronZoneExternalIpKind::Floating(dns_address.ip()), + }; let db_nic = IncompleteNetworkInterface::new_service( nic.id, zone_config.id.into_untyped_uuid(), DNS_VPC_SUBNET.clone(), IdentityMetadataCreateParams { name: nic.name.clone(), - description: format!("{service_kind} service vNIC"), + description: format!( + "{} service vNIC", + zone_type.kind() + ), }, nic.ip, nic.mac, nic.slot, ) .map_err(|e| RackInitError::AddingNic(e))?; - Some((db_ip, db_nic)) + Some((external_ip, db_nic)) } BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { nic, external_ip, .. 
}) => { - let service_kind = format!("{}", zone_type.kind()); - let db_ip = IncompleteExternalIp::for_service_explicit( - Uuid::new_v4(), - &db::model::Name(nic.name.clone()), - &service_kind, - zone_config.id.into_untyped_uuid(), - service_pool.id(), - *external_ip, - ); + let external_ip = OmicronZoneExternalIp { + id: ExternalIpUuid::new_v4(), + kind: OmicronZoneExternalIpKind::Floating(*external_ip), + }; let db_nic = IncompleteNetworkInterface::new_service( nic.id, zone_config.id.into_untyped_uuid(), NEXUS_VPC_SUBNET.clone(), IdentityMetadataCreateParams { name: nic.name.clone(), - description: format!("{service_kind} service vNIC"), + description: format!( + "{} service vNIC", + zone_type.kind() + ), }, nic.ip, nic.mac, nic.slot, ) .map_err(|e| RackInitError::AddingNic(e))?; - Some((db_ip, db_nic)) + Some((external_ip, db_nic)) } BlueprintZoneType::BoundaryNtp( blueprint_zone_type::BoundaryNtp { snat_cfg, nic, .. }, ) => { - let db_ip = IncompleteExternalIp::for_service_explicit_snat( - Uuid::new_v4(), - zone_config.id.into_untyped_uuid(), - service_pool.id(), - snat_cfg.ip, - (snat_cfg.first_port, snat_cfg.last_port), - ); + let external_ip = OmicronZoneExternalIp { + id: ExternalIpUuid::new_v4(), + kind: OmicronZoneExternalIpKind::Snat(*snat_cfg), + }; let db_nic = IncompleteNetworkInterface::new_service( nic.id, zone_config.id.into_untyped_uuid(), @@ -555,7 +550,7 @@ impl DataStore { nic.slot, ) .map_err(|e| RackInitError::AddingNic(e))?; - Some((db_ip, db_nic)) + Some((external_ip, db_nic)) } BlueprintZoneType::InternalNtp(_) | BlueprintZoneType::Clickhouse(_) @@ -566,7 +561,7 @@ impl DataStore { | BlueprintZoneType::InternalDns(_) | BlueprintZoneType::Oximeter(_) => None, }; - let Some((db_ip, db_nic)) = service_ip_nic else { + let Some((external_ip, db_nic)) = service_ip_nic else { info!( log, "No networking records needed for {} service", @@ -574,6 +569,12 @@ impl DataStore { ); return Ok(()); }; + let db_ip = IncompleteExternalIp::for_omicron_zone( + service_pool.id(), + external_ip, + zone_config.id, + zone_config.zone_type.kind(), + ); Self::allocate_external_ip_on_connection(conn, db_ip).await.map_err( |err| { error!( @@ -958,20 +959,19 @@ mod test { use async_bb8_diesel::AsyncSimpleConnection; use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_db_model::{DnsGroup, Generation, InitialDnsGroup, SledUpdate}; - use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; + use nexus_inventory::now_db_precision; use nexus_reconfigurator_planning::system::{ SledBuilder, SystemDescription, }; use nexus_test_utils::db::test_setup_database; - use nexus_types::deployment::OmicronZoneConfig; - use nexus_types::deployment::OmicronZonesConfig; - use nexus_types::deployment::SledFilter; + use nexus_types::deployment::BlueprintZoneConfig; + use nexus_types::deployment::BlueprintZoneDisposition; + use nexus_types::deployment::BlueprintZonesConfig; use nexus_types::external_api::shared::SiloIdentityMode; use nexus_types::identity::Asset; use nexus_types::internal_api::params::DnsRecord; use nexus_types::inventory::NetworkInterface; use nexus_types::inventory::NetworkInterfaceKind; - use nexus_types::inventory::OmicronZoneType; use omicron_common::address::{ DNS_OPTE_IPV4_SUBNET, NEXUS_OPTE_IPV4_SUBNET, NTP_OPTE_IPV4_SUBNET, }; @@ -981,8 +981,9 @@ mod test { }; use omicron_common::api::internal::shared::SourceNatConfig; use omicron_test_utils::dev; + use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::TypedUuid; - use omicron_uuid_kinds::{GenericUuid, 
SledUuid, ZpoolUuid}; + use omicron_uuid_kinds::{GenericUuid, ZpoolUuid}; use sled_agent_client::types::OmicronZoneDataset; use std::collections::{BTreeMap, HashMap}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV6}; @@ -1269,61 +1270,52 @@ mod test { SledBuilder::new().id(TypedUuid::from_untyped_uuid(sled3.id())), ) .expect("failed to add sled3"); - let planning_input = system - .to_planning_input_builder() - .expect("failed to make planning input") - .build(); - let mut inventory_builder = system - .to_collection_builder() - .expect("failed to make collection builder"); let external_dns_ip = IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)); let external_dns_pip = DNS_OPTE_IPV4_SUBNET .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) .unwrap(); - let external_dns_id = Uuid::new_v4(); + let external_dns_id = OmicronZoneUuid::new_v4(); let nexus_ip = IpAddr::V4(Ipv4Addr::new(1, 2, 3, 6)); let nexus_pip = NEXUS_OPTE_IPV4_SUBNET .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) .unwrap(); - let nexus_id = Uuid::new_v4(); + let nexus_id = OmicronZoneUuid::new_v4(); let ntp1_ip = IpAddr::V4(Ipv4Addr::new(1, 2, 3, 5)); let ntp1_pip = NTP_OPTE_IPV4_SUBNET .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) .unwrap(); - let ntp1_id = Uuid::new_v4(); + let ntp1_id = OmicronZoneUuid::new_v4(); let ntp2_ip = IpAddr::V4(Ipv4Addr::new(1, 2, 3, 5)); let ntp2_pip = NTP_OPTE_IPV4_SUBNET .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 2) .unwrap(); - let ntp2_id = Uuid::new_v4(); - let ntp3_id = Uuid::new_v4(); + let ntp2_id = OmicronZoneUuid::new_v4(); + let ntp3_id = OmicronZoneUuid::new_v4(); let mut macs = MacAddr::iter_system(); - // Add services for our sleds to the inventory (which will cause them to - // be present in the blueprint we'll generate from it). 
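Throughout these tests, zone IDs move from bare `Uuid` to the typed `OmicronZoneUuid`, with `into_untyped_uuid()` bridging back wherever rows or NIC kinds still store plain UUIDs. The idiom in isolation (a sketch; `GenericUuid` supplies the conversions, as in the imports above):

```rust
use omicron_uuid_kinds::{GenericUuid, OmicronZoneUuid};
use uuid::Uuid;

fn main() {
    // Typed at the blueprint layer: can't be confused with a sled or IP ID.
    let nexus_id = OmicronZoneUuid::new_v4();

    // Untyped at the boundaries that haven't been converted yet, e.g.
    // `NetworkInterfaceKind::Service { id }` or `external_ip.parent_id`.
    let untyped: Uuid = nexus_id.into_untyped_uuid();
    assert_eq!(OmicronZoneUuid::from_untyped_uuid(untyped), nexus_id);
}
```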
- inventory_builder - .found_sled_omicron_zones( - "sled1", - SledUuid::from_untyped_uuid(sled1.id()), - OmicronZonesConfig { - generation: Generation::new().next(), - zones: vec![ - OmicronZoneConfig { - id: external_dns_id, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type: OmicronZoneType::ExternalDns { + let mut blueprint_zones = BTreeMap::new(); + blueprint_zones.insert( + sled1.id(), + BlueprintZonesConfig { + generation: Generation::new().next(), + zones: vec![ + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: external_dns_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { dataset: random_dataset(), - http_address: "[::1]:80".to_string(), + http_address: "[::1]:80".parse().unwrap(), dns_address: SocketAddr::new( external_dns_ip, 53, - ) - .to_string(), + ), nic: NetworkInterface { id: Uuid::new_v4(), kind: NetworkInterfaceKind::Service { - id: external_dns_id, + id: external_dns_id.into_untyped_uuid(), }, name: "external-dns".parse().unwrap(), ip: external_dns_pip.into(), @@ -1337,19 +1329,22 @@ mod test { slot: 0, }, }, - }, - OmicronZoneConfig { - id: ntp1_id, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type: OmicronZoneType::BoundaryNtp { - address: "[::1]:80".to_string(), + ), + }, + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: ntp1_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::BoundaryNtp( + blueprint_zone_type::BoundaryNtp { + address: "[::1]:80".parse().unwrap(), ntp_servers: vec![], dns_servers: vec![], domain: None, nic: NetworkInterface { id: Uuid::new_v4(), kind: NetworkInterfaceKind::Service { - id: ntp1_id, + id: ntp1_id.into_untyped_uuid(), }, name: "ntp1".parse().unwrap(), ip: ntp1_pip.into(), @@ -1362,36 +1357,35 @@ mod test { primary: true, slot: 0, }, - snat_cfg: SourceNatConfig { - ip: ntp1_ip, - first_port: 16384, - last_port: 32767, - }, + snat_cfg: SourceNatConfig::new( + ntp1_ip, 16384, 32767, + ) + .unwrap(), }, - }, - ], - }, - ) - .expect("recording Omicron zones"); - inventory_builder - .found_sled_omicron_zones( - "sled2", - SledUuid::from_untyped_uuid(sled2.id()), - OmicronZonesConfig { - generation: Generation::new().next(), - zones: vec![ - OmicronZoneConfig { - id: nexus_id, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type: OmicronZoneType::Nexus { - internal_address: "[::1]:80".to_string(), + ), + }, + ], + }, + ); + blueprint_zones.insert( + sled2.id(), + BlueprintZonesConfig { + generation: Generation::new().next(), + zones: vec![ + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: nexus_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + internal_address: "[::1]:80".parse().unwrap(), external_ip: nexus_ip, external_tls: false, external_dns_servers: vec![], nic: NetworkInterface { id: Uuid::new_v4(), kind: NetworkInterfaceKind::Service { - id: nexus_id, + id: nexus_id.into_untyped_uuid(), }, name: "nexus".parse().unwrap(), ip: nexus_pip.into(), @@ -1405,19 +1399,22 @@ mod test { slot: 0, }, }, - }, - OmicronZoneConfig { - id: ntp2_id, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type: OmicronZoneType::BoundaryNtp { - address: "[::1]:80".to_string(), + ), + }, + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: ntp2_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::BoundaryNtp( + blueprint_zone_type::BoundaryNtp { + 
address: "[::1]:80".parse().unwrap(), ntp_servers: vec![], dns_servers: vec![], domain: None, nic: NetworkInterface { id: Uuid::new_v4(), kind: NetworkInterfaceKind::Service { - id: ntp2_id, + id: ntp2_id.into_untyped_uuid(), }, name: "ntp2".parse().unwrap(), ip: ntp2_pip.into(), @@ -1430,45 +1427,49 @@ mod test { primary: true, slot: 0, }, - snat_cfg: SourceNatConfig { - ip: ntp2_ip, - first_port: 0, - last_port: 16383, - }, + snat_cfg: SourceNatConfig::new( + ntp2_ip, 0, 16383, + ) + .unwrap(), }, - }, - ], - }, - ) - .expect("recording Omicron zones"); - inventory_builder - .found_sled_omicron_zones( - "sled3", - SledUuid::from_untyped_uuid(sled3.id()), - OmicronZonesConfig { - generation: Generation::new().next(), - zones: vec![OmicronZoneConfig { - id: ntp3_id, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type: OmicronZoneType::InternalNtp { - address: "[::1]:80".to_string(), + ), + }, + ], + }, + ); + blueprint_zones.insert( + sled3.id(), + BlueprintZonesConfig { + generation: Generation::new().next(), + zones: vec![BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: ntp3_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::InternalNtp( + blueprint_zone_type::InternalNtp { + address: "[::1]:80".parse().unwrap(), ntp_servers: vec![], dns_servers: vec![], domain: None, }, - }], - }, - ) - .expect("recording Omicron zones"); - let blueprint = BlueprintBuilder::build_initial_from_collection_seeded( - &inventory_builder.build(), - *Generation::new(), - *Generation::new(), - planning_input.all_sled_ids(SledFilter::All), - "test suite", - (test_name, "initial blueprint"), - ) - .expect("failed to build blueprint"); + ), + }], + }, + ); + for zone_config in blueprint_zones.values_mut() { + zone_config.sort(); + } + let blueprint = Blueprint { + id: Uuid::new_v4(), + blueprint_zones, + blueprint_disks: BTreeMap::new(), + parent_blueprint_id: None, + internal_dns_version: *Generation::new(), + external_dns_version: *Generation::new(), + time_created: now_db_precision(), + creator: "test suite".to_string(), + comment: "test blueprint".to_string(), + }; let rack = datastore .rack_set_initialized( @@ -1498,23 +1499,23 @@ mod test { assert_eq!(observed_external_ips.len(), 4); let dns_external_ip = observed_external_ips .iter() - .find(|e| e.parent_id == Some(external_dns_id)) + .find(|e| e.parent_id == Some(external_dns_id.into_untyped_uuid())) .unwrap(); let nexus_external_ip = observed_external_ips .iter() - .find(|e| e.parent_id == Some(nexus_id)) + .find(|e| e.parent_id == Some(nexus_id.into_untyped_uuid())) .unwrap(); let ntp1_external_ip = observed_external_ips .iter() - .find(|e| e.parent_id == Some(ntp1_id)) + .find(|e| e.parent_id == Some(ntp1_id.into_untyped_uuid())) .unwrap(); let ntp2_external_ip = observed_external_ips .iter() - .find(|e| e.parent_id == Some(ntp2_id)) + .find(|e| e.parent_id == Some(ntp2_id.into_untyped_uuid())) .unwrap(); assert!(!observed_external_ips .iter() - .any(|e| e.parent_id == Some(ntp3_id))); + .any(|e| e.parent_id == Some(ntp3_id.into_untyped_uuid()))); assert!(dns_external_ip.is_service); assert_eq!(dns_external_ip.kind, IpKind::Floating); @@ -1601,16 +1602,9 @@ mod test { SledBuilder::new().id(TypedUuid::from_untyped_uuid(sled.id())), ) .expect("failed to add sled"); - let planning_input = system - .to_planning_input_builder() - .expect("failed to make planning input") - .build(); - let mut inventory_builder = system - .to_collection_builder() - .expect("failed to make collection builder"); - - let 
nexus_id1 = Uuid::new_v4(); - let nexus_id2 = Uuid::new_v4(); + + let nexus_id1 = OmicronZoneUuid::new_v4(); + let nexus_id2 = OmicronZoneUuid::new_v4(); let nexus_pip1 = NEXUS_OPTE_IPV4_SUBNET .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) .unwrap(); @@ -1619,25 +1613,26 @@ mod test { .unwrap(); let mut macs = MacAddr::iter_system(); - inventory_builder - .found_sled_omicron_zones( - "sled", - SledUuid::from_untyped_uuid(sled.id()), - OmicronZonesConfig { - generation: Generation::new().next(), - zones: vec![ - OmicronZoneConfig { - id: nexus_id1, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type: OmicronZoneType::Nexus { - internal_address: "[::1]:80".to_string(), + let mut blueprint_zones = BTreeMap::new(); + blueprint_zones.insert( + sled.id(), + BlueprintZonesConfig { + generation: Generation::new().next(), + zones: vec![ + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: nexus_id1, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + internal_address: "[::1]:80".parse().unwrap(), external_ip: nexus_ip_start.into(), external_tls: false, external_dns_servers: vec![], nic: NetworkInterface { id: Uuid::new_v4(), kind: NetworkInterfaceKind::Service { - id: nexus_id1, + id: nexus_id1.into_untyped_uuid(), }, name: "nexus1".parse().unwrap(), ip: nexus_pip1.into(), @@ -1651,19 +1646,22 @@ mod test { slot: 0, }, }, - }, - OmicronZoneConfig { - id: nexus_id2, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type: OmicronZoneType::Nexus { - internal_address: "[::1]:80".to_string(), + ), + }, + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: nexus_id2, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + internal_address: "[::1]:80".parse().unwrap(), external_ip: nexus_ip_end.into(), external_tls: false, external_dns_servers: vec![], nic: NetworkInterface { id: Uuid::new_v4(), kind: NetworkInterfaceKind::Service { - id: nexus_id2, + id: nexus_id2.into_untyped_uuid(), }, name: "nexus2".parse().unwrap(), ip: nexus_pip2.into(), @@ -1677,11 +1675,11 @@ mod test { slot: 0, }, }, - }, - ], - }, - ) - .expect("recording Omicron zones"); + ), + }, + ], + }, + ); let datasets = vec![]; @@ -1707,15 +1705,20 @@ mod test { HashMap::from([("api.sys".to_string(), external_records.clone())]), ); - let blueprint = BlueprintBuilder::build_initial_from_collection_seeded( - &inventory_builder.build(), - *Generation::new(), - *Generation::new(), - planning_input.all_sled_ids(SledFilter::All), - "test suite", - (test_name, "initial blueprint"), - ) - .expect("failed to build blueprint"); + for zone_config in blueprint_zones.values_mut() { + zone_config.sort(); + } + let blueprint = Blueprint { + id: Uuid::new_v4(), + blueprint_zones, + blueprint_disks: BTreeMap::new(), + parent_blueprint_id: None, + internal_dns_version: *Generation::new(), + external_dns_version: *Generation::new(), + time_created: now_db_precision(), + creator: "test suite".to_string(), + comment: "test blueprint".to_string(), + }; let rack = datastore .rack_set_initialized( @@ -1867,38 +1870,32 @@ mod test { SledBuilder::new().id(TypedUuid::from_untyped_uuid(sled.id())), ) .expect("failed to add sled"); - let planning_input = system - .to_planning_input_builder() - .expect("failed to make planning input") - .build(); - let mut inventory_builder = system - .to_collection_builder() - .expect("failed to make collection builder"); let nexus_ip = 
IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)); let nexus_pip = NEXUS_OPTE_IPV4_SUBNET .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) .unwrap(); - let nexus_id = Uuid::new_v4(); + let nexus_id = OmicronZoneUuid::new_v4(); let mut macs = MacAddr::iter_system(); - inventory_builder - .found_sled_omicron_zones( - "sled", - SledUuid::from_untyped_uuid(sled.id()), - OmicronZonesConfig { - generation: Generation::new().next(), - zones: vec![OmicronZoneConfig { - id: nexus_id, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type: OmicronZoneType::Nexus { - internal_address: "[::1]:80".to_string(), + let mut blueprint_zones = BTreeMap::new(); + blueprint_zones.insert( + sled.id(), + BlueprintZonesConfig { + generation: Generation::new().next(), + zones: vec![BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: nexus_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + internal_address: "[::1]:80".parse().unwrap(), external_ip: nexus_ip, external_tls: false, external_dns_servers: vec![], nic: NetworkInterface { id: Uuid::new_v4(), kind: NetworkInterfaceKind::Service { - id: nexus_id, + id: nexus_id.into_untyped_uuid(), }, name: "nexus".parse().unwrap(), ip: nexus_pip.into(), @@ -1912,20 +1909,24 @@ mod test { slot: 0, }, }, - }], - }, - ) - .expect("recording Omicron zones"); - - let blueprint = BlueprintBuilder::build_initial_from_collection_seeded( - &inventory_builder.build(), - *Generation::new(), - *Generation::new(), - planning_input.all_sled_ids(SledFilter::All), - "test suite", - (test_name, "initial blueprint"), - ) - .expect("failed to build blueprint"); + ), + }], + }, + ); + for zone_config in blueprint_zones.values_mut() { + zone_config.sort(); + } + let blueprint = Blueprint { + id: Uuid::new_v4(), + blueprint_zones, + blueprint_disks: BTreeMap::new(), + parent_blueprint_id: None, + internal_dns_version: *Generation::new(), + external_dns_version: *Generation::new(), + time_created: now_db_precision(), + creator: "test suite".to_string(), + comment: "test blueprint".to_string(), + }; let result = datastore .rack_set_initialized( @@ -1965,44 +1966,37 @@ mod test { SledBuilder::new().id(TypedUuid::from_untyped_uuid(sled.id())), ) .expect("failed to add sled"); - let planning_input = system - .to_planning_input_builder() - .expect("failed to make planning input") - .build(); - let mut inventory_builder = system - .to_collection_builder() - .expect("failed to make collection builder"); // Request two services which happen to be using the same IP address. 
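Every test in this file now assembles its `Blueprint` literally instead of round-tripping through an inventory collection and `BlueprintBuilder`. The recurring shape, pulled out for reference (field list exactly as used in these hunks; `blueprint_zones` is whatever map the test built, sorted first so comparisons against datastore output are deterministic):

```rust
// Normalize per-sled zone ordering before freezing the blueprint.
for zone_config in blueprint_zones.values_mut() {
    zone_config.sort();
}
let blueprint = Blueprint {
    id: Uuid::new_v4(),
    blueprint_zones,
    blueprint_disks: BTreeMap::new(), // these tests don't exercise disks
    parent_blueprint_id: None,        // an initial blueprint has no parent
    internal_dns_version: *Generation::new(),
    external_dns_version: *Generation::new(),
    time_created: now_db_precision(),
    creator: "test suite".to_string(),
    comment: "test blueprint".to_string(),
};
```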
- let external_dns_id = Uuid::new_v4(); + let external_dns_id = OmicronZoneUuid::new_v4(); let external_dns_pip = DNS_OPTE_IPV4_SUBNET .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) .unwrap(); - let nexus_id = Uuid::new_v4(); + let nexus_id = OmicronZoneUuid::new_v4(); let nexus_pip = NEXUS_OPTE_IPV4_SUBNET .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) .unwrap(); let mut macs = MacAddr::iter_system(); - inventory_builder - .found_sled_omicron_zones( - "sled", - SledUuid::from_untyped_uuid(sled.id()), - OmicronZonesConfig { - generation: Generation::new().next(), - zones: vec![ - OmicronZoneConfig { - id: external_dns_id, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type: OmicronZoneType::ExternalDns { + let mut blueprint_zones = BTreeMap::new(); + blueprint_zones.insert( + sled.id(), + BlueprintZonesConfig { + generation: Generation::new().next(), + zones: vec![ + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: external_dns_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { dataset: random_dataset(), - http_address: "[::1]:80".to_string(), - dns_address: SocketAddr::new(ip, 53) - .to_string(), + http_address: "[::1]:80".parse().unwrap(), + dns_address: SocketAddr::new(ip, 53), nic: NetworkInterface { id: Uuid::new_v4(), kind: NetworkInterfaceKind::Service { - id: external_dns_id, + id: external_dns_id.into_untyped_uuid(), }, name: "external-dns".parse().unwrap(), ip: external_dns_pip.into(), @@ -2016,19 +2010,22 @@ mod test { slot: 0, }, }, - }, - OmicronZoneConfig { - id: nexus_id, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type: OmicronZoneType::Nexus { - internal_address: "[::1]:80".to_string(), + ), + }, + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: nexus_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + internal_address: "[::1]:80".parse().unwrap(), external_ip: ip, external_tls: false, external_dns_servers: vec![], nic: NetworkInterface { id: Uuid::new_v4(), kind: NetworkInterfaceKind::Service { - id: nexus_id, + id: nexus_id.into_untyped_uuid(), }, name: "nexus".parse().unwrap(), ip: nexus_pip.into(), @@ -2042,21 +2039,26 @@ mod test { slot: 0, }, }, - }, - ], - }, - ) - .expect("recording Omicron zones"); + ), + }, + ], + }, + ); - let blueprint = BlueprintBuilder::build_initial_from_collection_seeded( - &inventory_builder.build(), - *Generation::new(), - *Generation::new(), - planning_input.all_sled_ids(SledFilter::All), - "test suite", - (test_name, "initial blueprint"), - ) - .expect("failed to build blueprint"); + for zone_config in blueprint_zones.values_mut() { + zone_config.sort(); + } + let blueprint = Blueprint { + id: Uuid::new_v4(), + blueprint_zones, + blueprint_disks: BTreeMap::new(), + parent_blueprint_id: None, + internal_dns_version: *Generation::new(), + external_dns_version: *Generation::new(), + time_created: now_db_precision(), + creator: "test suite".to_string(), + comment: "test blueprint".to_string(), + }; let result = datastore .rack_set_initialized( diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index 3969c808f9..e16fcbb3ff 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -879,7 +879,6 @@ mod tests { use crate::db::model::IpKind; use crate::db::model::IpPool; use crate::db::model::IpPoolRange; - use 
crate::db::model::Name; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use dropshot::test_util::LogContext; @@ -889,13 +888,20 @@ mod tests { use nexus_db_model::IpPoolResource; use nexus_db_model::IpPoolResourceType; use nexus_test_utils::db::test_setup_database; + use nexus_types::deployment::OmicronZoneExternalIp; + use nexus_types::deployment::OmicronZoneExternalIpKind; use nexus_types::external_api::params::InstanceCreate; use nexus_types::external_api::shared::IpRange; + use nexus_types::inventory::SourceNatConfig; use omicron_common::address::NUM_SOURCE_NAT_PORTS; use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_test_utils::dev; use omicron_test_utils::dev::db::CockroachInstance; + use omicron_uuid_kinds::ExternalIpUuid; + use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::OmicronZoneUuid; + use sled_agent_client::ZoneKind; use std::net::IpAddr; use std::net::Ipv4Addr; use std::sync::Arc; @@ -1325,163 +1331,9 @@ mod tests { } #[tokio::test] - async fn test_next_external_ip_for_service() { - let context = - TestContext::new("test_next_external_ip_for_service").await; - - let ip_range = IpRange::try_from(( - Ipv4Addr::new(10, 0, 0, 1), - Ipv4Addr::new(10, 0, 0, 3), - )) - .unwrap(); - context.initialize_ip_pool(SERVICE_IP_POOL_NAME, ip_range).await; - - // Allocate an IP address as we would for an external, rack-associated - // service. - let service1_id = Uuid::new_v4(); - - // Check that `service_lookup_external_ips` returns an empty vector for - // a service with no external IPs. - assert_eq!( - context - .db_datastore - .external_ip_list_service(&context.opctx, service1_id) - .await - .expect("Failed to look up service external IPs"), - Vec::new(), - ); - - let id1 = Uuid::new_v4(); - let ip1 = context - .db_datastore - .external_ip_allocate_service( - &context.opctx, - id1, - &Name("service1-ip".parse().unwrap()), - "service1-ip", - service1_id, - ) - .await - .expect("Failed to allocate service IP address"); - assert!(ip1.is_service); - assert_eq!(ip1.kind, IpKind::Floating); - assert_eq!(ip1.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))); - assert_eq!(ip1.first_port.0, 0); - assert_eq!(ip1.last_port.0, u16::MAX); - assert_eq!(ip1.parent_id, Some(service1_id)); - assert_eq!( - context - .db_datastore - .external_ip_list_service(&context.opctx, service1_id) - .await - .expect("Failed to look up service external IPs"), - vec![ip1], - ); - - // Allocate an SNat IP - let service2_id = Uuid::new_v4(); - let id2 = Uuid::new_v4(); - let ip2 = context - .db_datastore - .external_ip_allocate_service_snat(&context.opctx, id2, service2_id) - .await - .expect("Failed to allocate service IP address"); - assert!(ip2.is_service); - assert_eq!(ip2.kind, IpKind::SNat); - assert_eq!(ip2.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 2))); - assert_eq!(ip2.first_port.0, 0); - assert_eq!(ip2.last_port.0, 16383); - assert_eq!(ip2.parent_id, Some(service2_id)); - assert_eq!( - context - .db_datastore - .external_ip_list_service(&context.opctx, service2_id) - .await - .expect("Failed to look up service external IPs"), - vec![ip2], - ); - - // Allocate the next IP address - let service3_id = Uuid::new_v4(); - let id3 = Uuid::new_v4(); - let ip3 = context - .db_datastore - .external_ip_allocate_service( - &context.opctx, - id3, - &Name("service3-ip".parse().unwrap()), - "service3-ip", - service3_id, - ) - .await - .expect("Failed to allocate service IP address"); - 
assert!(ip3.is_service); - assert_eq!(ip3.kind, IpKind::Floating); - assert_eq!(ip3.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 3))); - assert_eq!(ip3.first_port.0, 0); - assert_eq!(ip3.last_port.0, u16::MAX); - assert_eq!(ip3.parent_id, Some(service3_id)); - assert_eq!( - context - .db_datastore - .external_ip_list_service(&context.opctx, service3_id) - .await - .expect("Failed to look up service external IPs"), - vec![ip3], - ); - - // Once we're out of IP addresses, test that we see the right error. - let service3_id = Uuid::new_v4(); - let id3 = Uuid::new_v4(); - let err = context - .db_datastore - .external_ip_allocate_service( - &context.opctx, - id3, - &Name("service3-ip".parse().unwrap()), - "service3-ip", - service3_id, - ) - .await - .expect_err("Should have failed to allocate after pool exhausted"); - assert_eq!( - err, - Error::insufficient_capacity( - "No external IP addresses available", - "NextExternalIp::new returned NotFound", - ), - ); - - // But we should be able to allocate another SNat IP - let service4_id = Uuid::new_v4(); - let id4 = Uuid::new_v4(); - let ip4 = context - .db_datastore - .external_ip_allocate_service_snat(&context.opctx, id4, service4_id) - .await - .expect("Failed to allocate service IP address"); - assert!(ip4.is_service); - assert_eq!(ip4.kind, IpKind::SNat); - assert_eq!(ip4.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 2))); - assert_eq!(ip4.first_port.0, 16384); - assert_eq!(ip4.last_port.0, 32767); - assert_eq!(ip4.parent_id, Some(service4_id)); - assert_eq!( - context - .db_datastore - .external_ip_list_service(&context.opctx, service4_id) - .await - .expect("Failed to look up service external IPs"), - vec![ip4], - ); - - context.success().await; - } - - #[tokio::test] - async fn test_explicit_external_ip_for_service_is_idempotent() { + async fn test_external_ip_allocate_omicron_zone_is_idempotent() { let context = TestContext::new( - "test_explicit_external_ip_for_service_is_idempotent", + "test_external_ip_allocate_omicron_zone_is_idempotent", ) .await; @@ -1492,19 +1344,22 @@ mod tests { .unwrap(); context.initialize_ip_pool(SERVICE_IP_POOL_NAME, ip_range).await; + let ip_10_0_0_2 = + OmicronZoneExternalIpKind::Floating("10.0.0.2".parse().unwrap()); + let ip_10_0_0_3 = + OmicronZoneExternalIpKind::Floating("10.0.0.3".parse().unwrap()); + // Allocate an IP address as we would for an external, rack-associated // service. - let service_id = Uuid::new_v4(); - let id = Uuid::new_v4(); + let service_id = OmicronZoneUuid::new_v4(); + let id = ExternalIpUuid::new_v4(); let ip = context .db_datastore - .external_ip_allocate_service_explicit( + .external_ip_allocate_omicron_zone( &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 3)), + ZoneKind::Nexus, + OmicronZoneExternalIp { id, kind: ip_10_0_0_3 }, ) .await .expect("Failed to allocate service IP address"); @@ -1512,18 +1367,16 @@ mod tests { assert_eq!(ip.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 3))); assert_eq!(ip.first_port.0, 0); assert_eq!(ip.last_port.0, u16::MAX); - assert_eq!(ip.parent_id, Some(service_id)); + assert_eq!(ip.parent_id, Some(service_id.into_untyped_uuid())); // Try allocating the same service IP again. 
let ip_again = context .db_datastore - .external_ip_allocate_service_explicit( + .external_ip_allocate_omicron_zone( &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 3)), + ZoneKind::Nexus, + OmicronZoneExternalIp { id, kind: ip_10_0_0_3 }, ) .await .expect("Failed to allocate service IP address"); @@ -1535,13 +1388,14 @@ mod tests { // different UUID. let err = context .db_datastore - .external_ip_allocate_service_explicit( + .external_ip_allocate_omicron_zone( &context.opctx, - Uuid::new_v4(), - &Name("service-ip".parse().unwrap()), - "service-ip", service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 3)), + ZoneKind::Nexus, + OmicronZoneExternalIp { + id: ExternalIpUuid::new_v4(), + kind: ip_10_0_0_3, + }, ) .await .expect_err("Should have failed to re-allocate same IP address (different UUID)"); @@ -1554,13 +1408,14 @@ mod tests { // different input address. let err = context .db_datastore - .external_ip_allocate_service_explicit( + .external_ip_allocate_omicron_zone( &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 2)), + ZoneKind::Nexus, + OmicronZoneExternalIp { + id, + kind: ip_10_0_0_2, + }, ) .await .expect_err("Should have failed to re-allocate different IP address (same UUID)"); @@ -1571,14 +1426,17 @@ mod tests { // Try allocating the same service IP once more, but do it with a // different port range. + let ip_10_0_0_3_snat_0 = OmicronZoneExternalIpKind::Snat( + SourceNatConfig::new("10.0.0.3".parse().unwrap(), 0, 16383) + .unwrap(), + ); let err = context .db_datastore - .external_ip_allocate_service_explicit_snat( + .external_ip_allocate_omicron_zone( &context.opctx, - id, service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 3)), - (0, 16383), + ZoneKind::BoundaryNtp, + OmicronZoneExternalIp { id, kind: ip_10_0_0_3_snat_0 }, ) .await .expect_err("Should have failed to re-allocate different IP address (different port range)"); @@ -1588,16 +1446,22 @@ mod tests { ); // This time start with an explicit SNat - let snat_service_id = Uuid::new_v4(); - let snat_id = Uuid::new_v4(); + let ip_10_0_0_1_snat_32768 = OmicronZoneExternalIpKind::Snat( + SourceNatConfig::new("10.0.0.1".parse().unwrap(), 32768, 49151) + .unwrap(), + ); + let snat_service_id = OmicronZoneUuid::new_v4(); + let snat_id = ExternalIpUuid::new_v4(); let snat_ip = context .db_datastore - .external_ip_allocate_service_explicit_snat( + .external_ip_allocate_omicron_zone( &context.opctx, - snat_id, snat_service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)), - (32768, 49151), + ZoneKind::BoundaryNtp, + OmicronZoneExternalIp { + id: snat_id, + kind: ip_10_0_0_1_snat_32768, + }, ) .await .expect("Failed to allocate service IP address"); @@ -1606,17 +1470,22 @@ mod tests { assert_eq!(snat_ip.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))); assert_eq!(snat_ip.first_port.0, 32768); assert_eq!(snat_ip.last_port.0, 49151); - assert_eq!(snat_ip.parent_id, Some(snat_service_id)); + assert_eq!( + snat_ip.parent_id, + Some(snat_service_id.into_untyped_uuid()) + ); // Try allocating the same service IP again. 
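Taken together, these assertions pin down the allocator's idempotency contract: replaying the exact same `(zone, external IP)` request returns the existing record, while varying the IP's UUID, the address, or the SNAT port range is a conflict. A condensed sketch of that contract — `try_alloc` is a hypothetical wrapper around `external_ip_allocate_omicron_zone`, and `OmicronZoneExternalIpKind` is assumed `Copy` given how the test reuses its values:

```rust
// Hypothetical wrapper, shown only to state the contract.
let zone = OmicronZoneUuid::new_v4();
let id = ExternalIpUuid::new_v4();
let kind = OmicronZoneExternalIpKind::Floating("10.0.0.3".parse().unwrap());

let first = try_alloc(zone, OmicronZoneExternalIp { id, kind }).await.unwrap();
let replay = try_alloc(zone, OmicronZoneExternalIp { id, kind }).await.unwrap();
assert_eq!(first.id, replay.id); // exact replay: idempotent

// Any variation conflicts: a new UUID for the same address...
try_alloc(zone, OmicronZoneExternalIp { id: ExternalIpUuid::new_v4(), kind })
    .await
    .unwrap_err();
// ...or the same UUID with a different address.
let other = OmicronZoneExternalIpKind::Floating("10.0.0.2".parse().unwrap());
try_alloc(zone, OmicronZoneExternalIp { id, kind: other }).await.unwrap_err();
```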
let snat_ip_again = context .db_datastore - .external_ip_allocate_service_explicit_snat( + .external_ip_allocate_omicron_zone( &context.opctx, - snat_id, snat_service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)), - (32768, 49151), + ZoneKind::BoundaryNtp, + OmicronZoneExternalIp { + id: snat_id, + kind: ip_10_0_0_1_snat_32768, + }, ) .await .expect("Failed to allocate service IP address"); @@ -1628,14 +1497,20 @@ mod tests { // Try allocating the same service IP once more, but do it with a // different port range. + let ip_10_0_0_1_snat_49152 = OmicronZoneExternalIpKind::Snat( + SourceNatConfig::new("10.0.0.1".parse().unwrap(), 49152, 65535) + .unwrap(), + ); let err = context .db_datastore - .external_ip_allocate_service_explicit_snat( + .external_ip_allocate_omicron_zone( &context.opctx, - snat_id, snat_service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)), - (49152, 65535), + ZoneKind::BoundaryNtp, + OmicronZoneExternalIp { + id: snat_id, + kind: ip_10_0_0_1_snat_49152, + }, ) .await .expect_err("Should have failed to re-allocate different IP address (different port range)"); @@ -1648,9 +1523,9 @@ mod tests { } #[tokio::test] - async fn test_explicit_external_ip_for_service_out_of_range() { + async fn test_external_ip_allocate_omicron_zone_out_of_range() { let context = TestContext::new( - "test_explicit_external_ip_for_service_out_of_range", + "test_external_ip_allocate_omicron_zone_out_of_range", ) .await; @@ -1661,17 +1536,19 @@ mod tests { .unwrap(); context.initialize_ip_pool(SERVICE_IP_POOL_NAME, ip_range).await; - let service_id = Uuid::new_v4(); - let id = Uuid::new_v4(); + let ip_10_0_0_5 = OmicronZoneExternalIpKind::Floating(IpAddr::V4( + Ipv4Addr::new(10, 0, 0, 5), + )); + + let service_id = OmicronZoneUuid::new_v4(); + let id = ExternalIpUuid::new_v4(); let err = context .db_datastore - .external_ip_allocate_service_explicit( + .external_ip_allocate_omicron_zone( &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 5)), + ZoneKind::Nexus, + OmicronZoneExternalIp { id, kind: ip_10_0_0_5 }, ) .await .expect_err("Should have failed to allocate out-of-bounds IP"); @@ -1683,116 +1560,6 @@ mod tests { context.success().await; } - #[tokio::test] - async fn test_insert_external_ip_for_service_is_idempotent() { - let context = TestContext::new( - "test_insert_external_ip_for_service_is_idempotent", - ) - .await; - - let ip_range = IpRange::try_from(( - Ipv4Addr::new(10, 0, 0, 1), - Ipv4Addr::new(10, 0, 0, 2), - )) - .unwrap(); - context.initialize_ip_pool(SERVICE_IP_POOL_NAME, ip_range).await; - - // Allocate an IP address as we would for an external, rack-associated - // service. 
- let service_id = Uuid::new_v4(); - let id = Uuid::new_v4(); - let ip = context - .db_datastore - .external_ip_allocate_service( - &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", - service_id, - ) - .await - .expect("Failed to allocate service IP address"); - assert!(ip.is_service); - assert_eq!(ip.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))); - assert_eq!(ip.first_port.0, 0); - assert_eq!(ip.last_port.0, u16::MAX); - assert_eq!(ip.parent_id, Some(service_id)); - - let ip_again = context - .db_datastore - .external_ip_allocate_service( - &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", - service_id, - ) - .await - .expect("Failed to allocate service IP address"); - - assert_eq!(ip.id, ip_again.id); - assert_eq!(ip.ip.ip(), ip_again.ip.ip()); - - context.success().await; - } - - // This test is identical to "test_insert_external_ip_is_idempotent", - // but tries to make an idempotent allocation after all addresses in the - // pool have been allocated. - #[tokio::test] - async fn test_insert_external_ip_for_service_is_idempotent_even_when_full() - { - let context = TestContext::new( - "test_insert_external_ip_is_idempotent_even_when_full", - ) - .await; - - let ip_range = IpRange::try_from(( - Ipv4Addr::new(10, 0, 0, 1), - Ipv4Addr::new(10, 0, 0, 1), - )) - .unwrap(); - context.initialize_ip_pool(SERVICE_IP_POOL_NAME, ip_range).await; - - // Allocate an IP address as we would for an external, rack-associated - // service. - let service_id = Uuid::new_v4(); - let id = Uuid::new_v4(); - let ip = context - .db_datastore - .external_ip_allocate_service( - &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", - service_id, - ) - .await - .expect("Failed to allocate service IP address"); - assert!(ip.is_service); - assert_eq!(ip.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))); - assert_eq!(ip.first_port.0, 0); - assert_eq!(ip.last_port.0, u16::MAX); - assert_eq!(ip.parent_id, Some(service_id)); - - let ip_again = context - .db_datastore - .external_ip_allocate_service( - &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", - service_id, - ) - .await - .expect("Failed to allocate service IP address"); - - assert_eq!(ip.id, ip_again.id); - assert_eq!(ip.ip.ip(), ip_again.ip.ip()); - - context.success().await; - } - #[tokio::test] async fn test_insert_external_ip_is_idempotent() { let context = diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index 79eb86fe09..c93ac94408 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -465,7 +465,7 @@ mod test { use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; - use nexus_inventory::CollectionBuilder; + use nexus_inventory::now_db_precision; use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::blueprint_builder::EnsureMultiple; use nexus_reconfigurator_planning::example::example; @@ -476,13 +476,9 @@ mod test { use nexus_types::deployment::BlueprintTarget; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; - use nexus_types::deployment::SledDisk; - use nexus_types::deployment::SledFilter; - use nexus_types::deployment::SledResources; + use nexus_types::deployment::BlueprintZonesConfig; use nexus_types::external_api::params; use nexus_types::external_api::shared; - use 
nexus_types::external_api::views::PhysicalDiskPolicy; - use nexus_types::external_api::views::PhysicalDiskState; use nexus_types::identity::Resource; use nexus_types::internal_api::params::DnsConfigParams; use nexus_types::internal_api::params::DnsConfigZone; @@ -497,11 +493,9 @@ mod test { use omicron_common::address::SLED_PREFIX; use omicron_common::api::external::Generation; use omicron_common::api::external::IdentityMetadataCreateParams; - use omicron_common::disk::DiskIdentity; use omicron_test_utils::dev::test_setup_log; + use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneUuid; - use omicron_uuid_kinds::PhysicalDiskUuid; - use omicron_uuid_kinds::ZpoolUuid; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::collections::HashMap; @@ -510,23 +504,11 @@ mod test { use std::net::Ipv6Addr; use std::net::SocketAddrV6; use std::sync::Arc; + use uuid::Uuid; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; - fn blueprint_empty() -> Blueprint { - let builder = CollectionBuilder::new("test-suite"); - let collection = builder.build(); - BlueprintBuilder::build_initial_from_collection( - &collection, - Generation::new(), - Generation::new(), - std::iter::empty(), - "test-suite", - ) - .expect("failed to generate empty blueprint") - } - fn dns_config_empty() -> DnsConfigParams { DnsConfigParams { generation: 1, @@ -541,7 +523,10 @@ mod test { /// test blueprint_internal_dns_config(): trivial case of an empty blueprint #[test] fn test_blueprint_internal_dns_empty() { - let blueprint = blueprint_empty(); + let blueprint = BlueprintBuilder::build_empty_with_sleds( + std::iter::empty(), + "test-suite", + ); let blueprint_dns = blueprint_internal_dns_config( &blueprint, &BTreeMap::new(), @@ -566,45 +551,46 @@ mod test { let rack_subnet = ipnet::Ipv6Net::new(rack_subnet_base, RACK_PREFIX).unwrap(); let possible_sled_subnets = rack_subnet.subnets(SLED_PREFIX).unwrap(); - // Ignore sleds with no associated zones in the inventory. - // This is included in the "representative" collection, but it's - // not allowed by BlueprintBuilder::build_initial_from_collection(). - let policy_sleds = collection - .omicron_zones - .keys() - .zip(possible_sled_subnets) - .map(|(sled_id, subnet)| { - let sled_resources = SledResources { - zpools: BTreeMap::from([( - ZpoolUuid::new_v4(), - SledDisk { - disk_identity: DiskIdentity { - vendor: String::from("v"), - serial: format!("s-{sled_id}"), - model: String::from("m"), - }, - disk_id: PhysicalDiskUuid::new_v4(), - policy: PhysicalDiskPolicy::InService, - state: PhysicalDiskState::Active, - }, - )]), - subnet: Ipv6Subnet::new(subnet.network()), - }; - (*sled_id, sled_resources) - }) - .collect::>(); + + // Convert the inventory `OmicronZonesConfig`s into + // `BlueprintZonesConfig`. This is going to get more painful over time + // as we add to blueprints, but for now we can make this work. 
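The conversion that comment introduces is carried by `BlueprintZoneConfig::from_omicron_zone_config`, which is fallible (hence the `expect`) — presumably because inventory zones carry stringly-typed addresses that must parse, consistent with the `http_address`/`dns_address` changes elsewhere in this diff. Reduced to its essentials:

```rust
// Per-sled conversion: inventory zones -> blueprint zones, all marked
// in-service since they came from a live collection.
let zones: Vec<BlueprintZoneConfig> = zones_config
    .zones
    .zones
    .into_iter()
    .map(|config| {
        BlueprintZoneConfig::from_omicron_zone_config(
            config,
            BlueprintZoneDisposition::InService,
        )
        .expect("failed to convert zone config")
    })
    .collect();
```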
+ let mut blueprint_zones = BTreeMap::new(); + for (sled_id, zones_config) in collection.omicron_zones { + blueprint_zones.insert( + sled_id.into_untyped_uuid(), + BlueprintZonesConfig { + generation: zones_config.zones.generation, + zones: zones_config + .zones + .zones + .into_iter() + .map(|config| { + BlueprintZoneConfig::from_omicron_zone_config( + config, + BlueprintZoneDisposition::InService, + ) + .expect("failed to convert zone config") + }) + .collect(), + }, + ); + } let dns_empty = dns_config_empty(); let initial_dns_generation = Generation::from(u32::try_from(dns_empty.generation).unwrap()); - let mut blueprint = BlueprintBuilder::build_initial_from_collection( - &collection, - initial_dns_generation, - Generation::new(), - policy_sleds.keys().copied(), - "test-suite", - ) - .expect("failed to build initial blueprint"); + let mut blueprint = Blueprint { + id: Uuid::new_v4(), + blueprint_zones, + blueprint_disks: BTreeMap::new(), + parent_blueprint_id: None, + internal_dns_version: initial_dns_generation, + external_dns_version: Generation::new(), + time_created: now_db_precision(), + creator: "test-suite".to_string(), + comment: "test blueprint".to_string(), + }; // To make things slightly more interesting, let's add a zone that's // not currently in service. @@ -630,18 +616,23 @@ mod test { // To generate the blueprint's DNS config, we need to make up a // different set of information about the Quiesced fake system. - let sleds_by_id = policy_sleds - .iter() + let sleds_by_id = blueprint + .blueprint_zones + .keys() + .zip(possible_sled_subnets) .enumerate() - .map(|(i, (sled_id, sled_resources))| { + .map(|(i, (sled_id, subnet))| { + let sled_id = SledUuid::from_untyped_uuid(*sled_id); let sled_info = Sled { - id: *sled_id, - sled_agent_address: get_sled_address(sled_resources.subnet), + id: sled_id, + sled_agent_address: get_sled_address(Ipv6Subnet::new( + subnet.network(), + )), // The first two of these (arbitrarily) will be marked // Scrimlets. 
is_scrimlet: i < 2, }; - (*sled_id, sled_info) + (sled_id, sled_info) }) .collect(); @@ -693,7 +684,8 @@ mod test { .iter() .filter_map(|(sled_id, sled)| { if sled.is_scrimlet { - let sled_subnet = policy_sleds.get(sled_id).unwrap().subnet; + let sled_subnet = + sleds_by_id.get(sled_id).unwrap().subnet(); let switch_zone_ip = get_switch_zone_address(sled_subnet); Some((switch_zone_ip, *sled_id)) } else { @@ -829,16 +821,9 @@ mod test { async fn test_blueprint_external_dns_basic() { static TEST_NAME: &str = "test_blueprint_external_dns_basic"; let logctx = test_setup_log(TEST_NAME); - let (collection, input) = example(&logctx.log, TEST_NAME, 5); - let initial_external_dns_generation = Generation::new(); - let mut blueprint = BlueprintBuilder::build_initial_from_collection( - &collection, - Generation::new(), - initial_external_dns_generation, - input.all_sled_ids(SledFilter::All), - "test suite", - ) - .expect("failed to generate initial blueprint"); + let (_, _, mut blueprint) = example(&logctx.log, TEST_NAME, 5); + blueprint.internal_dns_version = Generation::new(); + blueprint.external_dns_version = Generation::new(); let my_silo = Silo::new(params::SiloCreate { identity: IdentityMetadataCreateParams { diff --git a/nexus/reconfigurator/execution/src/resource_allocation.rs b/nexus/reconfigurator/execution/src/resource_allocation.rs index 86eeb8af13..42a3a4f5de 100644 --- a/nexus/reconfigurator/execution/src/resource_allocation.rs +++ b/nexus/reconfigurator/execution/src/resource_allocation.rs @@ -7,8 +7,6 @@ use anyhow::bail; use anyhow::Context; use nexus_db_model::IncompleteNetworkInterface; -use nexus_db_model::Name; -use nexus_db_model::SqlU16; use nexus_db_model::VpcSubnet; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::fixed_data::vpc_subnet::DNS_VPC_SUBNET; @@ -18,17 +16,21 @@ use nexus_db_queries::db::DataStore; use nexus_types::deployment::blueprint_zone_type; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneType; +use nexus_types::deployment::OmicronZoneExternalIp; +use nexus_types::deployment::OmicronZoneExternalIpKind; use nexus_types::deployment::SourceNatConfig; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::internal::shared::NetworkInterface; use omicron_common::api::internal::shared::NetworkInterfaceKind; +use omicron_uuid_kinds::ExternalIpUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneUuid; +use sled_agent_client::ZoneKind; +use slog::error; use slog::info; use slog::warn; use std::net::IpAddr; use std::net::SocketAddr; -use uuid::Uuid; pub(crate) async fn ensure_zone_resources_allocated( opctx: &OpContext, @@ -68,7 +70,7 @@ pub(crate) async fn ensure_zone_resources_allocated( ) => { allocator .ensure_boundary_ntp_external_networking_allocated( - z.id, snat_cfg, nic, + z.id, *snat_cfg, nic, ) .await?; } @@ -96,16 +98,15 @@ impl<'a> ResourceAllocator<'a> { // already allocated to a specific service zone. async fn is_external_ip_already_allocated( &self, - zone_type: &'static str, + zone_kind: ZoneKind, zone_id: OmicronZoneUuid, - external_ip: IpAddr, - port_range: Option<(u16, u16)>, + ip_kind: OmicronZoneExternalIpKind, ) -> anyhow::Result { // localhost is used by many components in the test suite. We can't use // the normal path because normally a given external IP must only be // used once. Just treat localhost in the test suite as though it's // already allocated. We do the same in is_nic_already_allocated(). 
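Note that the allocation check now takes the whole `OmicronZoneExternalIpKind` rather than an `IpAddr` plus an optional port range, and reads the address back through an `ip()` accessor. The accessor itself isn't shown in this diff; a plausible shape, inferred from `ip_kind.ip().is_loopback()` below (an assumption):

```rust
impl OmicronZoneExternalIpKind {
    /// The external address, whichever kind of allocation it is.
    /// (Assumed implementation; `SourceNatConfig.ip` is public per the
    /// call sites elsewhere in this diff.)
    pub fn ip(&self) -> std::net::IpAddr {
        match self {
            OmicronZoneExternalIpKind::Floating(ip) => *ip,
            OmicronZoneExternalIpKind::Snat(snat) => snat.ip,
        }
    }
}
```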
- if cfg!(test) && external_ip.is_loopback() { + if cfg!(test) && ip_kind.ip().is_loopback() { return Ok(true); } @@ -115,54 +116,77 @@ impl<'a> ResourceAllocator<'a> { .await .with_context(|| { format!( - "failed to look up external IPs for {zone_type} {zone_id}" + "failed to look up external IPs for {zone_kind} {zone_id}" ) })?; - if !allocated_ips.is_empty() { - // All the service zones that want external IP addresses only expect - // to have a single IP. This service already has (at least) one: - // make sure this list includes the one we want, or return an error. - for allocated_ip in &allocated_ips { - if allocated_ip.ip.ip() == external_ip - && port_range - .map(|(first, last)| { - allocated_ip.first_port == SqlU16(first) - && allocated_ip.last_port == SqlU16(last) - }) - .unwrap_or(true) - { - info!( - self.opctx.log, "found already-allocated external IP"; - "zone_type" => zone_type, - "zone_id" => %zone_id, - "ip" => %external_ip, - ); - return Ok(true); - } + // We expect to find either 0 or exactly 1 IP for any given zone. If 0, + // we know the IP isn't allocated; if 1, we'll check that it matches + // below. + let existing_ip = match allocated_ips.as_slice() { + [] => { + info!( + self.opctx.log, "external IP allocation required for zone"; + "zone_kind" => %zone_kind, + "zone_id" => %zone_id, + "ip" => ?ip_kind, + ); + + return Ok(false); + } + [ip] => ip, + _ => { + warn!( + self.opctx.log, "zone has multiple IPs allocated"; + "zone_kind" => %zone_kind, + "zone_id" => %zone_id, + "want_ip" => ?ip_kind, + "allocated_ips" => ?allocated_ips, + ); + bail!( + "zone {zone_id} already has {} IPs allocated (expected 1)", + allocated_ips.len() + ); } + }; - warn!( - self.opctx.log, "zone has unexpected IPs allocated"; - "zone_type" => zone_type, + // We expect this to always succeed; a failure here means we've stored + // an Omicron zone IP in the database that can't be converted back to an + // Omicron zone IP! + let existing_ip = match OmicronZoneExternalIp::try_from(existing_ip) { + Ok(existing_ip) => existing_ip, + Err(err) => { + error!( + self.opctx.log, "invalid IP in database for zone"; + "zone_kind" => %zone_kind, + "zone_id" => %zone_id, + "ip" => ?existing_ip, + &err, + ); + bail!("zone {zone_id} has invalid IP database record: {err}"); + } + }; + + // TODO-cleanup The blueprint should store the IP ID, at which point we + // could check full equality here instead of only checking the kind. + if existing_ip.kind == ip_kind { + info!( + self.opctx.log, "found already-allocated external IP"; + "zone_kind" => %zone_kind, "zone_id" => %zone_id, - "want_ip" => %external_ip, - "allocated_ips" => ?allocated_ips, - ); - bail!( - "zone {zone_id} already has {} non-matching IP(s) allocated", - allocated_ips.len() + "ip" => ?ip_kind, ); + return Ok(true); } - info!( - self.opctx.log, "external IP allocation required for zone"; - "zone_type" => zone_type, + warn!( + self.opctx.log, "zone has unexpected IP allocated"; + "zone_kind" => %zone_kind, "zone_id" => %zone_id, - "ip" => %external_ip, + "want_ip" => ?ip_kind, + "allocated_ip" => ?existing_ip, ); - - Ok(false) + bail!("zone {zone_id} has a different IP allocated ({existing_ip:?})",); } // Helper function to determine whether a given NIC is already allocated to @@ -237,14 +261,11 @@ impl<'a> ResourceAllocator<'a> { Ok(false) } - // Nexus and ExternalDns both use non-SNAT service IPs; this method is used - // to allocate external networking for both of them. 
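The heart of the rewrite above is a slice-pattern match that makes the "zero or exactly one IP per zone" invariant explicit, replacing the old loop over `allocated_ips`. The same shape in isolation, with placeholder types rather than omicron's:

```rust
// Generic form of the zero-or-one check.
fn already_allocated<T: std::fmt::Debug + PartialEq>(
    allocated: &[T],
    want: &T,
) -> Result<bool, String> {
    match allocated {
        // Nothing allocated yet: caller should go allocate.
        [] => Ok(false),
        // Exactly one, and it matches: idempotent success.
        [existing] if existing == want => Ok(true),
        // Exactly one, but different: a real conflict.
        [existing] => Err(format!("different allocation exists: {existing:?}")),
        // More than one should be impossible; surface it loudly.
        more => Err(format!("expected at most 1 allocation, found {}", more.len())),
    }
}
```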
async fn ensure_external_service_ip( &self, - zone_type: &'static str, - service_id: OmicronZoneUuid, - external_ip: IpAddr, - ip_name: &Name, + zone_kind: ZoneKind, + zone_id: OmicronZoneUuid, + ip_kind: OmicronZoneExternalIpKind, ) -> anyhow::Result<()> { // Only attempt to allocate `external_ip` if it isn't already assigned // to this zone. @@ -259,94 +280,32 @@ impl<'a> ResourceAllocator<'a> { // exactly what we want if two Nexuses try to realize the same // blueprint at the same time. if self - .is_external_ip_already_allocated( - zone_type, - service_id, - external_ip, - None, - ) + .is_external_ip_already_allocated(zone_kind, zone_id, ip_kind) .await? { return Ok(()); } - let ip_id = Uuid::new_v4(); - let description = zone_type; + let ip_id = ExternalIpUuid::new_v4(); self.datastore - .external_ip_allocate_service_explicit( + .external_ip_allocate_omicron_zone( self.opctx, - ip_id, - ip_name, - description, - service_id.into_untyped_uuid(), - external_ip, + zone_id, + zone_kind, + OmicronZoneExternalIp { id: ip_id, kind: ip_kind }, ) .await .with_context(|| { format!( - "failed to allocate IP to {zone_type} {service_id}: \ - {external_ip}" + "failed to allocate IP to {zone_kind} {zone_id}: \ + {ip_kind:?}" ) })?; info!( self.opctx.log, "successfully allocated external IP"; - "zone_type" => zone_type, - "zone_id" => %service_id, - "ip" => %external_ip, - "ip_id" => %ip_id, - ); - - Ok(()) - } - - // BoundaryNtp uses a SNAT service IPs; this method is similar to - // `ensure_external_service_ip` but accounts for that. - async fn ensure_external_service_snat_ip( - &self, - zone_type: &'static str, - service_id: OmicronZoneUuid, - snat: &SourceNatConfig, - ) -> anyhow::Result<()> { - // Only attempt to allocate `external_ip` if it isn't already assigned - // to this zone. - // - // This is subject to the same kind of TOCTOU race as described for IP - // allocation in `ensure_external_service_ip`, and we believe it's okay - // for the same reasons as described there. - if self - .is_external_ip_already_allocated( - zone_type, - service_id, - snat.ip, - Some((snat.first_port, snat.last_port)), - ) - .await? 
- { - return Ok(()); - } - - let ip_id = Uuid::new_v4(); - self.datastore - .external_ip_allocate_service_explicit_snat( - self.opctx, - ip_id, - service_id.into_untyped_uuid(), - snat.ip, - (snat.first_port, snat.last_port), - ) - .await - .with_context(|| { - format!( - "failed to allocate snat IP to {zone_type} {service_id}: \ - {snat:?}" - ) - })?; - - info!( - self.opctx.log, "successfully allocated external SNAT IP"; - "zone_type" => zone_type, - "zone_id" => %service_id, - "snat" => ?snat, + "zone_kind" => %zone_kind, + "zone_id" => %zone_id, + "ip" => ?ip_kind, "ip_id" => %ip_id, ); @@ -461,10 +420,9 @@ impl<'a> ResourceAllocator<'a> { nic: &NetworkInterface, ) -> anyhow::Result<()> { self.ensure_external_service_ip( - "nexus", + ZoneKind::Nexus, zone_id, - external_ip, - &Name(nic.name.clone()), + OmicronZoneExternalIpKind::Floating(external_ip), ) .await?; self.ensure_service_nic("nexus", zone_id, nic, &NEXUS_VPC_SUBNET) @@ -479,10 +437,9 @@ impl<'a> ResourceAllocator<'a> { nic: &NetworkInterface, ) -> anyhow::Result<()> { self.ensure_external_service_ip( - "external_dns", + ZoneKind::ExternalDns, zone_id, - dns_address.ip(), - &Name(nic.name.clone()), + OmicronZoneExternalIpKind::Floating(dns_address.ip()), ) .await?; self.ensure_service_nic("external_dns", zone_id, nic, &DNS_VPC_SUBNET) @@ -493,10 +450,15 @@ impl<'a> ResourceAllocator<'a> { async fn ensure_boundary_ntp_external_networking_allocated( &self, zone_id: OmicronZoneUuid, - snat: &SourceNatConfig, + snat: SourceNatConfig, nic: &NetworkInterface, ) -> anyhow::Result<()> { - self.ensure_external_service_snat_ip("ntp", zone_id, snat).await?; + self.ensure_external_service_ip( + ZoneKind::BoundaryNtp, + zone_id, + OmicronZoneExternalIpKind::Snat(snat), + ) + .await?; self.ensure_service_nic("ntp", zone_id, nic, &NTP_VPC_SUBNET).await?; Ok(()) } @@ -506,6 +468,7 @@ impl<'a> ResourceAllocator<'a> { mod tests { use super::*; use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; + use nexus_db_model::SqlU16; use nexus_test_utils_macros::nexus_test; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; @@ -521,6 +484,7 @@ mod tests { use omicron_common::api::external::Vni; use std::net::IpAddr; use std::net::Ipv6Addr; + use uuid::Uuid; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; @@ -604,11 +568,12 @@ mod tests { // Boundary NTP: let ntp_id = OmicronZoneUuid::new_v4(); - let ntp_snat = SourceNatConfig { - ip: external_ips.next().expect("exhausted external_ips"), - first_port: NUM_SOURCE_NAT_PORTS, - last_port: 2 * NUM_SOURCE_NAT_PORTS - 1, - }; + let ntp_snat = SourceNatConfig::new( + external_ips.next().expect("exhausted external_ips"), + NUM_SOURCE_NAT_PORTS, + 2 * NUM_SOURCE_NAT_PORTS - 1, + ) + .unwrap(); let ntp_nic = NetworkInterface { id: Uuid::new_v4(), kind: NetworkInterfaceKind::Service { @@ -719,8 +684,10 @@ mod tests { assert!(db_ntp_ips[0].is_service); assert_eq!(db_ntp_ips[0].parent_id, Some(ntp_id.into_untyped_uuid())); assert_eq!(db_ntp_ips[0].ip, ntp_snat.ip.into()); - assert_eq!(db_ntp_ips[0].first_port, SqlU16(ntp_snat.first_port)); - assert_eq!(db_ntp_ips[0].last_port, SqlU16(ntp_snat.last_port)); + assert_eq!( + db_ntp_ips[0].first_port.0..=db_ntp_ips[0].last_port.0, + ntp_snat.port_range() + ); // Check that the NIC records were created. 
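`SourceNatConfig` also changes shape in this PR: construction goes through a fallible `new` (so a bad port range is rejected up front; the exact validation rule isn't visible here), and consumers read `port_range()` / `port_range_raw()` rather than the `first_port`/`last_port` fields, which appear to no longer be public. A sketch matching the call sites in these hunks:

```rust
use omicron_common::address::NUM_SOURCE_NAT_PORTS;

let ntp_ip: std::net::IpAddr = "10.0.0.1".parse().unwrap();

// Fallible construction; the tests unwrap because their ranges are
// known-good SNAT chunks.
let snat = SourceNatConfig::new(ntp_ip, 0, NUM_SOURCE_NAT_PORTS - 1).unwrap();

// An inclusive range for comparisons (as in the db_ntp_ips assertion)...
assert_eq!(snat.port_range(), 0..=NUM_SOURCE_NAT_PORTS - 1);
// ...or the raw endpoints when arithmetic is needed (as in the mutation test).
let (first, last) = snat.port_range_raw();
assert_eq!((first, last), (0, NUM_SOURCE_NAT_PORTS - 1));
```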
let db_nexus_nics = datastore @@ -842,7 +809,7 @@ mod tests { { *external_ip = bogus_ip; return format!( - "zone {} already has 1 non-matching IP", + "zone {} has a different IP allocated", zone.id ); } @@ -862,7 +829,7 @@ mod tests { { *dns_address = SocketAddr::new(bogus_ip, 0); return format!( - "zone {} already has 1 non-matching IP", + "zone {} has a different IP allocated", zone.id ); } @@ -879,10 +846,14 @@ mod tests { }, ) = &mut zone.zone_type { - snat_cfg.first_port += NUM_SOURCE_NAT_PORTS; - snat_cfg.last_port += NUM_SOURCE_NAT_PORTS; + let (mut first, mut last) = snat_cfg.port_range_raw(); + first += NUM_SOURCE_NAT_PORTS; + last += NUM_SOURCE_NAT_PORTS; + *snat_cfg = + SourceNatConfig::new(snat_cfg.ip, first, last) + .unwrap(); return format!( - "zone {} already has 1 non-matching IP", + "zone {} has a different IP allocated", zone.id ); } diff --git a/nexus/reconfigurator/planning/Cargo.toml b/nexus/reconfigurator/planning/Cargo.toml index 06d1c460ca..9c1d462a3b 100644 --- a/nexus/reconfigurator/planning/Cargo.toml +++ b/nexus/reconfigurator/planning/Cargo.toml @@ -6,6 +6,7 @@ edition = "2021" [dependencies] anyhow.workspace = true chrono.workspace = true +debug-ignore.workspace = true gateway-client.workspace = true illumos-utils.workspace = true indexmap.workspace = true @@ -28,4 +29,7 @@ omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true +maplit.workspace = true omicron-test-utils.workspace = true +proptest.workspace = true +test-strategy.workspace = true diff --git a/nexus/reconfigurator/planning/src/blueprint_builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs similarity index 80% rename from nexus/reconfigurator/planning/src/blueprint_builder.rs rename to nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index f024652332..a58b96162b 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -5,8 +5,10 @@ //! 
Low-level facility for generating Blueprints use crate::ip_allocator::IpAllocator; +use crate::planner::ZoneExpungeReason; use anyhow::anyhow; use anyhow::bail; +use debug_ignore::DebugIgnore; use internal_dns::config::Host; use internal_dns::config::Zone; use ipnet::IpAdd; @@ -22,13 +24,11 @@ use nexus_types::deployment::BlueprintZoneFilter; use nexus_types::deployment::BlueprintZoneType; use nexus_types::deployment::BlueprintZonesConfig; use nexus_types::deployment::DiskFilter; -use nexus_types::deployment::InvalidOmicronZoneType; use nexus_types::deployment::OmicronZoneDataset; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledFilter; use nexus_types::deployment::SledResources; use nexus_types::deployment::ZpoolName; -use nexus_types::inventory::Collection; use omicron_common::address::get_internal_dns_server_addresses; use omicron_common::address::get_sled_address; use omicron_common::address::get_switch_zone_address; @@ -45,15 +45,20 @@ use omicron_common::api::internal::shared::NetworkInterface; use omicron_common::api::internal::shared::NetworkInterfaceKind; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneKind; +use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; use rand::rngs::StdRng; use rand::SeedableRng; +use slog::debug; +use slog::error; +use slog::info; use slog::o; use slog::Logger; use std::borrow::Cow; use std::collections::BTreeMap; +use std::collections::BTreeSet; use std::collections::HashSet; use std::hash::Hash; use std::net::IpAddr; @@ -66,6 +71,10 @@ use typed_rng::TypedUuidRng; use typed_rng::UuidRng; use uuid::Uuid; +use super::zones::is_already_expunged; +use super::zones::BuilderZoneState; +use super::zones::BuilderZonesConfig; + /// Errors encountered while assembling blueprints #[derive(Debug, Error)] pub enum Error { @@ -81,8 +90,6 @@ pub enum Error { ExhaustedNexusIps, #[error("programming error in planner")] Planner(#[from] anyhow::Error), - #[error("invalid OmicronZoneType in collection")] - InvalidOmicronZoneType(#[from] InvalidOmicronZoneType), } /// Describes whether an idempotent "ensure" operation resulted in action taken @@ -118,11 +125,11 @@ fn zpool_id_to_external_name(zpool_id: ZpoolUuid) -> anyhow::Result { /// /// There are two basic ways to assemble a new blueprint: /// -/// 1. Build one directly from a collection. Such blueprints have no parent -/// blueprint. They are not customizable. Use -/// [`BlueprintBuilder::build_initial_from_collection`] for this. This would -/// generally only be used once in the lifetime of a rack, to assemble the -/// first blueprint. +/// 1. Build one directly. This would generally only be used once in the +/// lifetime of a rack, to assemble the first blueprint during rack setup. +/// It is also common in tests. To start with a blueprint that contains an +/// empty zone config for some number of sleds, use +/// [`BlueprintBuilder::build_empty_with_sleds`]. /// /// 2. Build one _from_ another blueprint, called the "parent", making changes /// as desired. Use [`BlueprintBuilder::new_based_on`] for this. Once the @@ -142,123 +149,78 @@ pub struct BlueprintBuilder<'a> { // These fields will become part of the final blueprint. See the // corresponding fields in `Blueprint`. 
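A toy rendering of the two assembly paths described in the doc comment above, using simplified stand-in types rather than omicron's real ones: an empty blueprint is built exactly once, and every later blueprint is derived from a parent.

```rust
use std::collections::BTreeMap;

#[derive(Clone, Debug)]
struct Blueprint {
    zones_by_sled: BTreeMap<u64, Vec<String>>,
    parent: Option<u64>,
    id: u64,
}

// Path 1: an empty starting blueprint with no parent.
fn build_empty_with_sleds(sled_ids: impl Iterator<Item = u64>) -> Blueprint {
    Blueprint {
        zones_by_sled: sled_ids.map(|id| (id, Vec::new())).collect(),
        parent: None,
        id: 0,
    }
}

// Path 2: a builder that records changes against a parent blueprint.
struct BlueprintBuilder<'a> {
    parent: &'a Blueprint,
    changes: BTreeMap<u64, Vec<String>>,
}

impl<'a> BlueprintBuilder<'a> {
    fn new_based_on(parent: &'a Blueprint) -> Self {
        Self { parent, changes: BTreeMap::new() }
    }

    fn add_zone(&mut self, sled_id: u64, zone: &str) {
        let parent = self.parent;
        self.changes
            .entry(sled_id)
            .or_insert_with(|| parent.zones_by_sled[&sled_id].clone())
            .push(zone.to_string());
    }

    fn build(self) -> Blueprint {
        let mut zones_by_sled = self.parent.zones_by_sled.clone();
        zones_by_sled.extend(self.changes);
        Blueprint {
            zones_by_sled,
            parent: Some(self.parent.id),
            id: self.parent.id + 1,
        }
    }
}

fn main() {
    let bp0 = build_empty_with_sleds([1, 2, 3].into_iter());
    let mut builder = BlueprintBuilder::new_based_on(&bp0);
    builder.add_zone(1, "internal_ntp");
    let bp1 = builder.build();
    assert_eq!(bp1.parent, Some(bp0.id));
    assert_eq!(bp1.zones_by_sled[&1], vec!["internal_ntp".to_string()]);
    assert!(bp1.zones_by_sled[&2].is_empty());
}
```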
- zones: BlueprintZonesBuilder<'a>, + pub(super) zones: BlueprintZonesBuilder<'a>, disks: BlueprintDisksBuilder<'a>, creator: String, comments: Vec, // These fields mirror how RSS chooses addresses for zone NICs. - nexus_v4_ips: Box + Send>, - nexus_v6_ips: Box + Send>, + nexus_v4_ips: AvailableIterator<'static, Ipv4Addr>, + nexus_v6_ips: AvailableIterator<'static, Ipv6Addr>, // Iterator of available external IPs for service zones - available_external_ips: Box + Send + 'a>, + available_external_ips: AvailableIterator<'a, IpAddr>, // Iterator of available MAC addresses in the system address range - available_system_macs: Box>, + available_system_macs: AvailableIterator<'a, MacAddr>, // Random number generator for new UUIDs rng: BlueprintBuilderRng, } impl<'a> BlueprintBuilder<'a> { - /// Directly construct a `Blueprint` from the contents of a particular - /// collection (representing no changes from the collection state) - pub fn build_initial_from_collection( - collection: &Collection, - internal_dns_version: Generation, - external_dns_version: Generation, - all_sleds: impl Iterator, + /// Directly construct a `Blueprint` that contains an empty zone config for + /// the given sleds. + pub fn build_empty_with_sleds( + sled_ids: impl Iterator, creator: &str, - ) -> Result { - Self::build_initial_impl( - collection, - internal_dns_version, - external_dns_version, - all_sleds, + ) -> Blueprint { + Self::build_empty_with_sleds_impl( + sled_ids, creator, BlueprintBuilderRng::new(), ) } - /// A version of [`Self::build_initial_from_collection`] that allows the + /// A version of [`Self::build_empty_with_sleds`] that allows the /// blueprint ID to be generated from a random seed. - pub fn build_initial_from_collection_seeded( - collection: &Collection, - internal_dns_version: Generation, - external_dns_version: Generation, - all_sleds: impl Iterator, + pub fn build_empty_with_sleds_seeded( + sled_ids: impl Iterator, creator: &str, seed: H, - ) -> Result { + ) -> Blueprint { let mut rng = BlueprintBuilderRng::new(); rng.set_seed(seed); - Self::build_initial_impl( - collection, - internal_dns_version, - external_dns_version, - all_sleds, - creator, - rng, - ) + Self::build_empty_with_sleds_impl(sled_ids, creator, rng) } - fn build_initial_impl( - collection: &Collection, - internal_dns_version: Generation, - external_dns_version: Generation, - all_sleds: impl Iterator, + fn build_empty_with_sleds_impl( + sled_ids: impl Iterator, creator: &str, mut rng: BlueprintBuilderRng, - ) -> Result { - let blueprint_zones = all_sleds + ) -> Blueprint { + let blueprint_zones = sled_ids .map(|sled_id| { - let zones = collection - .omicron_zones - .get(&sled_id) - .map(|z| &z.zones) - .ok_or_else(|| { - // We should not find a sled that's supposed to be - // in-service but is not part of the inventory. It's - // not that that can't ever happen. This could happen - // when a sled is first being added to the system. Of - // course it could also happen if this sled agent failed - // our inventory request. But this is the initial - // blueprint (so this shouldn't be the "add sled" case) - // and we want to get it right (so we don't want to - // leave out sleds whose sled agent happened to be down - // when we tried to do this). The operator (or, more - // likely, a support person) will have to sort out - // what's going on if this happens. 
- Error::Planner(anyhow!( - "building initial blueprint: sled {:?} is \ - supposed to be in service but has no zones \ - in inventory", - sled_id - )) - })?; - let config = - BlueprintZonesConfig::initial_from_collection(&zones)?; - - Ok(( - // TODO-cleanup use `TypedUuid` everywhere - sled_id.into_untyped_uuid(), - config, - )) + let config = BlueprintZonesConfig { + generation: Generation::new(), + zones: Vec::new(), + }; + (sled_id.into_untyped_uuid(), config) }) - .collect::>()?; - Ok(Blueprint { + .collect::>(); + let num_sleds = blueprint_zones.len(); + Blueprint { id: rng.blueprint_rng.next(), blueprint_zones, blueprint_disks: BTreeMap::new(), parent_blueprint_id: None, - internal_dns_version, - external_dns_version, + internal_dns_version: Generation::new(), + external_dns_version: Generation::new(), time_created: now_db_precision(), creator: creator.to_owned(), - comment: format!("from collection {}", collection.id), - }) + comment: format!("starting blueprint with {num_sleds} empty sleds"), + } } /// Construct a new `BlueprintBuilder` based on a previous blueprint, @@ -279,17 +241,17 @@ impl<'a> BlueprintBuilder<'a> { // need to allocate new resources to that zone. However, allocation at // this point is entirely optimistic and theoretical: our caller may // discard the blueprint we create without ever making it the new - // target, or it might be an arbitrarily long time before it becomes the - // target. We need to be able to make allocation decisions that we + // target, or it might be an arbitrarily long time before it becomes + // the target. We need to be able to make allocation decisions that we // expect the blueprint executor to be able to realize successfully if // and when we become the target, but we cannot _actually_ perform // resource allocation. // // To do this, we look at our parent blueprint's used resources, and - // then choose new resources that aren't already in use (if possible; if - // we need to allocate a new resource and the parent blueprint appears - // to be using all the resources of that kind, our blueprint generation - // will fail). + // then choose new resources that aren't already in use (if possible; + // if we need to allocate a new resource and the parent blueprint + // appears to be using all the resources of that kind, our blueprint + // generation will fail). // // For example, RSS assigns Nexus NIC IPs by stepping through a list of // addresses based on `NEXUS_OPTE_IPVx_SUBNET` (as in the iterators @@ -300,12 +262,19 @@ impl<'a> BlueprintBuilder<'a> { // Note that by building these iterators up front based on // `parent_blueprint`, we cannot reuse resources in a case where we // remove a zone that used a resource and then add another zone that - // wants the same kind of resource. We don't support zone removal yet, - // but expect this to be okay: we don't anticipate removal and addition - // to frequently be combined into the exact same blueprint, particularly - // in a way that expects the addition to reuse resources from the - // removal; we won't want to attempt to reuse resources from a zone - // until we know it's been fully removed. + // wants the same kind of resource. That is mostly okay, but there are + // some cases in which we may have to do that -- particularly external + // DNS zones, which tend to have a small number of fixed IPs. Solving + // that is a TODO. + // + // Also note that currently, we don't perform any kind of garbage + // collection on sleds and zones that no longer have any attached + // resources. 
Once a sled or zone is marked expunged, it will always + // stay in that state. + // https://github.com/oxidecomputer/omicron/issues/5552 tracks + // implementing this kind of garbage collection, and we should do it + // very soon. + let mut existing_nexus_v4_ips: HashSet = HashSet::new(); let mut existing_nexus_v6_ips: HashSet = HashSet::new(); let mut used_external_ips: HashSet = HashSet::new(); @@ -340,6 +309,7 @@ impl<'a> BlueprintBuilder<'a> { bail!("duplicate external IP: {external_ip}"); } } + if let Some(nic) = zone_type.opte_vnic() { if !used_macs.insert(nic.mac) { bail!("duplicate service vNIC MAC: {}", nic.mac); @@ -353,30 +323,26 @@ impl<'a> BlueprintBuilder<'a> { // of Nexus instances), but wouldn't be ideal if we have many resources // we need to skip. We could do something smarter here based on the sets // of used resources we built above if needed. - let nexus_v4_ips = Box::new( + let nexus_v4_ips = AvailableIterator::new( NEXUS_OPTE_IPV4_SUBNET .0 .iter() - .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES) - .filter(move |ip| !existing_nexus_v4_ips.contains(ip)), + .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), + existing_nexus_v4_ips, ); - let nexus_v6_ips = Box::new( + let nexus_v6_ips = AvailableIterator::new( NEXUS_OPTE_IPV6_SUBNET .0 .iter() - .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES) - .filter(move |ip| !existing_nexus_v6_ips.contains(ip)), - ); - let available_external_ips = Box::new( - input - .service_ip_pool_ranges() - .iter() - .flat_map(|r| r.iter()) - .filter(move |ip| !used_external_ips.contains(ip)), + .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), + existing_nexus_v6_ips, ); - let available_system_macs = Box::new( - MacAddr::iter_system().filter(move |mac| !used_macs.contains(mac)), + let available_external_ips = AvailableIterator::new( + input.service_ip_pool_ranges().iter().flat_map(|r| r.iter()), + used_external_ips, ); + let available_system_macs = + AvailableIterator::new(MacAddr::iter_system(), used_macs); Ok(BlueprintBuilder { log, @@ -436,6 +402,93 @@ impl<'a> BlueprintBuilder<'a> { self.comments.push(String::from(comment)); } + /// Expunges all zones from a sled. + /// + /// Returns a list of zone IDs expunged (excluding zones that were already + /// expunged). If the list is empty, then the operation was a no-op. + pub(crate) fn expunge_all_zones_for_sled( + &mut self, + sled_id: SledUuid, + reason: ZoneExpungeReason, + ) -> Result, Error> { + let log = self.log.new(o!( + "sled_id" => sled_id.to_string(), + )); + + // Do any zones need to be marked expunged? + let mut zones_to_expunge = BTreeSet::new(); + + let sled_zones = self.zones.current_sled_zones(sled_id); + for (z, state) in sled_zones { + let is_expunged = + is_already_expunged(z, state).map_err(|error| { + Error::Planner(anyhow!(error).context(format!( + "for sled {sled_id}, error computing zones to expunge" + ))) + })?; + + if !is_expunged { + zones_to_expunge.insert(z.id); + } + } + + if zones_to_expunge.is_empty() { + debug!( + log, + "sled has no zones that need expungement; skipping"; + ); + return Ok(zones_to_expunge); + } + + match reason { + ZoneExpungeReason::SledDecommissioned { policy } => { + // A sled marked as decommissioned should have no resources + // allocated to it. If it does, it's an illegal state, possibly + // introduced by a bug elsewhere in the system -- we need to + // produce a loud warning (i.e. an ERROR-level log message) on + // this, while still removing the zones. 
+ error!( + &log, + "sled has state Decommissioned, yet has zones \ + allocated to it; will expunge them \ + (sled policy is \"{policy}\")" + ); + } + ZoneExpungeReason::SledExpunged => { + // This is the expected situation. + info!( + &log, + "expunged sled with {} non-expunged zones found \ + (will expunge all zones)", + zones_to_expunge.len() + ); + } + } + + // Now expunge all the zones that need it. + let change = self.zones.change_sled_zones(sled_id); + change.expunge_zones(zones_to_expunge.clone()).map_err(|error| { + anyhow!(error) + .context(format!("for sled {sled_id}, error expunging zones")) + })?; + + // Finally, add a comment describing what happened. + let reason = match reason { + ZoneExpungeReason::SledDecommissioned { .. } => { + "sled state is decommissioned" + } + ZoneExpungeReason::SledExpunged => "sled policy is expunged", + }; + + self.comment(format!( + "sled {} ({reason}): {} zones expunged", + sled_id, + zones_to_expunge.len(), + )); + + Ok(zones_to_expunge) + } + /// Ensures that the blueprint contains disks for a sled which already /// exists in the database. /// @@ -519,7 +572,7 @@ impl<'a> BlueprintBuilder<'a> { let has_ntp = self .zones .current_sled_zones(sled_id) - .any(|z| z.zone_type.is_ntp()); + .any(|(z, _)| z.zone_type.is_ntp()); if has_ntp { return Ok(Ensure::NotNeeded); } @@ -583,7 +636,7 @@ impl<'a> BlueprintBuilder<'a> { // If this sled already has a Crucible zone on this pool, do nothing. let has_crucible_on_this_pool = - self.zones.current_sled_zones(sled_id).any(|z| { + self.zones.current_sled_zones(sled_id).any(|(z, _)| { matches!( &z.zone_type, BlueprintZoneType::Crucible(blueprint_zone_type::Crucible { @@ -634,7 +687,7 @@ impl<'a> BlueprintBuilder<'a> { pub fn sled_num_nexus_zones(&self, sled_id: SledUuid) -> usize { self.zones .current_sled_zones(sled_id) - .filter(|z| z.zone_type.is_nexus()) + .filter(|(z, _)| z.zone_type.is_nexus()) .count() } @@ -770,15 +823,11 @@ impl<'a> BlueprintBuilder<'a> { let _ = self.sled_resources(sled_id)?; let sled_zones = self.zones.change_sled_zones(sled_id); - // A sled should have a small number (< 20) of zones so a linear search - // should be very fast. - if sled_zones.zones.iter().any(|z| z.id == zone.id) { - return Err(Error::Planner(anyhow!( - "attempted to add zone that already exists: {}", - zone.id - ))); - } - sled_zones.zones.push(zone); + sled_zones.add_zone(zone).map_err(|error| { + anyhow!(error) + .context(format!("error adding zone to sled {sled_id}")) + })?; + Ok(()) } @@ -811,7 +860,7 @@ impl<'a> BlueprintBuilder<'a> { // Record each of the sled's zones' underlay addresses as // allocated. - for z in self.zones.current_sled_zones(sled_id) { + for (z, _) in self.zones.current_sled_zones(sled_id) { allocator.reserve(z.underlay_address); } @@ -827,13 +876,52 @@ impl<'a> BlueprintBuilder<'a> { ) -> Result<&SledResources, Error> { self.input.sled_resources(&sled_id).ok_or_else(|| { Error::Planner(anyhow!( - "attempted to use sled that is not in service: {}", + "attempted to use sled that is not currently known: {}", sled_id )) }) } } +/// Combines a base iterator with an `in_use` set, filtering out any elements +/// that are in the "in_use" set. +/// +/// This can be done with a chained `.filter` on the iterator, but +/// `AvailableIterator` also allows for inspection of the `in_use` set. +/// +/// Note that this is a stateful iterator -- i.e. it implements `Iterator`, not +/// `IntoIterator`. That's what we currently need in the planner. 
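Before the actual definition that follows, a self-contained re-sketch of the behavior (dropping `DebugIgnore` and the `Send` bound so it runs on its own):

```rust
use std::collections::HashSet;

// Minimal re-sketch of AvailableIterator: a base iterator filtered against an
// inspectable `in_use` set, instead of an opaque boxed filter chain.
struct Available<I, T> {
    base: I,
    in_use: HashSet<T>,
}

impl<I, T> Iterator for Available<I, T>
where
    I: Iterator<Item = T>,
    T: std::hash::Hash + Eq,
{
    type Item = T;

    fn next(&mut self) -> Option<T> {
        // `find` advances `base` past in-use items, exactly like the
        // chained-`filter` approach this replaces.
        self.base.find(|item| !self.in_use.contains(item))
    }
}

fn main() {
    let in_use: HashSet<u32> = [2, 3, 5].into_iter().collect();
    let mut ips = Available { base: 1..=6, in_use };
    assert_eq!(ips.next(), Some(1));
    assert_eq!(ips.next(), Some(4));
    assert_eq!(ips.next(), Some(6));
    assert_eq!(ips.next(), None);
    // Unlike a boxed filter chain, the in-use set stays inspectable.
    assert!(ips.in_use.contains(&5));
}
```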
+#[derive(Debug)] +pub struct AvailableIterator<'a, T> { + base: DebugIgnore + Send + 'a>>, + in_use: HashSet, +} + +impl<'a, T: Hash + Eq> AvailableIterator<'a, T> { + /// Creates a new `AvailableIterator` from a base iterator and a set of + /// elements that are in use. + pub fn new(base: I, in_use: impl IntoIterator) -> Self + where + I: Iterator + Send + 'a, + { + let in_use = in_use.into_iter().collect(); + AvailableIterator { base: DebugIgnore(Box::new(base)), in_use } + } + + /// Returns the in-use set. + pub fn in_use(&self) -> &HashSet { + &self.in_use + } +} + +impl Iterator for AvailableIterator<'_, T> { + type Item = T; + + fn next(&mut self) -> Option { + self.base.find(|item| !self.in_use.contains(item)) + } +} + #[derive(Debug)] struct BlueprintBuilderRng { // Have separate RNGs for the different kinds of UUIDs we might add, @@ -877,8 +965,8 @@ impl BlueprintBuilderRng { /// blueprint. We do this by keeping a copy of any [`BlueprintZonesConfig`] /// that we've changed and a _reference_ to the parent blueprint's zones. This /// struct makes it easy for callers iterate over the right set of zones. -struct BlueprintZonesBuilder<'a> { - changed_zones: BTreeMap, +pub(super) struct BlueprintZonesBuilder<'a> { + changed_zones: BTreeMap, // Temporarily make a clone of the parent blueprint's zones so we can use // typed UUIDs everywhere. Once we're done migrating, this `Cow` can be // removed. @@ -900,37 +988,33 @@ impl<'a> BlueprintZonesBuilder<'a> { pub fn change_sled_zones( &mut self, sled_id: SledUuid, - ) -> &mut BlueprintZonesConfig { + ) -> &mut BuilderZonesConfig { self.changed_zones.entry(sled_id).or_insert_with(|| { if let Some(old_sled_zones) = self.parent_zones.get(&sled_id) { - BlueprintZonesConfig { - generation: old_sled_zones.generation.next(), - zones: old_sled_zones.zones.clone(), - } + BuilderZonesConfig::from_parent(old_sled_zones) } else { - // The first generation is reserved to mean the one containing - // no zones. See OmicronZonesConfig::INITIAL_GENERATION. So - // we start with the next one. - BlueprintZonesConfig { - generation: Generation::new().next(), - zones: vec![], - } + BuilderZonesConfig::new() } }) } /// Iterates over the list of Omicron zones currently configured for this - /// sled in the blueprint that's being built + /// sled in the blueprint that's being built, along with each zone's state + /// in the builder. pub fn current_sled_zones( &self, sled_id: SledUuid, - ) -> Box + '_> { - if let Some(sled_zones) = self - .changed_zones - .get(&sled_id) - .or_else(|| self.parent_zones.get(&sled_id)) - { - Box::new(sled_zones.zones.iter()) + ) -> Box + '_> + { + if let Some(sled_zones) = self.changed_zones.get(&sled_id) { + Box::new(sled_zones.iter_zones().map(|z| (z.zone(), z.state()))) + } else if let Some(parent_zones) = self.parent_zones.get(&sled_id) { + Box::new( + parent_zones + .zones + .iter() + .map(|z| (z, BuilderZoneState::Unchanged)), + ) } else { Box::new(std::iter::empty()) } @@ -945,24 +1029,26 @@ impl<'a> BlueprintZonesBuilder<'a> { .map(|sled_id| { // Start with self.changed_zones, which contains entries for any // sled whose zones config is changing in this blueprint. - let mut zones = self - .changed_zones - .remove(&sled_id) - // If it's not there, use the config from the parent - // blueprint. - .or_else(|| self.parent_zones.get(&sled_id).cloned()) - // If it's not there either, then this must be a new sled - // and we haven't added any zones to it yet. 
Use the + if let Some(zones) = self.changed_zones.remove(&sled_id) { + (sled_id.into_untyped_uuid(), zones.build()) + } + // Next, check self.parent_zones, to represent an unchanged sled. + else if let Some(parent_zones) = + self.parent_zones.get(&sled_id) + { + (sled_id.into_untyped_uuid(), parent_zones.clone()) + } else { + // If the sled is not in self.parent_zones, then it must be a + // new sled and we haven't added any zones to it yet. Use the // standard initial config. - .unwrap_or_else(|| BlueprintZonesConfig { - generation: Generation::new(), - zones: vec![], - }); - - zones.sort(); - - // TODO-cleanup use `TypedUuid` everywhere - (sled_id.into_untyped_uuid(), zones) + ( + sled_id.into_untyped_uuid(), + BlueprintZonesConfig { + generation: Generation::new(), + zones: vec![], + }, + ) + } }) .collect() } @@ -1071,8 +1157,8 @@ pub mod test { use nexus_types::deployment::BlueprintZoneFilter; use omicron_common::address::IpRange; use omicron_test_utils::dev::test_setup_log; - use sled_agent_client::types::OmicronZoneType; use std::collections::BTreeSet; + use test_strategy::proptest; pub const DEFAULT_N_SLEDS: usize = 3; @@ -1103,18 +1189,8 @@ pub mod test { // describes no changes. static TEST_NAME: &str = "blueprint_builder_test_initial"; let logctx = test_setup_log(TEST_NAME); - let (collection, input) = + let (collection, input, blueprint_initial) = example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); - let blueprint_initial = - BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - Generation::new(), - Generation::new(), - input.all_sled_ids(SledFilter::All), - "the_test", - TEST_NAME, - ) - .expect("failed to create initial blueprint"); verify_blueprint(&blueprint_initial); let diff = @@ -1289,21 +1365,14 @@ pub mod test { fn test_add_physical_disks() { static TEST_NAME: &str = "blueprint_builder_test_add_physical_disks"; let logctx = test_setup_log(TEST_NAME); - let (collection, input) = - example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + let (_, input, _) = example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); - // We don't care about the DNS versions here. - let internal_dns_version = Generation::new(); - let external_dns_version = Generation::new(); - let parent = BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - internal_dns_version, - external_dns_version, + // Start with an empty blueprint (sleds with no zones). + let parent = BlueprintBuilder::build_empty_with_sleds_seeded( input.all_sled_ids(SledFilter::All), "test", TEST_NAME, - ) - .expect("failed to create initial blueprint"); + ); { // We start empty, and can add a disk @@ -1341,33 +1410,19 @@ pub mod test { static TEST_NAME: &str = "blueprint_builder_test_add_nexus_with_no_existing_nexus_zones"; let logctx = test_setup_log(TEST_NAME); - let (mut collection, input) = - example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); - - // We don't care about the DNS versions here. - let internal_dns_version = Generation::new(); - let external_dns_version = Generation::new(); - - // Adding a new Nexus zone currently requires copying settings from an - // existing Nexus zone. If we remove all Nexus zones from the - // collection, create a blueprint, then try to add a Nexus zone, it - // should fail. - for zones in collection.omicron_zones.values_mut() { - zones.zones.zones.retain(|z| { - !matches!(z.zone_type, OmicronZoneType::Nexus { .. 
}) - }); - } - let parent = BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - internal_dns_version, - external_dns_version, + // Discard the example blueprint and start with an empty one. + let (collection, input, _) = + example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + let parent = BlueprintBuilder::build_empty_with_sleds_seeded( input.all_sled_ids(SledFilter::All), "test", TEST_NAME, - ) - .expect("failed to create initial blueprint"); + ); + // Adding a new Nexus zone currently requires copying settings from an + // existing Nexus zone. `parent` has no zones, so we should fail if we + // try to add a Nexus zone. let mut builder = BlueprintBuilder::new_based_on( &logctx.log, &parent, @@ -1400,13 +1455,9 @@ pub mod test { fn test_add_nexus_error_cases() { static TEST_NAME: &str = "blueprint_builder_test_add_nexus_error_cases"; let logctx = test_setup_log(TEST_NAME); - let (mut collection, input) = + let (mut collection, input, mut parent) = example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); - // We don't care about the DNS versions here. - let internal_dns_version = Generation::new(); - let external_dns_version = Generation::new(); - // Remove the Nexus zone from one of the sleds so that // `sled_ensure_zone_nexus` can attempt to add a Nexus zone to // `sled_id`. @@ -1414,27 +1465,22 @@ pub mod test { let mut selected_sled_id = None; for (sled_id, zones) in &mut collection.omicron_zones { let nzones_before_retain = zones.zones.zones.len(); - zones.zones.zones.retain(|z| { - !matches!(z.zone_type, OmicronZoneType::Nexus { .. }) - }); + zones.zones.zones.retain(|z| !z.zone_type.is_nexus()); if zones.zones.zones.len() < nzones_before_retain { selected_sled_id = Some(*sled_id); + // Also remove this zone from the blueprint. + parent + .blueprint_zones + .get_mut(sled_id.as_untyped_uuid()) + .expect("missing sled") + .zones + .retain(|z| !z.zone_type.is_nexus()); break; } } selected_sled_id.expect("found no sleds with Nexus zone") }; - let parent = BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - internal_dns_version, - external_dns_version, - input.all_sled_ids(SledFilter::All), - "test", - TEST_NAME, - ) - .expect("failed to create initial blueprint"); - { // Attempting to add Nexus to the sled we removed it from (with no // other changes to the environment) should succeed. @@ -1521,7 +1567,7 @@ pub mod test { "blueprint_builder_test_invalid_parent_blueprint_\ two_zones_with_same_external_ip"; let logctx = test_setup_log(TEST_NAME); - let (mut collection, input) = + let (_, input, mut parent) = example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); // We should fail if the parent blueprint claims to contain two @@ -1531,10 +1577,12 @@ pub mod test { let mut found_second_nexus_zone = false; let mut nexus_external_ip = None; - 'outer: for zones in collection.omicron_zones.values_mut() { - for z in zones.zones.zones.iter_mut() { - if let OmicronZoneType::Nexus { external_ip, .. } = - &mut z.zone_type + 'outer: for zones in parent.blueprint_zones.values_mut() { + for z in zones.zones.iter_mut() { + if let BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + external_ip, + .. 
+ }) = &mut z.zone_type { if let Some(ip) = nexus_external_ip { *external_ip = ip; @@ -1549,16 +1597,6 @@ pub mod test { } assert!(found_second_nexus_zone, "only one Nexus zone present?"); - let parent = BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - Generation::new(), - Generation::new(), - input.all_sled_ids(SledFilter::All), - "test", - TEST_NAME, - ) - .unwrap(); - match BlueprintBuilder::new_based_on( &logctx.log, &parent, @@ -1581,7 +1619,7 @@ pub mod test { "blueprint_builder_test_invalid_parent_blueprint_\ two_nexus_zones_with_same_nic_ip"; let logctx = test_setup_log(TEST_NAME); - let (mut collection, input) = + let (_, input, mut parent) = example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); // We should fail if the parent blueprint claims to contain two @@ -1591,9 +1629,13 @@ pub mod test { let mut found_second_nexus_zone = false; let mut nexus_nic_ip = None; - 'outer: for zones in collection.omicron_zones.values_mut() { - for z in zones.zones.zones.iter_mut() { - if let OmicronZoneType::Nexus { nic, .. } = &mut z.zone_type { + 'outer: for zones in parent.blueprint_zones.values_mut() { + for z in zones.zones.iter_mut() { + if let BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + nic, + .. + }) = &mut z.zone_type + { if let Some(ip) = nexus_nic_ip { nic.ip = ip; found_second_nexus_zone = true; @@ -1607,16 +1649,6 @@ pub mod test { } assert!(found_second_nexus_zone, "only one Nexus zone present?"); - let parent = BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - Generation::new(), - Generation::new(), - input.all_sled_ids(SledFilter::All), - "test", - TEST_NAME, - ) - .unwrap(); - match BlueprintBuilder::new_based_on( &logctx.log, &parent, @@ -1639,7 +1671,7 @@ pub mod test { "blueprint_builder_test_invalid_parent_blueprint_\ two_zones_with_same_vnic_mac"; let logctx = test_setup_log(TEST_NAME); - let (mut collection, input) = + let (_, input, mut parent) = example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); // We should fail if the parent blueprint claims to contain two @@ -1649,9 +1681,13 @@ pub mod test { let mut found_second_nexus_zone = false; let mut nexus_nic_mac = None; - 'outer: for zones in collection.omicron_zones.values_mut() { - for z in zones.zones.zones.iter_mut() { - if let OmicronZoneType::Nexus { nic, .. } = &mut z.zone_type { + 'outer: for zones in parent.blueprint_zones.values_mut() { + for z in zones.zones.iter_mut() { + if let BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + nic, + .. + }) = &mut z.zone_type + { if let Some(mac) = nexus_nic_mac { nic.mac = mac; found_second_nexus_zone = true; @@ -1665,16 +1701,6 @@ pub mod test { } assert!(found_second_nexus_zone, "only one Nexus zone present?"); - let parent = BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - Generation::new(), - Generation::new(), - input.all_sled_ids(SledFilter::All), - "test", - TEST_NAME, - ) - .unwrap(); - match BlueprintBuilder::new_based_on( &logctx.log, &parent, @@ -1690,4 +1716,31 @@ pub mod test { logctx.cleanup_successful(); } + + /// Test that `AvailableIterator` correctly filters out items that are in + /// use. 
+ #[proptest] + fn test_available_iterator(items: HashSet<(i32, bool)>) { + let mut in_use_map = HashSet::new(); + let mut expected_available = Vec::new(); + let items: Vec<_> = items + .into_iter() + .map(|(item, in_use)| { + if in_use { + in_use_map.insert(item); + } else { + expected_available.push(item); + } + item + }) + .collect(); + + let available = AvailableIterator::new(items.into_iter(), in_use_map); + let actual_available = available.collect::>(); + + assert_eq!( + expected_available, actual_available, + "available items match" + ); + } } diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/mod.rs b/nexus/reconfigurator/planning/src/blueprint_builder/mod.rs new file mode 100644 index 0000000000..e3afa2cdad --- /dev/null +++ b/nexus/reconfigurator/planning/src/blueprint_builder/mod.rs @@ -0,0 +1,10 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Low-level facility for generating Blueprints + +mod builder; +mod zones; + +pub use builder::*; diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs b/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs new file mode 100644 index 0000000000..c0e0918503 --- /dev/null +++ b/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs @@ -0,0 +1,429 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::collections::BTreeSet; + +use nexus_types::deployment::{ + BlueprintZoneConfig, BlueprintZoneDisposition, BlueprintZonesConfig, +}; +use omicron_common::api::external::Generation; +use omicron_uuid_kinds::OmicronZoneUuid; +use thiserror::Error; + +#[derive(Debug)] +#[must_use] +pub(super) struct BuilderZonesConfig { + // The current generation -- this is bumped at blueprint build time and is + // otherwise not exposed to callers. + generation: Generation, + + // The list of zones, along with their state. + zones: Vec, +} + +impl BuilderZonesConfig { + pub(super) fn new() -> Self { + Self { + // Note that the first generation is reserved to mean the one + // containing no zones. See + // OmicronZonesConfig::INITIAL_GENERATION. + // + // Since we're currently assuming that creating a new + // `BuilderZonesConfig` means that we're going to add new zones + // shortly, we start with Generation::new() here. It'll get + // bumped up to the next one in `Self::build`. + generation: Generation::new(), + zones: vec![], + } + } + + pub(super) fn from_parent(parent: &BlueprintZonesConfig) -> Self { + Self { + // We'll bump this up at build time. + generation: parent.generation, + + zones: parent + .zones + .iter() + .map(|zone| BuilderZoneConfig { + zone: zone.clone(), + state: BuilderZoneState::Unchanged, + }) + .collect(), + } + } + + pub(super) fn add_zone( + &mut self, + zone: BlueprintZoneConfig, + ) -> Result<(), BuilderZonesConfigError> { + if self.zones.iter().any(|z| z.zone.id == zone.id) { + // We shouldn't be trying to add zones that already exist -- + // something went wrong in the planner logic. 
+ return Err(BuilderZonesConfigError::AddExistingZone { + zone_id: zone.id, + }); + }; + + self.zones + .push(BuilderZoneConfig { zone, state: BuilderZoneState::Added }); + Ok(()) + } + + pub(super) fn expunge_zones( + &mut self, + mut zones: BTreeSet, + ) -> Result<(), BuilderZonesConfigError> { + for zone in &mut self.zones { + if zones.remove(&zone.zone.id) { + // Check that the zone is expungeable. Typically, zones passed + // in here should have had this check done to them already, but + // in case they're not, or in case something else about those + // zones changed in between, check again. + is_already_expunged(&zone.zone, zone.state)?; + zone.zone.disposition = BlueprintZoneDisposition::Expunged; + zone.state = BuilderZoneState::Modified; + } + } + + // All zones passed in should have been found -- are there any left + // over? + if !zones.is_empty() { + return Err(BuilderZonesConfigError::ExpungeUnmatchedZones { + unmatched: zones, + }); + } + + Ok(()) + } + + pub(super) fn iter_zones( + &self, + ) -> impl Iterator { + self.zones.iter() + } + + pub(super) fn build(self) -> BlueprintZonesConfig { + let mut ret = BlueprintZonesConfig { + // Something we could do here is to check if any zones have + // actually been modified, and if not, return the parent's + // generation. For now, we depend on callers to only call + // `BlueprintZonesBuilder::change_sled_zones` when they really + // mean it. + generation: self.generation.next(), + zones: self.zones.into_iter().map(|z| z.zone).collect(), + }; + ret.sort(); + ret + } +} + +pub(super) fn is_already_expunged( + zone: &BlueprintZoneConfig, + state: BuilderZoneState, +) -> Result { + match zone.disposition { + BlueprintZoneDisposition::InService + | BlueprintZoneDisposition::Quiesced => { + if state != BuilderZoneState::Unchanged { + // We shouldn't be trying to expunge zones that have also been + // changed in this blueprint -- something went wrong in the planner + // logic. + return Err(BuilderZonesConfigError::ExpungeModifiedZone { + zone_id: zone.id, + state, + }); + } + Ok(false) + } + BlueprintZoneDisposition::Expunged => { + // Treat expungement as idempotent. 
+ Ok(true) + } + } +} + +#[derive(Debug)] +pub(super) struct BuilderZoneConfig { + zone: BlueprintZoneConfig, + state: BuilderZoneState, +} + +impl BuilderZoneConfig { + pub(super) fn zone(&self) -> &BlueprintZoneConfig { + &self.zone + } + + pub(super) fn state(&self) -> BuilderZoneState { + self.state + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub(super) enum BuilderZoneState { + Unchanged, + Modified, + Added, +} + +#[derive(Clone, Debug, PartialEq, Eq, Error)] +pub(super) enum BuilderZonesConfigError { + #[error("attempted to add zone that already exists: {zone_id}")] + AddExistingZone { zone_id: OmicronZoneUuid }, + #[error( + "attempted to expunge zone {zone_id} that was in state {state:?} \ + (can only expunge unchanged zones)" + )] + ExpungeModifiedZone { zone_id: OmicronZoneUuid, state: BuilderZoneState }, + #[error( + "while expunging zones, not all zones provided were found: {unmatched:?}" + )] + ExpungeUnmatchedZones { unmatched: BTreeSet }, +} + +#[cfg(test)] +mod tests { + use std::{ + collections::BTreeMap, + net::{Ipv6Addr, SocketAddrV6}, + }; + + use maplit::btreeset; + use nexus_types::{ + deployment::{ + blueprint_zone_type, BlueprintZoneType, SledDetails, SledFilter, + SledResources, + }, + external_api::views::{SledPolicy, SledState}, + }; + use omicron_common::address::Ipv6Subnet; + use omicron_test_utils::dev::test_setup_log; + + use crate::{ + blueprint_builder::{ + test::{verify_blueprint, DEFAULT_N_SLEDS}, + BlueprintBuilder, Ensure, + }, + example::ExampleSystem, + }; + + use super::*; + + /// A test focusing on `BlueprintZonesBuilder` and its internal logic. + #[test] + fn test_builder_zones() { + static TEST_NAME: &str = "blueprint_test_builder_zones"; + let logctx = test_setup_log(TEST_NAME); + let mut example = + ExampleSystem::new(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + let blueprint_initial = example.blueprint; + + // Add a completely bare sled to the input. + let (new_sled_id, input2) = { + let mut input = example.input.clone().into_builder(); + let new_sled_id = example.sled_rng.next(); + input + .add_sled( + new_sled_id, + SledDetails { + policy: SledPolicy::provisionable(), + state: SledState::Active, + resources: SledResources { + subnet: Ipv6Subnet::new( + "fd00:1::".parse().unwrap(), + ), + zpools: BTreeMap::new(), + }, + }, + ) + .expect("adding new sled"); + + (new_sled_id, input.build()) + }; + + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint_initial, + &input2, + "the_test", + ) + .expect("creating blueprint builder"); + builder.set_rng_seed((TEST_NAME, "bp2")); + + // Test adding a new sled with an NTP zone. + assert_eq!( + builder.sled_ensure_zone_ntp(new_sled_id).unwrap(), + Ensure::Added + ); + + // Iterate over the zones for the sled and ensure that the NTP zone is + // present. + { + let mut zones = builder.zones.current_sled_zones(new_sled_id); + let (_, state) = zones.next().expect("exactly one zone for sled"); + assert!(zones.next().is_none(), "exactly one zone for sled"); + assert_eq!( + state, + BuilderZoneState::Added, + "NTP zone should have been added" + ); + } + + // Now, test adding a new zone (Oximeter, picked arbitrarily) to an + // existing sled. 
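The next steps drive `change_sled_zones` on an existing sled; a minimal model of that copy-on-write overlay (stand-in types; reads consult changed sleds first, then fall back to the parent):

```rust
use std::collections::BTreeMap;

// Minimal model of BlueprintZonesBuilder's overlay: the first write to a
// sled clones the parent's config into the `changed` map; the parent itself
// is never mutated.
struct ZonesBuilder<'a> {
    parent: &'a BTreeMap<u32, Vec<String>>,
    changed: BTreeMap<u32, Vec<String>>,
}

impl<'a> ZonesBuilder<'a> {
    fn new(parent: &'a BTreeMap<u32, Vec<String>>) -> Self {
        Self { parent, changed: BTreeMap::new() }
    }

    fn change_sled_zones(&mut self, sled_id: u32) -> &mut Vec<String> {
        let parent = self.parent;
        self.changed
            .entry(sled_id)
            .or_insert_with(|| parent.get(&sled_id).cloned().unwrap_or_default())
    }

    fn current_sled_zones(&self, sled_id: u32) -> &[String] {
        self.changed
            .get(&sled_id)
            .or_else(|| self.parent.get(&sled_id))
            .map(Vec::as_slice)
            .unwrap_or(&[])
    }
}

fn main() {
    let mut parent = BTreeMap::new();
    parent.insert(7, vec!["crucible".to_string()]);
    let mut builder = ZonesBuilder::new(&parent);

    // Reads pass through to the parent until a sled is changed...
    assert_eq!(builder.current_sled_zones(7), parent[&7].as_slice());
    // ...and the first change clones the parent config, leaving it intact.
    builder.change_sled_zones(7).push("oximeter".to_string());
    assert_eq!(builder.current_sled_zones(7).len(), 2);
    assert_eq!(parent[&7].len(), 1);
}
```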
+ let existing_sled_id = example + .input + .all_sled_ids(SledFilter::All) + .next() + .expect("at least one sled present"); + let change = builder.zones.change_sled_zones(existing_sled_id); + + let new_zone_id = OmicronZoneUuid::new_v4(); + change + .add_zone(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: new_zone_id, + underlay_address: Ipv6Addr::UNSPECIFIED, + zone_type: BlueprintZoneType::Oximeter( + blueprint_zone_type::Oximeter { + address: SocketAddrV6::new( + Ipv6Addr::UNSPECIFIED, + 0, + 0, + 0, + ), + }, + ), + }) + .expect("adding new zone"); + + // Attempt to expunge one of the other zones on the sled. + let existing_zone_id = change + .iter_zones() + .find(|z| z.zone.id != new_zone_id) + .expect("at least one existing zone") + .zone + .id; + change + .expunge_zones(btreeset! { existing_zone_id }) + .expect("expunging existing zone"); + // Do it again to ensure that expunging an already-expunged zone is + // idempotent, even within the same blueprint. + change + .expunge_zones(btreeset! { existing_zone_id }) + .expect("expunging already-expunged zone"); + // But expunging a zone that doesn't exist should fail. + let non_existent_zone_id = OmicronZoneUuid::new_v4(); + let non_existent_set = btreeset! { non_existent_zone_id }; + let error = change + .expunge_zones(non_existent_set.clone()) + .expect_err("expunging non-existent zone"); + assert_eq!( + error, + BuilderZonesConfigError::ExpungeUnmatchedZones { + unmatched: non_existent_set + } + ); + + { + // Iterate over the zones and ensure that the Oximeter zone is + // present, and marked added. + let mut zones = builder.zones.current_sled_zones(existing_sled_id); + zones + .find_map(|(z, state)| { + if z.id == new_zone_id { + assert_eq!( + state, + BuilderZoneState::Added, + "new zone ID {new_zone_id} should be marked added" + ); + Some(()) + } else { + None + } + }) + .expect("new zone ID should be present"); + } + + // Also call change_sled_zones without making any changes. This + // currently bumps the generation number, but in the future might + // become smarter and not do so (in which case this test will break). + let control_sled_id = example + .input + .all_sled_ids(SledFilter::All) + .nth(2) + .expect("at least 2 sleds present"); + _ = builder.zones.change_sled_zones(control_sled_id); + + // Attempt to expunge the newly added Oximeter zone. This should fail + // because we only support expunging zones that are unchanged from the + // parent blueprint. + let error = builder + .zones + .change_sled_zones(existing_sled_id) + .expunge_zones(btreeset! { new_zone_id }) + .expect_err("expunging a new zone should fail"); + assert_eq!( + error, + BuilderZonesConfigError::ExpungeModifiedZone { + zone_id: new_zone_id, + state: BuilderZoneState::Added + } + ); + + // Now build the blueprint and ensure that all the changes we described + // above are present. + let blueprint = builder.build(); + verify_blueprint(&blueprint); + let diff = blueprint.diff_since_blueprint(&blueprint_initial).unwrap(); + println!("expecting new NTP and Oximeter zones:\n{}", diff.display()); + + // No sleds were removed. + assert_eq!(diff.sleds_removed().len(), 0); + + // One sled was added. + let sleds: Vec<_> = diff.sleds_added().collect(); + assert_eq!(sleds.len(), 1); + let (sled_id, new_sled_zones) = sleds[0]; + assert_eq!(sled_id, new_sled_id); + // The generation number should be newer than the initial default. 
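The assertion that follows leans on `Generation` semantics: `Generation::new()` is reserved for the empty config, so a sled's first real zones config is published at `.next()`. A minimal stand-in for the type:

```rust
// Minimal stand-in for omicron_common::api::external::Generation. The real
// type wraps a database-safe integer; this keeps only the ordering semantics.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Generation(u64);

impl Generation {
    // The first generation is reserved to mean "no zones yet"
    // (OmicronZonesConfig::INITIAL_GENERATION).
    fn new() -> Self {
        Generation(1)
    }

    fn next(self) -> Self {
        Generation(self.0 + 1)
    }
}

fn main() {
    let initial = Generation::new();
    // A builder that adds a sled's first zones must publish at least gen 2,
    // which is what the `Generation::new().next()` assertion checks.
    let first_real_config = initial.next();
    assert!(first_real_config > initial);
    assert_eq!(first_real_config, Generation(2));
}
```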
+ assert_eq!(new_sled_zones.generation, Generation::new().next()); + assert_eq!(new_sled_zones.zones.len(), 1); + + // Two sleds were modified: existing_sled_id and control_sled_id. + let sleds = diff.sleds_modified(); + assert_eq!(sleds.len(), 2, "2 sleds modified"); + for (sled_id, sled_modified) in sleds { + if sled_id == existing_sled_id { + assert_eq!( + sled_modified.generation_after, + sled_modified.generation_before.next() + ); + assert_eq!(sled_modified.zones_added().len(), 1); + let added_zone = sled_modified.zones_added().next().unwrap(); + assert_eq!(added_zone.id, new_zone_id); + + assert_eq!(sled_modified.zones_removed().len(), 0); + assert_eq!(sled_modified.zones_modified().count(), 1); + let modified_zone = + sled_modified.zones_modified().next().unwrap(); + assert_eq!(modified_zone.zone_before.id, existing_zone_id); + } else { + assert_eq!(sled_id, control_sled_id); + + // The generation number is bumped, but nothing else. + assert_eq!( + sled_modified.generation_after, + sled_modified.generation_before.next(), + "control sled has generation number bumped" + ); + assert_eq!(sled_modified.zones_added().len(), 0); + assert_eq!(sled_modified.zones_removed().len(), 0); + assert_eq!(sled_modified.zones_modified().count(), 0); + } + } + + logctx.cleanup_successful(); + } +} diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs index c269d4ccd2..760e880b8d 100644 --- a/nexus/reconfigurator/planning/src/example.rs +++ b/nexus/reconfigurator/planning/src/example.rs @@ -10,15 +10,14 @@ use crate::system::SystemDescription; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintZoneFilter; use nexus_types::deployment::OmicronZoneExternalIp; +use nexus_types::deployment::OmicronZoneExternalIpKind; use nexus_types::deployment::OmicronZoneNic; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledFilter; use nexus_types::inventory::Collection; -use omicron_common::api::external::Generation; use omicron_uuid_kinds::ExternalIpUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SledKind; -use sled_agent_client::types::OmicronZonesConfig; use typed_rng::TypedUuidRng; pub struct ExampleSystem { @@ -52,37 +51,14 @@ impl ExampleSystem { let mut input_builder = system .to_planning_input_builder() .expect("failed to make planning input builder"); - let mut inventory_builder = - system.to_collection_builder().expect("failed to build collection"); let base_input = input_builder.clone().build(); - // For each sled, have it report 0 zones in the initial inventory. - // This will enable us to build a blueprint from the initial - // inventory, which we can then use to build new blueprints. - for &sled_id in &sled_ids { - inventory_builder - .found_sled_omicron_zones( - "fake sled agent", - sled_id, - OmicronZonesConfig { - generation: Generation::new(), - zones: vec![], - }, - ) - .expect("recording Omicron zones"); - } - - let empty_zone_inventory = inventory_builder.build(); - let initial_blueprint = - BlueprintBuilder::build_initial_from_collection_seeded( - &empty_zone_inventory, - Generation::new(), - Generation::new(), - base_input.all_sled_ids(SledFilter::All), - "test suite", - (test_name, "ExampleSystem initial"), - ) - .unwrap(); + // Start with an empty blueprint containing only our sleds, no zones. 
+ let initial_blueprint = BlueprintBuilder::build_empty_with_sleds_seeded( + base_input.all_sled_ids(SledFilter::All), + "test suite", + (test_name, "ExampleSystem initial"), + ); // Now make a blueprint and collection with some zones on each sled. let mut builder = BlueprintBuilder::new_based_on( @@ -132,7 +108,9 @@ impl ExampleSystem { service_id, OmicronZoneExternalIp { id: ExternalIpUuid::new_v4(), - ip, + // TODO-cleanup This is potentially wrong; + // zone_type should tell us the IP kind. + kind: OmicronZoneExternalIpKind::Floating(ip), }, ) .expect("failed to add Omicron zone external IP"); @@ -144,7 +122,7 @@ impl ExampleSystem { OmicronZoneNic { id: nic.id, mac: nic.mac, - ip: nic.ip.into(), + ip: nic.ip, slot: nic.slot, primary: nic.primary, }, @@ -173,7 +151,8 @@ impl ExampleSystem { } } -/// Returns a collection and planning input describing a pretty simple system. +/// Returns a collection, planning input, and blueprint describing a pretty +/// simple system. /// /// The test name is used as the RNG seed. /// @@ -184,7 +163,7 @@ pub fn example( log: &slog::Logger, test_name: &str, nsleds: usize, -) -> (Collection, PlanningInput) { +) -> (Collection, PlanningInput, Blueprint) { let example = ExampleSystem::new(log, test_name, nsleds); - (example.collection, example.input) + (example.collection, example.input, example.blueprint) } diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 1c054de646..a252f9b821 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -13,6 +13,8 @@ use crate::blueprint_builder::Error; use nexus_types::deployment::Blueprint; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledFilter; +use nexus_types::external_api::views::SledPolicy; +use nexus_types::external_api::views::SledState; use nexus_types::inventory::Collection; use omicron_uuid_kinds::SledUuid; use slog::{info, warn, Logger}; @@ -72,9 +74,35 @@ impl<'a> Planner<'a> { } fn do_plan(&mut self) -> Result<(), Error> { - // The only thing this planner currently knows how to do is add services - // to a sled that's missing them. So let's see if we're in that case. + // We perform planning in two loops: the first one turns expunged sleds + // into expunged zones, and the second one adds services. + self.do_plan_expunge()?; + self.do_plan_add()?; + + Ok(()) + } + + fn do_plan_expunge(&mut self) -> Result<(), Error> { + // Remove services from sleds marked expunged. We use `SledFilter::All` + // and have a custom `needs_zone_expungement` function that allows us + // to produce better errors. + for (sled_id, sled_details) in self.input.all_sleds(SledFilter::All) { + // Does this sled need zone expungement based on the details? + let Some(reason) = + needs_zone_expungement(sled_details.state, sled_details.policy) + else { + continue; + }; + + // Perform the expungement. + self.blueprint.expunge_all_zones_for_sled(sled_id, reason)?; + } + + Ok(()) + } + + fn do_plan_add(&mut self) -> Result<(), Error> { // Internal DNS is a prerequisite for bringing up all other zones. At // this point, we assume that internal DNS (as a service) is already // functioning. At some point, this function will have to grow the @@ -330,12 +358,44 @@ impl<'a> Planner<'a> { } } +/// Returns `Some(reason)` if the sled needs its zones to be expunged, +/// based on the policy and state. 
+fn needs_zone_expungement( + state: SledState, + policy: SledPolicy, +) -> Option { + match state { + SledState::Active => {} + SledState::Decommissioned => { + // A decommissioned sled that still has resources attached to it is + // an illegal state, but representable. If we see a sled in this + // state, we should still expunge all zones in it, but parent code + // should warn on it. + return Some(ZoneExpungeReason::SledDecommissioned { policy }); + } + } + + match policy { + SledPolicy::InService { .. } => None, + SledPolicy::Expunged => Some(ZoneExpungeReason::SledExpunged), + } +} + +/// The reason a sled's zones need to be expunged. +/// +/// This is used only for introspection and logging -- it's not part of the +/// logical flow. +#[derive(Copy, Clone, Debug)] +pub(crate) enum ZoneExpungeReason { + SledDecommissioned { policy: SledPolicy }, + SledExpunged, +} + #[cfg(test)] mod test { use super::Planner; use crate::blueprint_builder::test::verify_blueprint; use crate::blueprint_builder::test::DEFAULT_N_SLEDS; - use crate::blueprint_builder::BlueprintBuilder; use crate::example::example; use crate::example::ExampleSystem; use crate::system::SledBuilder; @@ -348,7 +408,7 @@ mod test { use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZoneFilter; use nexus_types::deployment::BlueprintZoneType; - use nexus_types::deployment::SledFilter; + use nexus_types::deployment::DiffSledModified; use nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledProvisionPolicy; use nexus_types::external_api::views::SledState; @@ -356,6 +416,7 @@ mod test { use omicron_common::api::external::Generation; use omicron_test_utils::dev::test_setup_log; use omicron_uuid_kinds::GenericUuid; + use std::collections::HashMap; /// Runs through a basic sequence of blueprints for adding a sled #[test] @@ -363,34 +424,18 @@ mod test { static TEST_NAME: &str = "planner_basic_add_sled"; let logctx = test_setup_log(TEST_NAME); - // For our purposes, we don't care about the DNS generations. - let internal_dns_version = Generation::new(); - let external_dns_version = Generation::new(); - - // Use our example inventory collection. + // Use our example system. let mut example = ExampleSystem::new(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); - - // Build the initial blueprint. We don't bother verifying it here - // because there's a separate test for that. - let blueprint1 = - BlueprintBuilder::build_initial_from_collection_seeded( - &example.collection, - internal_dns_version, - external_dns_version, - example.input.all_sled_ids(SledFilter::All), - "the_test", - (TEST_NAME, "bp1"), - ) - .expect("failed to create initial blueprint"); - verify_blueprint(&blueprint1); + let blueprint1 = &example.blueprint; + verify_blueprint(blueprint1); // Now run the planner. It should do nothing because our initial // system didn't have any issues that the planner currently knows how to // fix. 
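Stepping back, the planner's `do_plan` now runs as two passes, and the ordering matters: expungement must land before the add pass considers placement. A stand-in sketch of that shape (the real expunge pass marks zone dispositions rather than deleting anything; everything here is simplified):

```rust
#[derive(Debug, PartialEq)]
enum SledPolicy {
    InService,
    Expunged,
}

struct Sled {
    policy: SledPolicy,
    zones: Vec<&'static str>,
}

fn do_plan(sleds: &mut [Sled]) {
    // Expunge first, then add, so new zones never land on cleared sleds.
    do_plan_expunge(sleds);
    do_plan_add(sleds);
}

fn do_plan_expunge(sleds: &mut [Sled]) {
    for sled in sleds.iter_mut().filter(|s| s.policy == SledPolicy::Expunged) {
        sled.zones.clear(); // stand-in; the real planner flips dispositions
    }
}

fn do_plan_add(sleds: &mut [Sled]) {
    for sled in sleds.iter_mut().filter(|s| s.policy == SledPolicy::InService) {
        if !sled.zones.contains(&"internal_ntp") {
            sled.zones.push("internal_ntp"); // NTP is the first prerequisite
        }
    }
}

fn main() {
    let mut sleds = vec![
        Sled { policy: SledPolicy::InService, zones: vec![] },
        Sled { policy: SledPolicy::Expunged, zones: vec!["nexus"] },
    ];
    do_plan(&mut sleds);
    assert_eq!(sleds[0].zones, vec!["internal_ntp"]);
    assert!(sleds[1].zones.is_empty());
}
```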
let blueprint2 = Planner::new_based_on( logctx.log.clone(), - &blueprint1, + blueprint1, &example.input, "no-op?", &example.collection, @@ -400,7 +445,7 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint2.diff_since_blueprint(&blueprint1).unwrap(); + let diff = blueprint2.diff_since_blueprint(blueprint1).unwrap(); println!("1 -> 2 (expected no changes):\n{}", diff.display()); assert_eq!(diff.sleds_added().len(), 0); assert_eq!(diff.sleds_removed().len(), 0); @@ -563,14 +608,10 @@ mod test { static TEST_NAME: &str = "planner_add_multiple_nexus_to_one_sled"; let logctx = test_setup_log(TEST_NAME); - // For our purposes, we don't care about the DNS generations. - let internal_dns_version = Generation::new(); - let external_dns_version = Generation::new(); - - // Use our example inventory collection as a starting point, but strip - // it down to just one sled. - let (sled_id, collection, input) = { - let (mut collection, input) = + // Use our example system as a starting point, but strip it down to just + // one sled. + let (sled_id, blueprint1, collection, input) = { + let (mut collection, input, mut blueprint) = example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); // Pick one sled ID to keep and remove the rest. @@ -583,22 +624,13 @@ mod test { assert_eq!(collection.sled_agents.len(), 1); assert_eq!(collection.omicron_zones.len(), 1); + blueprint + .blueprint_zones + .retain(|k, _v| keep_sled_id.as_untyped_uuid() == k); - (keep_sled_id, collection, builder.build()) + (keep_sled_id, blueprint, collection, builder.build()) }; - // Build the initial blueprint. - let blueprint1 = - BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - internal_dns_version, - external_dns_version, - input.all_sled_ids(SledFilter::All), - "the_test", - (TEST_NAME, "bp1"), - ) - .expect("failed to create initial blueprint"); - // This blueprint should only have 1 Nexus instance on the one sled we // kept. assert_eq!(blueprint1.blueprint_zones.len(), 1); @@ -661,22 +693,10 @@ mod test { "planner_spread_additional_nexus_zones_across_sleds"; let logctx = test_setup_log(TEST_NAME); - // Use our example inventory collection as a starting point. - let (collection, input) = + // Use our example system as a starting point. + let (collection, input, blueprint1) = example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); - // Build the initial blueprint. - let blueprint1 = - BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - Generation::new(), - Generation::new(), - input.all_sled_ids(SledFilter::All), - "the_test", - (TEST_NAME, "bp1"), - ) - .expect("failed to create initial blueprint"); - // This blueprint should only have 3 Nexus zones: one on each sled. assert_eq!(blueprint1.blueprint_zones.len(), 3); for sled_config in blueprint1.blueprint_zones.values() { @@ -748,25 +768,14 @@ mod test { "planner_nexus_allocation_skips_nonprovisionable_sleds"; let logctx = test_setup_log(TEST_NAME); - // Use our example inventory collection as a starting point. + // Use our example system as a starting point. // // Request two extra sleds here so we test non-provisionable, expunged, // and decommissioned sleds. (When we add more kinds of // non-provisionable states in the future, we'll have to add more // sleds.) - let (collection, input) = example(&logctx.log, TEST_NAME, 5); - - // Build the initial blueprint. 
- let blueprint1 = - BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - Generation::new(), - Generation::new(), - input.all_sled_ids(SledFilter::All), - "the_test", - (TEST_NAME, "bp1"), - ) - .expect("failed to create initial blueprint"); + let (collection, input, blueprint1) = + example(&logctx.log, TEST_NAME, 5); // This blueprint should only have 5 Nexus zones: one on each sled. assert_eq!(blueprint1.blueprint_zones.len(), 5); @@ -840,18 +849,41 @@ mod test { ); let diff = blueprint2.diff_since_blueprint(&blueprint1).unwrap(); - println!("1 -> 2 (added additional Nexus zones):\n{}", diff.display()); + println!( + "1 -> 2 (added additional Nexus zones, take 2 sleds out of service):\n{}", + diff.display() + ); assert_contents( "tests/output/planner_nonprovisionable_1_2.txt", &diff.display().to_string(), ); + + // The expunged and decommissioned sleds should have had all zones be + // marked as expunged. (Not removed! Just marked as expunged.) + // + // Note that at this point we're neither removing zones from the + // blueprint nor marking sleds as decommissioned -- we still need to do + // cleanup, and we aren't performing garbage collection on zones or + // sleds at the moment. + assert_eq!(diff.sleds_added().len(), 0); assert_eq!(diff.sleds_removed().len(), 0); - let sleds = diff.sleds_modified().collect::>(); + let mut sleds = diff.sleds_modified().collect::>(); + + let expunged_modified = sleds.remove(&expunged_sled_id).unwrap(); + assert_all_zones_expunged(&expunged_modified, "expunged sled"); - // Only 2 of the 3 sleds should get additional Nexus zones. We expect a - // total of 6 new Nexus zones, which should be split evenly between the - // two sleds, while the non-provisionable sled should be unchanged. + let decommissioned_modified = + sleds.remove(&decommissioned_sled_id).unwrap(); + assert_all_zones_expunged( + &decommissioned_modified, + "decommissioned sled", + ); + + // Only 2 of the 3 remaining sleds (not the non-provisionable sled) + // should get additional Nexus zones. We expect a total of 6 new Nexus + // zones, which should be split evenly between the two sleds, while the + // non-provisionable sled should be unchanged. assert_eq!(sleds.len(), 2); let mut total_new_nexus_zones = 0; for (sled_id, sled_changes) in sleds { @@ -971,4 +1003,40 @@ mod test { logctx.cleanup_successful(); } + + fn assert_all_zones_expunged(modified: &DiffSledModified, desc: &str) { + assert_eq!( + modified.generation_before.next(), + modified.generation_after, + "for {desc}, generation should have been bumped" + ); + + assert_eq!( + modified.zones_added().count(), + 0, + "for {desc}, no zones should have been added to blueprint" + ); + + // A zone disposition going to expunged *does not* mean that the + // zone is actually removed, i.e. `zones_removed` is still 0. Any + // zone removal will be part of some future garbage collection + // process that isn't currently defined. + + assert_eq!( + modified.zones_removed().len(), + 0, + "for {desc}, no zones should have been removed from blueprint" + ); + + // Run through all the common zones and ensure that all of them + // have been marked expunged. 
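The helper defined below checks the key invariant: expungement flips each zone's disposition in place and never shrinks the zone list. A standalone illustration with stand-in types:

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum Disposition {
    InService,
    Expunged,
}

#[derive(Debug, Clone)]
struct Zone {
    id: u32,
    disposition: Disposition,
}

// Expunge every zone on the "sled" by marking it, never by removing it; a
// later garbage-collection pass (not yet defined) owns actual removal.
fn expunge_all(zones: &mut [Zone]) {
    for zone in zones.iter_mut() {
        zone.disposition = Disposition::Expunged;
    }
}

fn main() {
    let mut zones = vec![
        Zone { id: 1, disposition: Disposition::InService },
        Zone { id: 2, disposition: Disposition::InService },
    ];
    let before = zones.len();
    expunge_all(&mut zones);
    // Same zone count, new dispositions: exactly what
    // assert_all_zones_expunged verifies via the blueprint diff.
    assert_eq!(zones.len(), before);
    assert!(zones.iter().all(|z| z.disposition == Disposition::Expunged));
}
```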
+ for zone in modified.zones_modified() { + assert_eq!( + zone.zone_after.disposition, + BlueprintZoneDisposition::Expunged, + "for {desc}, zone {} should have been marked expunged", + zone.zone_after.id + ); + } + } } diff --git a/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt b/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt index 7323008ad1..b421b8f383 100644 --- a/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt +++ b/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt @@ -1,5 +1,5 @@ from: collection 094d362b-7d79-49e7-a244-134276cca8fe -to: blueprint 9d2c007b-46f1-4ff2-8b4c-8a5767030f76 +to: blueprint e4aeb3b3-272f-4967-be34-2d34daa46aa1 ------------------------------------------------------------------------------------------------------ zone type zone ID disposition underlay IP status @@ -7,7 +7,7 @@ to: blueprint 9d2c007b-46f1-4ff2-8b4c-8a5767030f76 UNCHANGED SLEDS: - sled 08c7046b-c9c4-4368-881f-19a72df22143: zones at generation 2 + sled 08c7046b-c9c4-4368-881f-19a72df22143: blueprint zones at generation 2 crucible 44afce85-3377-4b20-a398-517c1579df4d in service fd00:1122:3344:103::23 crucible 4644ea0c-0ec3-41be-a356-660308e1c3fc in service fd00:1122:3344:103::2c crucible 55f4d117-0b9d-4256-a2c0-f46d3ed5fff9 in service fd00:1122:3344:103::25 @@ -21,7 +21,7 @@ to: blueprint 9d2c007b-46f1-4ff2-8b4c-8a5767030f76 internal_ntp c81c9d4a-36d7-4796-9151-f564d3735152 in service fd00:1122:3344:103::21 nexus b2573120-9c91-4ed7-8b4f-a7bfe8dbc807 in service fd00:1122:3344:103::22 - sled 84ac367e-9b03-4e9d-a846-df1a08deee6c: zones at generation 2 + sled 84ac367e-9b03-4e9d-a846-df1a08deee6c: blueprint zones at generation 2 crucible 0faa9350-2c02-47c7-a0a6-9f4afd69152c in service fd00:1122:3344:101::2c crucible 5b44003e-1a3d-4152-b606-872c72efce0e in service fd00:1122:3344:101::25 crucible 943fea7a-9458-4935-9dc7-01ee5cfe5a02 in service fd00:1122:3344:101::29 @@ -35,7 +35,7 @@ to: blueprint 9d2c007b-46f1-4ff2-8b4c-8a5767030f76 internal_ntp 38b047ea-e3de-4859-b8e0-70cac5871446 in service fd00:1122:3344:101::21 nexus fb36b9dc-273a-4bc3-aaa9-19ee4d0ef552 in service fd00:1122:3344:101::22 - sled be7f4375-2a6b-457f-b1a4-3074a715e5fe: zones at generation 2 + sled be7f4375-2a6b-457f-b1a4-3074a715e5fe: blueprint zones at generation 2 crucible 248db330-56e6-4c7e-b5ff-9cd6cbcb210a in service fd00:1122:3344:102::2c crucible 353b0aff-4c71-4fae-a6bd-adcb1d2a1a1d in service fd00:1122:3344:102::29 crucible 4330134c-41b9-4097-aa0b-3eaefa06d473 in service fd00:1122:3344:102::24 diff --git a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt index 3aad697aa0..b135303ead 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt @@ -7,7 +7,7 @@ to: blueprint 4171ad05-89dd-474b-846b-b007e4346366 UNCHANGED SLEDS: - sled 41f45d9f-766e-4ca6-a881-61ee45c80f57: zones at generation 2 + sled 41f45d9f-766e-4ca6-a881-61ee45c80f57: blueprint zones at generation 2 crucible 322ee9f1-8903-4542-a0a8-a54cefabdeca in service fd00:1122:3344:103::24 crucible 4ab1650f-32c5-447f-939d-64b8103a7645 in service fd00:1122:3344:103::2a crucible 64aa65f8-1ccb-4cd6-9953-027aebdac8ff in service fd00:1122:3344:103::27 @@ -21,7 +21,7 @@ to: blueprint 4171ad05-89dd-474b-846b-b007e4346366 internal_ntp 
267ed614-92af-4b9d-bdba-c2881c2e43a2 in service fd00:1122:3344:103::21 nexus cc816cfe-3869-4dde-b596-397d41198628 in service fd00:1122:3344:103::22 - sled 43677374-8d2f-4deb-8a41-eeea506db8e0: zones at generation 2 + sled 43677374-8d2f-4deb-8a41-eeea506db8e0: blueprint zones at generation 2 crucible 02acbe6a-1c88-47e3-94c3-94084cbde098 in service fd00:1122:3344:101::27 crucible 07c3c805-8888-4fe5-9543-3d2479dbe6f3 in service fd00:1122:3344:101::26 crucible 10d98a73-ec88-4aff-a7e8-7db6a87880e6 in service fd00:1122:3344:101::24 @@ -35,7 +35,7 @@ to: blueprint 4171ad05-89dd-474b-846b-b007e4346366 internal_ntp 08c7f8aa-1ea9-469b-8cac-2fdbfc11ebcb in service fd00:1122:3344:101::21 nexus c66ab6d5-ff7a-46d1-9fd0-70cefa352d25 in service fd00:1122:3344:101::22 - sled 590e3034-d946-4166-b0e5-2d0034197a07: zones at generation 2 + sled 590e3034-d946-4166-b0e5-2d0034197a07: blueprint zones at generation 2 crucible 18f8fe40-646e-4962-b17a-20e201f3a6e5 in service fd00:1122:3344:102::2a crucible 56d5d7cf-db2c-40a3-a775-003241ad4820 in service fd00:1122:3344:102::29 crucible 6af7f4d6-33b6-4eb3-a146-d8e9e4ae9d66 in service fd00:1122:3344:102::2b @@ -51,7 +51,7 @@ to: blueprint 4171ad05-89dd-474b-846b-b007e4346366 ADDED SLEDS: -+ sled b59ec570-2abb-4017-80ce-129d94e7a025: zones at generation 2 ++ sled b59ec570-2abb-4017-80ce-129d94e7a025: blueprint zones at generation 2 + internal_ntp 2d73d30e-ca47-46a8-9c12-917d4ab824b6 in service fd00:1122:3344:104::21 added METADATA: diff --git a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt index 233821412f..89120cf377 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt @@ -7,7 +7,7 @@ to: blueprint f432fcd5-1284-4058-8b4a-9286a3de6163 UNCHANGED SLEDS: - sled 41f45d9f-766e-4ca6-a881-61ee45c80f57: zones at generation 2 + sled 41f45d9f-766e-4ca6-a881-61ee45c80f57: blueprint zones at generation 2 crucible 322ee9f1-8903-4542-a0a8-a54cefabdeca in service fd00:1122:3344:103::24 crucible 4ab1650f-32c5-447f-939d-64b8103a7645 in service fd00:1122:3344:103::2a crucible 64aa65f8-1ccb-4cd6-9953-027aebdac8ff in service fd00:1122:3344:103::27 @@ -21,7 +21,7 @@ to: blueprint f432fcd5-1284-4058-8b4a-9286a3de6163 internal_ntp 267ed614-92af-4b9d-bdba-c2881c2e43a2 in service fd00:1122:3344:103::21 nexus cc816cfe-3869-4dde-b596-397d41198628 in service fd00:1122:3344:103::22 - sled 43677374-8d2f-4deb-8a41-eeea506db8e0: zones at generation 2 + sled 43677374-8d2f-4deb-8a41-eeea506db8e0: blueprint zones at generation 2 crucible 02acbe6a-1c88-47e3-94c3-94084cbde098 in service fd00:1122:3344:101::27 crucible 07c3c805-8888-4fe5-9543-3d2479dbe6f3 in service fd00:1122:3344:101::26 crucible 10d98a73-ec88-4aff-a7e8-7db6a87880e6 in service fd00:1122:3344:101::24 @@ -35,7 +35,7 @@ to: blueprint f432fcd5-1284-4058-8b4a-9286a3de6163 internal_ntp 08c7f8aa-1ea9-469b-8cac-2fdbfc11ebcb in service fd00:1122:3344:101::21 nexus c66ab6d5-ff7a-46d1-9fd0-70cefa352d25 in service fd00:1122:3344:101::22 - sled 590e3034-d946-4166-b0e5-2d0034197a07: zones at generation 2 + sled 590e3034-d946-4166-b0e5-2d0034197a07: blueprint zones at generation 2 crucible 18f8fe40-646e-4962-b17a-20e201f3a6e5 in service fd00:1122:3344:102::2a crucible 56d5d7cf-db2c-40a3-a775-003241ad4820 in service fd00:1122:3344:102::29 crucible 6af7f4d6-33b6-4eb3-a146-d8e9e4ae9d66 in service fd00:1122:3344:102::2b @@ -51,7 +51,7 @@ to: 
blueprint f432fcd5-1284-4058-8b4a-9286a3de6163 MODIFIED SLEDS: -* sled b59ec570-2abb-4017-80ce-129d94e7a025: zones at generation: 2 -> 3 +* sled b59ec570-2abb-4017-80ce-129d94e7a025: blueprint zones at generation: 2 -> 3 internal_ntp 2d73d30e-ca47-46a8-9c12-917d4ab824b6 in service fd00:1122:3344:104::21 + crucible 1a20ee3c-f66e-4fca-ab85-2a248aa3d79d in service fd00:1122:3344:104::2b added + crucible 28852beb-d0e5-4cba-9adb-e7f0cd4bb864 in service fd00:1122:3344:104::29 added diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt index c19403906e..ecc5b125d9 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt @@ -1,89 +1,137 @@ -from: blueprint 55502b1b-e255-438b-a16a-2680a4b5f962 +from: blueprint 4d4e6c38-cd95-4c4e-8f45-6af4d686964b to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 - ------------------------------------------------------------------------------------------------------ - zone type zone ID disposition underlay IP status - ------------------------------------------------------------------------------------------------------ - - UNCHANGED SLEDS: - - sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: zones at generation 2 - crucible 19fbc4f8-a683-4f22-8f5a-e74782b935be in service fd00:1122:3344:105::26 - crucible 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service fd00:1122:3344:105::2c - crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in service fd00:1122:3344:105::27 - crucible 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in service fd00:1122:3344:105::23 - crucible 9f0abbad-dbd3-4d43-9675-78092217ffd9 in service fd00:1122:3344:105::25 - crucible b0c63f48-01ea-4aae-bb26-fb0dd59d1662 in service fd00:1122:3344:105::28 - crucible c406da50-34b9-4bb4-a460-8f49875d2a6a in service fd00:1122:3344:105::24 - crucible d660d7ed-28c0-45ae-9ace-dc3ecf7e8786 in service fd00:1122:3344:105::2a - crucible e98cc0de-abf6-4da4-a20d-d05c7a9bb1d7 in service fd00:1122:3344:105::2b - crucible f55e6aaf-e8fc-4913-9e3c-8cd1bd4bdad3 in service fd00:1122:3344:105::29 - internal_ntp 7f4e9f9f-08f8-4d14-885d-e977c05525ad in service fd00:1122:3344:105::21 - nexus 6dff7633-66bb-4924-a6ff-2c896e66964b in service fd00:1122:3344:105::22 - - sled 48d95fef-bc9f-4f50-9a53-1e075836291d: zones at generation 2 - crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 in service fd00:1122:3344:103::2c - crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea in service fd00:1122:3344:103::25 - crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f in service fd00:1122:3344:103::27 - crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 in service fd00:1122:3344:103::28 - crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb in service fd00:1122:3344:103::24 - crucible 67622d61-2df4-414d-aa0e-d1277265f405 in service fd00:1122:3344:103::23 - crucible b91b271d-8d80-4f49-99a0-34006ae86063 in service fd00:1122:3344:103::2a - crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 in service fd00:1122:3344:103::26 - crucible e39d7c9e-182b-48af-af87-58079d723583 in service fd00:1122:3344:103::29 - crucible f69f92a1-5007-4bb0-a85b-604dc217154b in service fd00:1122:3344:103::2b - internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 in service fd00:1122:3344:103::21 - nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb in service fd00:1122:3344:103::22 - - sled 68d24ac5-f341-49ea-a92a-0381b52ab387: zones at generation 2 - crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 in service 
fd00:1122:3344:102::2c - crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc in service fd00:1122:3344:102::23 - crucible 6464d025-4652-4948-919e-740bec5699b1 in service fd00:1122:3344:102::24 - crucible 6939ce48-b17c-4616-b176-8a419a7697be in service fd00:1122:3344:102::29 - crucible 878dfddd-3113-4197-a3ea-e0d4dbe9b476 in service fd00:1122:3344:102::25 - crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 in service fd00:1122:3344:102::2b - crucible 9fd52961-426f-4e62-a644-b70871103fca in service fd00:1122:3344:102::26 - crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 in service fd00:1122:3344:102::27 - crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 in service fd00:1122:3344:102::28 - crucible c407795c-6c8b-428e-8ab8-b962913c447f in service fd00:1122:3344:102::2a - internal_ntp f3f2e4f3-0985-4ef6-8336-ce479382d05d in service fd00:1122:3344:102::21 - nexus 01d58626-e1b0-480f-96be-ac784863c7dc in service fd00:1122:3344:102::22 - - MODIFIED SLEDS: - -* sled 75bc286f-2b4b-482c-9431-59272af529da: zones at generation: 2 -> 3 - crucible 15bb9def-69b8-4d2e-b04f-9fee1143387c in service fd00:1122:3344:104::25 - crucible 23a8fa2b-ef3e-4017-a43f-f7a83953bd7c in service fd00:1122:3344:104::2c - crucible 621509d6-3772-4009-aca1-35eefd1098fb in service fd00:1122:3344:104::28 - crucible 85b8c68a-160d-461d-94dd-1baf175fa75c in service fd00:1122:3344:104::2a - crucible 996d7570-b0df-46d5-aaa4-0c97697cf484 in service fd00:1122:3344:104::26 - crucible a732c489-d29a-4f75-b900-5966385943af in service fd00:1122:3344:104::29 - crucible b1783e95-9598-451d-b6ba-c50b52b428c3 in service fd00:1122:3344:104::24 - crucible c6dd531e-2d1d-423b-acc8-358533dab78c in service fd00:1122:3344:104::27 - crucible e4b3e159-3dbe-48cb-8497-e3da92a90e5a in service fd00:1122:3344:104::23 - crucible f0ff59e8-4105-4980-a4bb-a1f4c58de1e3 in service fd00:1122:3344:104::2b - internal_ntp 57b96d5c-b71e-43e4-8869-7d514003d00d in service fd00:1122:3344:104::21 - nexus b4947d31-f70e-4ee0-8817-0ca6cea9b16b in service fd00:1122:3344:104::22 -+ nexus 2ec75441-3d7d-4b4b-9614-af03de5a3666 in service fd00:1122:3344:104::2d added -+ nexus 508abd03-cbfe-4654-9a6d-7f15a1ad32e5 in service fd00:1122:3344:104::2e added -+ nexus 59950bc8-1497-44dd-8cbf-b6502ba921b2 in service fd00:1122:3344:104::2f added - -* sled affab35f-600a-4109-8ea0-34a067a4e0bc: zones at generation: 2 -> 3 - crucible 0dfbf374-9ef9-430f-b06d-f271bf7f84c4 in service fd00:1122:3344:101::27 - crucible 3aa07966-5899-4789-ace5-f8eeb375c6c3 in service fd00:1122:3344:101::24 - crucible 4ad0e9da-08f8-4d40-b4d3-d17e711b5bbf in service fd00:1122:3344:101::29 - crucible 72c5a909-077d-4ec1-a9d5-ae64ef9d716e in service fd00:1122:3344:101::26 - crucible 95482c25-1e7f-43e8-adf1-e3548a1b3ae0 in service fd00:1122:3344:101::23 - crucible a1c03689-fc62-4ea5-bb72-4d01f5138614 in service fd00:1122:3344:101::2a - crucible a568e92e-4fbd-4b69-acd8-f16277073031 in service fd00:1122:3344:101::2c - crucible bf79a56a-97af-4cc4-94a5-8b20d64c2cda in service fd00:1122:3344:101::28 - crucible c60379ba-4e30-4628-a79a-0ae509aef4c5 in service fd00:1122:3344:101::25 - crucible d47f4996-fac0-4657-bcea-01b1fee6404d in service fd00:1122:3344:101::2b - internal_ntp f1a7b9a7-fc6a-4b23-b829-045ff33117ff in service fd00:1122:3344:101::21 - nexus 15c103f0-ac63-423b-ba5d-1b5fcd563ba3 in service fd00:1122:3344:101::22 -+ nexus 3ca5292f-8a59-4475-bb72-0f43714d0fff in service fd00:1122:3344:101::2e added -+ nexus 99f6d544-8599-4e2b-a55a-82d9e0034662 in service fd00:1122:3344:101::2d added -+ nexus c26b3bda-5561-44a1-a69f-22103fe209a1 in service 
fd00:1122:3344:101::2f added + -------------------------------------------------------------------------------------------------------- + zone type zone ID disposition underlay IP status + -------------------------------------------------------------------------------------------------------- + + UNCHANGED SLEDS: + + sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: blueprint zones at generation 2 + crucible 19fbc4f8-a683-4f22-8f5a-e74782b935be in service fd00:1122:3344:105::26 + crucible 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service fd00:1122:3344:105::2c + crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in service fd00:1122:3344:105::27 + crucible 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in service fd00:1122:3344:105::23 + crucible 9f0abbad-dbd3-4d43-9675-78092217ffd9 in service fd00:1122:3344:105::25 + crucible b0c63f48-01ea-4aae-bb26-fb0dd59d1662 in service fd00:1122:3344:105::28 + crucible c406da50-34b9-4bb4-a460-8f49875d2a6a in service fd00:1122:3344:105::24 + crucible d660d7ed-28c0-45ae-9ace-dc3ecf7e8786 in service fd00:1122:3344:105::2a + crucible e98cc0de-abf6-4da4-a20d-d05c7a9bb1d7 in service fd00:1122:3344:105::2b + crucible f55e6aaf-e8fc-4913-9e3c-8cd1bd4bdad3 in service fd00:1122:3344:105::29 + internal_ntp 7f4e9f9f-08f8-4d14-885d-e977c05525ad in service fd00:1122:3344:105::21 + nexus 6dff7633-66bb-4924-a6ff-2c896e66964b in service fd00:1122:3344:105::22 + + MODIFIED SLEDS: + +* sled 48d95fef-bc9f-4f50-9a53-1e075836291d: blueprint zones at generation: 2 -> 3 +- crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 in service fd00:1122:3344:103::2c modified ++ ├─ expunged fd00:1122:3344:103::2c +* └─ changed: disposition +- crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea in service fd00:1122:3344:103::25 modified ++ ├─ expunged fd00:1122:3344:103::25 +* └─ changed: disposition +- crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f in service fd00:1122:3344:103::27 modified ++ ├─ expunged fd00:1122:3344:103::27 +* └─ changed: disposition +- crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 in service fd00:1122:3344:103::28 modified ++ ├─ expunged fd00:1122:3344:103::28 +* └─ changed: disposition +- crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb in service fd00:1122:3344:103::24 modified ++ ├─ expunged fd00:1122:3344:103::24 +* └─ changed: disposition +- crucible 67622d61-2df4-414d-aa0e-d1277265f405 in service fd00:1122:3344:103::23 modified ++ ├─ expunged fd00:1122:3344:103::23 +* └─ changed: disposition +- crucible b91b271d-8d80-4f49-99a0-34006ae86063 in service fd00:1122:3344:103::2a modified ++ ├─ expunged fd00:1122:3344:103::2a +* └─ changed: disposition +- crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 in service fd00:1122:3344:103::26 modified ++ ├─ expunged fd00:1122:3344:103::26 +* └─ changed: disposition +- crucible e39d7c9e-182b-48af-af87-58079d723583 in service fd00:1122:3344:103::29 modified ++ ├─ expunged fd00:1122:3344:103::29 +* └─ changed: disposition +- crucible f69f92a1-5007-4bb0-a85b-604dc217154b in service fd00:1122:3344:103::2b modified ++ ├─ expunged fd00:1122:3344:103::2b +* └─ changed: disposition +- internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 in service fd00:1122:3344:103::21 modified ++ ├─ expunged fd00:1122:3344:103::21 +* └─ changed: disposition +- nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb in service fd00:1122:3344:103::22 modified ++ ├─ expunged fd00:1122:3344:103::22 +* └─ changed: disposition + +* sled 68d24ac5-f341-49ea-a92a-0381b52ab387: blueprint zones at generation: 2 -> 3 +- crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 in service fd00:1122:3344:102::2c modified ++ ├─ 
expunged fd00:1122:3344:102::2c +* └─ changed: disposition +- crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc in service fd00:1122:3344:102::23 modified ++ ├─ expunged fd00:1122:3344:102::23 +* └─ changed: disposition +- crucible 6464d025-4652-4948-919e-740bec5699b1 in service fd00:1122:3344:102::24 modified ++ ├─ expunged fd00:1122:3344:102::24 +* └─ changed: disposition +- crucible 6939ce48-b17c-4616-b176-8a419a7697be in service fd00:1122:3344:102::29 modified ++ ├─ expunged fd00:1122:3344:102::29 +* └─ changed: disposition +- crucible 878dfddd-3113-4197-a3ea-e0d4dbe9b476 in service fd00:1122:3344:102::25 modified ++ ├─ expunged fd00:1122:3344:102::25 +* └─ changed: disposition +- crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 in service fd00:1122:3344:102::2b modified ++ ├─ expunged fd00:1122:3344:102::2b +* └─ changed: disposition +- crucible 9fd52961-426f-4e62-a644-b70871103fca in service fd00:1122:3344:102::26 modified ++ ├─ expunged fd00:1122:3344:102::26 +* └─ changed: disposition +- crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 in service fd00:1122:3344:102::27 modified ++ ├─ expunged fd00:1122:3344:102::27 +* └─ changed: disposition +- crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 in service fd00:1122:3344:102::28 modified ++ ├─ expunged fd00:1122:3344:102::28 +* └─ changed: disposition +- crucible c407795c-6c8b-428e-8ab8-b962913c447f in service fd00:1122:3344:102::2a modified ++ ├─ expunged fd00:1122:3344:102::2a +* └─ changed: disposition +- internal_ntp f3f2e4f3-0985-4ef6-8336-ce479382d05d in service fd00:1122:3344:102::21 modified ++ ├─ expunged fd00:1122:3344:102::21 +* └─ changed: disposition +- nexus 01d58626-e1b0-480f-96be-ac784863c7dc in service fd00:1122:3344:102::22 modified ++ ├─ expunged fd00:1122:3344:102::22 +* └─ changed: disposition + +* sled 75bc286f-2b4b-482c-9431-59272af529da: blueprint zones at generation: 2 -> 3 + crucible 15bb9def-69b8-4d2e-b04f-9fee1143387c in service fd00:1122:3344:104::25 + crucible 23a8fa2b-ef3e-4017-a43f-f7a83953bd7c in service fd00:1122:3344:104::2c + crucible 621509d6-3772-4009-aca1-35eefd1098fb in service fd00:1122:3344:104::28 + crucible 85b8c68a-160d-461d-94dd-1baf175fa75c in service fd00:1122:3344:104::2a + crucible 996d7570-b0df-46d5-aaa4-0c97697cf484 in service fd00:1122:3344:104::26 + crucible a732c489-d29a-4f75-b900-5966385943af in service fd00:1122:3344:104::29 + crucible b1783e95-9598-451d-b6ba-c50b52b428c3 in service fd00:1122:3344:104::24 + crucible c6dd531e-2d1d-423b-acc8-358533dab78c in service fd00:1122:3344:104::27 + crucible e4b3e159-3dbe-48cb-8497-e3da92a90e5a in service fd00:1122:3344:104::23 + crucible f0ff59e8-4105-4980-a4bb-a1f4c58de1e3 in service fd00:1122:3344:104::2b + internal_ntp 57b96d5c-b71e-43e4-8869-7d514003d00d in service fd00:1122:3344:104::21 + nexus b4947d31-f70e-4ee0-8817-0ca6cea9b16b in service fd00:1122:3344:104::22 ++ nexus 2ec75441-3d7d-4b4b-9614-af03de5a3666 in service fd00:1122:3344:104::2d added ++ nexus 508abd03-cbfe-4654-9a6d-7f15a1ad32e5 in service fd00:1122:3344:104::2e added ++ nexus 59950bc8-1497-44dd-8cbf-b6502ba921b2 in service fd00:1122:3344:104::2f added + +* sled affab35f-600a-4109-8ea0-34a067a4e0bc: blueprint zones at generation: 2 -> 3 + crucible 0dfbf374-9ef9-430f-b06d-f271bf7f84c4 in service fd00:1122:3344:101::27 + crucible 3aa07966-5899-4789-ace5-f8eeb375c6c3 in service fd00:1122:3344:101::24 + crucible 4ad0e9da-08f8-4d40-b4d3-d17e711b5bbf in service fd00:1122:3344:101::29 + crucible 72c5a909-077d-4ec1-a9d5-ae64ef9d716e in service fd00:1122:3344:101::26 + crucible 
95482c25-1e7f-43e8-adf1-e3548a1b3ae0 in service fd00:1122:3344:101::23 + crucible a1c03689-fc62-4ea5-bb72-4d01f5138614 in service fd00:1122:3344:101::2a + crucible a568e92e-4fbd-4b69-acd8-f16277073031 in service fd00:1122:3344:101::2c + crucible bf79a56a-97af-4cc4-94a5-8b20d64c2cda in service fd00:1122:3344:101::28 + crucible c60379ba-4e30-4628-a79a-0ae509aef4c5 in service fd00:1122:3344:101::25 + crucible d47f4996-fac0-4657-bcea-01b1fee6404d in service fd00:1122:3344:101::2b + internal_ntp f1a7b9a7-fc6a-4b23-b829-045ff33117ff in service fd00:1122:3344:101::21 + nexus 15c103f0-ac63-423b-ba5d-1b5fcd563ba3 in service fd00:1122:3344:101::22 ++ nexus 3ca5292f-8a59-4475-bb72-0f43714d0fff in service fd00:1122:3344:101::2e added ++ nexus 99f6d544-8599-4e2b-a55a-82d9e0034662 in service fd00:1122:3344:101::2d added ++ nexus c26b3bda-5561-44a1-a69f-22103fe209a1 in service fd00:1122:3344:101::2f added METADATA: internal DNS version: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt index 74dd0fbbaf..00ca05b4b8 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt @@ -7,7 +7,7 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 UNCHANGED SLEDS: - sled 75bc286f-2b4b-482c-9431-59272af529da: zones at generation 3 + sled 75bc286f-2b4b-482c-9431-59272af529da: blueprint zones at generation 3 crucible 15bb9def-69b8-4d2e-b04f-9fee1143387c in service fd00:1122:3344:104::25 crucible 23a8fa2b-ef3e-4017-a43f-f7a83953bd7c in service fd00:1122:3344:104::2c crucible 621509d6-3772-4009-aca1-35eefd1098fb in service fd00:1122:3344:104::28 @@ -24,7 +24,7 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 nexus 59950bc8-1497-44dd-8cbf-b6502ba921b2 in service fd00:1122:3344:104::2f nexus b4947d31-f70e-4ee0-8817-0ca6cea9b16b in service fd00:1122:3344:104::22 - sled affab35f-600a-4109-8ea0-34a067a4e0bc: zones at generation 3 + sled affab35f-600a-4109-8ea0-34a067a4e0bc: blueprint zones at generation 3 crucible 0dfbf374-9ef9-430f-b06d-f271bf7f84c4 in service fd00:1122:3344:101::27 crucible 3aa07966-5899-4789-ace5-f8eeb375c6c3 in service fd00:1122:3344:101::24 crucible 4ad0e9da-08f8-4d40-b4d3-d17e711b5bbf in service fd00:1122:3344:101::29 @@ -43,23 +43,23 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 REMOVED SLEDS: -- sled 68d24ac5-f341-49ea-a92a-0381b52ab387: zones at generation 2 -- crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 in service fd00:1122:3344:102::2c removed -- crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc in service fd00:1122:3344:102::23 removed -- crucible 6464d025-4652-4948-919e-740bec5699b1 in service fd00:1122:3344:102::24 removed -- crucible 6939ce48-b17c-4616-b176-8a419a7697be in service fd00:1122:3344:102::29 removed -- crucible 878dfddd-3113-4197-a3ea-e0d4dbe9b476 in service fd00:1122:3344:102::25 removed -- crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 in service fd00:1122:3344:102::2b removed -- crucible 9fd52961-426f-4e62-a644-b70871103fca in service fd00:1122:3344:102::26 removed -- crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 in service fd00:1122:3344:102::27 removed -- crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 in service fd00:1122:3344:102::28 removed -- crucible c407795c-6c8b-428e-8ab8-b962913c447f in service fd00:1122:3344:102::2a removed -- internal_ntp f3f2e4f3-0985-4ef6-8336-ce479382d05d in service fd00:1122:3344:102::21 removed 
-- nexus 01d58626-e1b0-480f-96be-ac784863c7dc in service fd00:1122:3344:102::22 removed +- sled 68d24ac5-f341-49ea-a92a-0381b52ab387: blueprint zones at generation 3 +- crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 expunged fd00:1122:3344:102::2c removed +- crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc expunged fd00:1122:3344:102::23 removed +- crucible 6464d025-4652-4948-919e-740bec5699b1 expunged fd00:1122:3344:102::24 removed +- crucible 6939ce48-b17c-4616-b176-8a419a7697be expunged fd00:1122:3344:102::29 removed +- crucible 878dfddd-3113-4197-a3ea-e0d4dbe9b476 expunged fd00:1122:3344:102::25 removed +- crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 expunged fd00:1122:3344:102::2b removed +- crucible 9fd52961-426f-4e62-a644-b70871103fca expunged fd00:1122:3344:102::26 removed +- crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 expunged fd00:1122:3344:102::27 removed +- crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 expunged fd00:1122:3344:102::28 removed +- crucible c407795c-6c8b-428e-8ab8-b962913c447f expunged fd00:1122:3344:102::2a removed +- internal_ntp f3f2e4f3-0985-4ef6-8336-ce479382d05d expunged fd00:1122:3344:102::21 removed +- nexus 01d58626-e1b0-480f-96be-ac784863c7dc expunged fd00:1122:3344:102::22 removed MODIFIED SLEDS: -* sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: zones at generation: 2 +* sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: blueprint zones at generation: 2 ! warning: generation should have changed crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in service fd00:1122:3344:105::27 crucible 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in service fd00:1122:3344:105::23 @@ -80,19 +80,19 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 + ├─ in service fd00:1122:3344:105::22 * └─ changed: zone type config -* sled 48d95fef-bc9f-4f50-9a53-1e075836291d: zones at generation: 2 -> 3 -- crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 in service fd00:1122:3344:103::2c removed -- crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea in service fd00:1122:3344:103::25 removed -- crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f in service fd00:1122:3344:103::27 removed -- crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 in service fd00:1122:3344:103::28 removed -- crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb in service fd00:1122:3344:103::24 removed -- crucible 67622d61-2df4-414d-aa0e-d1277265f405 in service fd00:1122:3344:103::23 removed -- crucible b91b271d-8d80-4f49-99a0-34006ae86063 in service fd00:1122:3344:103::2a removed -- crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 in service fd00:1122:3344:103::26 removed -- crucible e39d7c9e-182b-48af-af87-58079d723583 in service fd00:1122:3344:103::29 removed -- crucible f69f92a1-5007-4bb0-a85b-604dc217154b in service fd00:1122:3344:103::2b removed -- internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 in service fd00:1122:3344:103::21 removed -- nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb in service fd00:1122:3344:103::22 removed +* sled 48d95fef-bc9f-4f50-9a53-1e075836291d: blueprint zones at generation: 3 -> 4 +- crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 expunged fd00:1122:3344:103::2c removed +- crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea expunged fd00:1122:3344:103::25 removed +- crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f expunged fd00:1122:3344:103::27 removed +- crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 expunged fd00:1122:3344:103::28 removed +- crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb expunged fd00:1122:3344:103::24 removed +- crucible 67622d61-2df4-414d-aa0e-d1277265f405 expunged fd00:1122:3344:103::23 removed +- crucible 
b91b271d-8d80-4f49-99a0-34006ae86063 expunged fd00:1122:3344:103::2a removed +- crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 expunged fd00:1122:3344:103::26 removed +- crucible e39d7c9e-182b-48af-af87-58079d723583 expunged fd00:1122:3344:103::29 removed +- crucible f69f92a1-5007-4bb0-a85b-604dc217154b expunged fd00:1122:3344:103::2b removed +- internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 expunged fd00:1122:3344:103::21 removed +- nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb expunged fd00:1122:3344:103::22 removed METADATA: internal DNS version: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt index 92cfd1f651..623bf0a756 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt @@ -1,11 +1,11 @@ blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 -parent: 55502b1b-e255-438b-a16a-2680a4b5f962 +parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b -------------------------------------------------------------------------------------------- zone type zone ID disposition underlay IP -------------------------------------------------------------------------------------------- - sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: zones at generation 2 + sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: blueprint zones at generation 2 crucible 19fbc4f8-a683-4f22-8f5a-e74782b935be in service fd00:1122:3344:105::26 crucible 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service fd00:1122:3344:105::2c crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in service fd00:1122:3344:105::27 @@ -19,35 +19,35 @@ parent: 55502b1b-e255-438b-a16a-2680a4b5f962 internal_ntp 7f4e9f9f-08f8-4d14-885d-e977c05525ad in service fd00:1122:3344:105::21 nexus 6dff7633-66bb-4924-a6ff-2c896e66964b in service fd00:1122:3344:105::22 - sled 48d95fef-bc9f-4f50-9a53-1e075836291d: zones at generation 2 - crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 in service fd00:1122:3344:103::2c - crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea in service fd00:1122:3344:103::25 - crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f in service fd00:1122:3344:103::27 - crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 in service fd00:1122:3344:103::28 - crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb in service fd00:1122:3344:103::24 - crucible 67622d61-2df4-414d-aa0e-d1277265f405 in service fd00:1122:3344:103::23 - crucible b91b271d-8d80-4f49-99a0-34006ae86063 in service fd00:1122:3344:103::2a - crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 in service fd00:1122:3344:103::26 - crucible e39d7c9e-182b-48af-af87-58079d723583 in service fd00:1122:3344:103::29 - crucible f69f92a1-5007-4bb0-a85b-604dc217154b in service fd00:1122:3344:103::2b - internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 in service fd00:1122:3344:103::21 - nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb in service fd00:1122:3344:103::22 + sled 48d95fef-bc9f-4f50-9a53-1e075836291d: blueprint zones at generation 3 + crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 expunged fd00:1122:3344:103::2c + crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea expunged fd00:1122:3344:103::25 + crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f expunged fd00:1122:3344:103::27 + crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 expunged fd00:1122:3344:103::28 + crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb expunged fd00:1122:3344:103::24 + crucible 67622d61-2df4-414d-aa0e-d1277265f405 expunged fd00:1122:3344:103::23 + crucible 
b91b271d-8d80-4f49-99a0-34006ae86063 expunged fd00:1122:3344:103::2a + crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 expunged fd00:1122:3344:103::26 + crucible e39d7c9e-182b-48af-af87-58079d723583 expunged fd00:1122:3344:103::29 + crucible f69f92a1-5007-4bb0-a85b-604dc217154b expunged fd00:1122:3344:103::2b + internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 expunged fd00:1122:3344:103::21 + nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb expunged fd00:1122:3344:103::22 - sled 68d24ac5-f341-49ea-a92a-0381b52ab387: zones at generation 2 - crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 in service fd00:1122:3344:102::2c - crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc in service fd00:1122:3344:102::23 - crucible 6464d025-4652-4948-919e-740bec5699b1 in service fd00:1122:3344:102::24 - crucible 6939ce48-b17c-4616-b176-8a419a7697be in service fd00:1122:3344:102::29 - crucible 878dfddd-3113-4197-a3ea-e0d4dbe9b476 in service fd00:1122:3344:102::25 - crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 in service fd00:1122:3344:102::2b - crucible 9fd52961-426f-4e62-a644-b70871103fca in service fd00:1122:3344:102::26 - crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 in service fd00:1122:3344:102::27 - crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 in service fd00:1122:3344:102::28 - crucible c407795c-6c8b-428e-8ab8-b962913c447f in service fd00:1122:3344:102::2a - internal_ntp f3f2e4f3-0985-4ef6-8336-ce479382d05d in service fd00:1122:3344:102::21 - nexus 01d58626-e1b0-480f-96be-ac784863c7dc in service fd00:1122:3344:102::22 + sled 68d24ac5-f341-49ea-a92a-0381b52ab387: blueprint zones at generation 3 + crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 expunged fd00:1122:3344:102::2c + crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc expunged fd00:1122:3344:102::23 + crucible 6464d025-4652-4948-919e-740bec5699b1 expunged fd00:1122:3344:102::24 + crucible 6939ce48-b17c-4616-b176-8a419a7697be expunged fd00:1122:3344:102::29 + crucible 878dfddd-3113-4197-a3ea-e0d4dbe9b476 expunged fd00:1122:3344:102::25 + crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 expunged fd00:1122:3344:102::2b + crucible 9fd52961-426f-4e62-a644-b70871103fca expunged fd00:1122:3344:102::26 + crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 expunged fd00:1122:3344:102::27 + crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 expunged fd00:1122:3344:102::28 + crucible c407795c-6c8b-428e-8ab8-b962913c447f expunged fd00:1122:3344:102::2a + internal_ntp f3f2e4f3-0985-4ef6-8336-ce479382d05d expunged fd00:1122:3344:102::21 + nexus 01d58626-e1b0-480f-96be-ac784863c7dc expunged fd00:1122:3344:102::22 - sled 75bc286f-2b4b-482c-9431-59272af529da: zones at generation 3 + sled 75bc286f-2b4b-482c-9431-59272af529da: blueprint zones at generation 3 crucible 15bb9def-69b8-4d2e-b04f-9fee1143387c in service fd00:1122:3344:104::25 crucible 23a8fa2b-ef3e-4017-a43f-f7a83953bd7c in service fd00:1122:3344:104::2c crucible 621509d6-3772-4009-aca1-35eefd1098fb in service fd00:1122:3344:104::28 @@ -64,7 +64,7 @@ parent: 55502b1b-e255-438b-a16a-2680a4b5f962 nexus 59950bc8-1497-44dd-8cbf-b6502ba921b2 in service fd00:1122:3344:104::2f nexus b4947d31-f70e-4ee0-8817-0ca6cea9b16b in service fd00:1122:3344:104::22 - sled affab35f-600a-4109-8ea0-34a067a4e0bc: zones at generation 3 + sled affab35f-600a-4109-8ea0-34a067a4e0bc: blueprint zones at generation 3 crucible 0dfbf374-9ef9-430f-b06d-f271bf7f84c4 in service fd00:1122:3344:101::27 crucible 3aa07966-5899-4789-ace5-f8eeb375c6c3 in service fd00:1122:3344:101::24 crucible 4ad0e9da-08f8-4d40-b4d3-d17e711b5bbf in service fd00:1122:3344:101::29 @@ -82,8 
+82,8 @@ parent: 55502b1b-e255-438b-a16a-2680a4b5f962 nexus c26b3bda-5561-44a1-a69f-22103fe209a1 in service fd00:1122:3344:101::2f METADATA: - created by: test_blueprint2 - created at: 1970-01-01T00:00:00.000Z - comment: sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: altered disks, sled 75bc286f-2b4b-482c-9431-59272af529da: altered disks, sled affab35f-600a-4109-8ea0-34a067a4e0bc: altered disks - internal DNS version: 1 - external DNS version: 1 + created by: test_blueprint2 + created at: 1970-01-01T00:00:00.000Z + comment: sled 48d95fef-bc9f-4f50-9a53-1e075836291d (sled policy is expunged): 12 zones expunged, sled 68d24ac5-f341-49ea-a92a-0381b52ab387 (sled state is decommissioned): 12 zones expunged, sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: altered disks, sled 75bc286f-2b4b-482c-9431-59272af529da: altered disks, sled affab35f-600a-4109-8ea0-34a067a4e0bc: altered disks + internal DNS version: 1 + external DNS version: 1 diff --git a/nexus/reconfigurator/preparation/src/lib.rs b/nexus/reconfigurator/preparation/src/lib.rs index 8f590d95f4..30370edb16 100644 --- a/nexus/reconfigurator/preparation/src/lib.rs +++ b/nexus/reconfigurator/preparation/src/lib.rs @@ -7,6 +7,7 @@ use anyhow::Context; use futures::StreamExt; use nexus_db_model::DnsGroup; +use nexus_db_model::IpKind; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::datastore::DataStoreDnsTest; use nexus_db_queries::db::datastore::DataStoreInventoryTest; @@ -16,7 +17,10 @@ use nexus_db_queries::db::pagination::Paginator; use nexus_db_queries::db::DataStore; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintMetadata; +use nexus_types::deployment::OmicronZoneExternalIpKind; +use nexus_types::deployment::OmicronZoneNic; use nexus_types::deployment::PlanningInput; +use nexus_types::deployment::PlanningInputBuildError; use nexus_types::deployment::PlanningInputBuilder; use nexus_types::deployment::Policy; use nexus_types::deployment::SledDetails; @@ -26,6 +30,7 @@ use nexus_types::deployment::UnstableReconfiguratorState; use nexus_types::identity::Asset; use nexus_types::identity::Resource; use nexus_types::inventory::Collection; +use nexus_types::inventory::SourceNatConfig; use omicron_common::address::IpRange; use omicron_common::address::Ipv6Subnet; use omicron_common::address::NEXUS_REDUNDANCY; @@ -128,13 +133,34 @@ impl PlanningInputFromDb<'_> { ); continue; }; + let zone_id = OmicronZoneUuid::from_untyped_uuid(zone_id); + + let to_kind = |ip| match external_ip_row.kind { + IpKind::Floating => Ok(OmicronZoneExternalIpKind::Floating(ip)), + IpKind::SNat => { + let snat = SourceNatConfig::new( + ip, + *external_ip_row.first_port, + *external_ip_row.last_port, + ) + .map_err(|err| { + PlanningInputBuildError::BadSnatConfig { zone_id, err } + })?; + Ok(OmicronZoneExternalIpKind::Snat(snat)) + } + IpKind::Ephemeral => Err( + PlanningInputBuildError::EphemeralIpUnsupported(zone_id), + ), + }; + builder .add_omicron_zone_external_ip_network( zone_id, // TODO-cleanup use `TypedUuid` everywhere ExternalIpUuid::from_untyped_uuid(external_ip_row.id), external_ip_row.ip, + to_kind, ) .map_err(|e| { Error::internal_error(&format!( @@ -144,6 +170,22 @@ impl PlanningInputFromDb<'_> { })?; } + for nic_row in self.service_nic_rows { + let zone_id = + OmicronZoneUuid::from_untyped_uuid(nic_row.service_id); + let nic = OmicronZoneNic::try_from(nic_row).map_err(|e| { + Error::internal_error(&format!( + "invalid Omicron zone NIC read from database: {e}" + )) + })?; + builder.add_omicron_zone_nic(zone_id, 
nic).map_err(|e| { + Error::internal_error(&format!( + "unexpectedly failed to add Omicron zone NIC \ + to planning input: {e}" + )) + })?; + } + Ok(builder.build()) } } diff --git a/nexus/src/app/background/sync_service_zone_nat.rs b/nexus/src/app/background/sync_service_zone_nat.rs index 59be7db5f2..d1bb9955d7 100644 --- a/nexus/src/app/background/sync_service_zone_nat.rs +++ b/nexus/src/app/background/sync_service_zone_nat.rs @@ -155,14 +155,16 @@ impl BackgroundTask for ServiceZoneNatTracker { ipnetwork::Ipv4Network::new(external_ip, 32) .unwrap(); + let (snat_first_port, snat_last_port) = + snat_cfg.port_range_raw(); let nat_value = Ipv4NatValues { external_address: nexus_db_model::Ipv4Net( omicron_common::api::external::Ipv4Net( external_address, ), ), - first_port: snat_cfg.first_port.into(), - last_port: snat_cfg.last_port.into(), + first_port: snat_first_port.into(), + last_port: snat_last_port.into(), sled_address: sled_address.into(), vni: nexus_db_model::Vni(nic.vni), mac: nexus_db_model::MacAddr(nic.mac), diff --git a/nexus/src/app/deployment.rs b/nexus/src/app/deployment.rs index 5f2d316efd..4d17cf43b0 100644 --- a/nexus/src/app/deployment.rs +++ b/nexus/src/app/deployment.rs @@ -7,7 +7,6 @@ use nexus_db_model::DnsGroup; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; -use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::planner::Planner; use nexus_reconfigurator_preparation::PlanningInputFromDb; use nexus_types::deployment::Blueprint; @@ -15,7 +14,6 @@ use nexus_types::deployment::BlueprintMetadata; use nexus_types::deployment::BlueprintTarget; use nexus_types::deployment::BlueprintTargetSet; use nexus_types::deployment::PlanningInput; -use nexus_types::deployment::SledFilter; use nexus_types::inventory::Collection; use omicron_common::address::NEXUS_REDUNDANCY; use omicron_common::api::external::CreateResult; @@ -26,7 +24,6 @@ use omicron_common::api::external::InternalContext; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; -use omicron_uuid_kinds::CollectionUuid; use slog_error_chain::InlineErrorChain; use uuid::Uuid; @@ -205,35 +202,6 @@ impl super::Nexus { self.db_datastore.blueprint_insert(opctx, blueprint).await } - pub async fn blueprint_generate_from_collection( - &self, - opctx: &OpContext, - collection_id: CollectionUuid, - ) -> CreateResult<Blueprint> { - let collection = self - .datastore() - .inventory_collection_read(opctx, collection_id) - .await?; - let planning_context = self.blueprint_planning_context(opctx).await?; - let blueprint = BlueprintBuilder::build_initial_from_collection( - &collection, - planning_context.planning_input.internal_dns_version(), - planning_context.planning_input.external_dns_version(), - planning_context.planning_input.all_sled_ids(SledFilter::All), - &planning_context.creator, - ) - .map_err(|error| { - Error::internal_error(&format!( - "error generating initial blueprint from collection {}: {}", - collection_id, - InlineErrorChain::new(&error) - )) - })?; - - self.blueprint_add(&opctx, &blueprint).await?; - Ok(blueprint) - } - pub async fn blueprint_create_regenerate( &self, opctx: &OpContext, diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 4008d33736..5d9c05331a 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -1159,7 +1159,12 @@ impl super::Nexus { )); } let source_nat = -
SourceNatConfig::from(snat_ip.into_iter().next().unwrap()); + SourceNatConfig::try_from(snat_ip.into_iter().next().unwrap()) + .map_err(|err| { + Error::internal_error(&format!( + "read invalid SNAT config from db: {err}" + )) + })?; // Gather the firewall rules for the VPC this instance is in. // The NIC info we gathered above doesn't have VPC information diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 1bb42b20b2..1c7fadea05 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -378,8 +378,8 @@ impl Nexus { &background_ctx, Arc::clone(&db_datastore), &config.pkg.background_tasks, + rack_id, config.deployment.id, - config.deployment.rack_id, resolver.clone(), saga_request, ); diff --git a/nexus/src/app/sagas/common_storage.rs b/nexus/src/app/sagas/common_storage.rs index bf530ef858..0fe14f6d2a 100644 --- a/nexus/src/app/sagas/common_storage.rs +++ b/nexus/src/app/sagas/common_storage.rs @@ -49,6 +49,7 @@ pub(crate) async fn ensure_region_in_dataset( cert_pem: None, key_pem: None, root_pem: None, + source: None, }; let create_region = || async { diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs index 35ec5167f9..c2582daaf4 100644 --- a/nexus/src/internal_api/http_entrypoints.rs +++ b/nexus/src/internal_api/http_entrypoints.rs @@ -49,7 +49,6 @@ use omicron_common::api::internal::nexus::RepairProgress; use omicron_common::api::internal::nexus::RepairStartInfo; use omicron_common::api::internal::nexus::SledInstanceState; use omicron_common::update::ArtifactId; -use omicron_uuid_kinds::CollectionUuid; use omicron_uuid_kinds::DownstairsKind; use omicron_uuid_kinds::TypedUuid; use omicron_uuid_kinds::UpstairsKind; @@ -102,7 +101,6 @@ pub(crate) fn internal_api() -> NexusApiDescription { api.register(blueprint_target_view)?; api.register(blueprint_target_set)?; api.register(blueprint_target_set_enabled)?; - api.register(blueprint_generate_from_collection)?; api.register(blueprint_regenerate)?; api.register(blueprint_import)?; @@ -956,33 +954,6 @@ async fn blueprint_target_set_enabled( // Generating blueprints -#[derive(Debug, Deserialize, JsonSchema)] -struct CollectionId { - collection_id: CollectionUuid, -} - -/// Generates a new blueprint matching the specified inventory collection -#[endpoint { - method = POST, - path = "/deployment/blueprints/generate-from-collection", -}] -async fn blueprint_generate_from_collection( - rqctx: RequestContext<Arc<ServerContext>>, - params: TypedBody<CollectionId>, -) -> Result<HttpResponseOk<Blueprint>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_internal_api(&rqctx).await; - let nexus = &apictx.nexus; - let collection_id = params.into_inner().collection_id; - let result = nexus - .blueprint_generate_from_collection(&opctx, collection_id) - .await?; - Ok(HttpResponseOk(result)) - }; - apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await -} - /// Generates a new blueprint for the current system, re-evaluating anything /// that's changed since the last one was generated #[endpoint { diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index cb7cc29ffc..3a8e6e4066 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -45,6 +45,7 @@ mod zone_type; pub use planning_input::DiskFilter; pub use planning_input::OmicronZoneExternalIp; +pub use planning_input::OmicronZoneExternalIpKind; pub use planning_input::OmicronZoneNic; pub use planning_input::PlanningInput; pub use planning_input::PlanningInputBuildError;
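The SourceNatConfig changes in this PR (the `try_from` conversion above, `SourceNatConfig::new(ip, 0, 16383)` in the sled-agent test below, and the `port_range_raw()` accessor used by the NAT tracker) all move callers from struct-literal construction to a validated constructor. What follows is a minimal standalone sketch of the invariant being enforced, assuming the rule is that an SNAT port range must be exactly one aligned 16384-port block (consistent with the `(0, 16383)` pair in the updated test and the `UnalignedPortPair` error variant); these are simplified stand-in types, not the omicron implementation:

use std::net::{IpAddr, Ipv6Addr};

// Assumed block size, matching the (0, 16383) pair in the updated test.
const NUM_SOURCE_NAT_PORTS: u16 = 16384;

#[derive(Debug)]
enum SourceNatConfigError {
    UnalignedPortPair { first_port: u16, last_port: u16 },
}

#[derive(Debug, Clone, Copy)]
struct SourceNatConfig {
    ip: IpAddr,
    first_port: u16,
    last_port: u16,
}

impl SourceNatConfig {
    // Accept only a whole, aligned block of source ports, e.g. (0, 16383)
    // or (16384, 32767); reject anything else at construction time.
    fn new(
        ip: IpAddr,
        first_port: u16,
        last_port: u16,
    ) -> Result<Self, SourceNatConfigError> {
        if first_port % NUM_SOURCE_NAT_PORTS == 0
            && last_port == first_port + (NUM_SOURCE_NAT_PORTS - 1)
        {
            Ok(Self { ip, first_port, last_port })
        } else {
            Err(SourceNatConfigError::UnalignedPortPair {
                first_port,
                last_port,
            })
        }
    }

    // Accessor for callers that still need the raw ports (e.g. when
    // building NAT table rows), instead of reaching into the fields.
    fn port_range_raw(&self) -> (u16, u16) {
        (self.first_port, self.last_port)
    }
}

fn main() {
    let ip = IpAddr::V6(Ipv6Addr::UNSPECIFIED);
    // The updated sled-agent test value: one full aligned block.
    assert!(SourceNatConfig::new(ip, 0, 16383).is_ok());
    // The old struct-literal value (`first_port: 0, last_port: 0`)
    // would now be rejected as unaligned.
    assert!(SourceNatConfig::new(ip, 0, 0).is_err());
    assert_eq!(
        SourceNatConfig::new(ip, 16384, 32767).unwrap().port_range_raw(),
        (16384, 32767)
    );
}

Funneling construction through `new` means a bad port pair read from the database surfaces as an error at the boundary (as in the instance.rs hunk above) rather than propagating silently into NAT rows.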
@@ -158,6 +159,19 @@ impl Blueprint { }) } + // Temporary method that provides the list of Omicron zones using + // `TypedUuid`. + // + // In the future, `all_omicron_zones` will return `SledUuid`, + // and this method will go away. + pub fn all_omicron_zones_typed( + &self, + filter: BlueprintZoneFilter, + ) -> impl Iterator<Item = (SledUuid, &OmicronZoneConfig)> { + self.all_omicron_zones(filter) + .map(|(sled_id, z)| (SledUuid::from_untyped_uuid(sled_id), z)) + } + /// Iterate over the ids of all sleds in the blueprint pub fn sleds(&self) -> impl Iterator<Item = SledUuid> + '_ { self.blueprint_zones.keys().copied().map(SledUuid::from_untyped_uuid) @@ -189,11 +203,7 @@ impl Blueprint { /// /// Note that collections do not include information about zone /// disposition, so it is assumed that all zones in the collection have the - /// [`InService`](BlueprintZoneDisposition::InService) disposition. (This - /// is the same assumption made by - /// [`BlueprintZonesConfig::initial_from_collection`]. The logic here may - /// also be expanded to handle cases where not all zones in the collection - /// are in-service.) + /// [`InService`](BlueprintZoneDisposition::InService) disposition. pub fn diff_since_collection( &self, before: &Collection, @@ -310,37 +320,6 @@ pub struct BlueprintZonesConfig { } impl BlueprintZonesConfig { - /// Constructs a new [`BlueprintZonesConfig`] from a collection's zones. - /// - /// For the initial blueprint, all zones within a collection are assumed to - /// have the [`InService`](BlueprintZoneDisposition::InService) - /// disposition. - pub fn initial_from_collection( - collection: &OmicronZonesConfig, - ) -> Result<Self, InvalidOmicronZoneType> { - let zones = collection - .zones - .iter() - .map(|z| { - BlueprintZoneConfig::from_omicron_zone_config( - z.clone(), - BlueprintZoneDisposition::InService, - ) - }) - .collect::<Result<Vec<_>, _>>()?; - - let mut ret = Self { - // An initial `BlueprintZonesConfig` reuses the generation from - // `OmicronZonesConfig`. - generation: collection.generation, - zones, - }; - // For testing, it's helpful for zones to be in sorted order. - ret.sort(); - - Ok(ret) - } - /// Sorts the list of zones stored in this configuration. /// /// This is not strictly necessary. But for testing (particularly snapshot @@ -1263,7 +1242,7 @@ mod table_display { for (sled_id, sled_zones) in blueprint_zones { let heading = format!( - "{SLED_INDENT}sled {sled_id}: zones at generation {}", + "{SLED_INDENT}sled {sled_id}: blueprint zones at generation {}", sled_zones.generation ); builder.make_section( @@ -1514,7 +1493,7 @@ mod table_display { section: &mut StSectionBuilder, ) { let heading = format!( - "{}{SLED_INDENT}sled {sled_id}: zones at generation {}", + "{}{SLED_INDENT}sled {sled_id}: blueprint zones at generation {}", kind.prefix(), sled_zones.generation, ); @@ -1549,26 +1528,28 @@ mod table_display { modified: &DiffSledModified, section: &mut StSectionBuilder, ) { - let (generation_heading, warning) = if modified.generation_before - != modified.generation_after - { - ( - format!( - "zones at generation: {} -> {}", - modified.generation_before, modified.generation_after, - ), - None, - ) - } else { - // Modified sleds should always see a generation bump.
- ( - format!("zones at generation: {}", modified.generation_before), - Some(format!( - "{WARNING_PREFIX}{ZONE_HEAD_INDENT}\ + let (generation_heading, warning) = + if modified.generation_before != modified.generation_after { + ( + format!( + "blueprint zones at generation: {} -> {}", + modified.generation_before, modified.generation_after, + ), + None, + ) + } else { + // Modified sleds should always see a generation bump. + ( + format!( + "blueprint zones at generation: {}", + modified.generation_before + ), + Some(format!( + "{WARNING_PREFIX}{ZONE_HEAD_INDENT}\ warning: generation should have changed" - )), - ) - }; + )), + ) + }; let sled_heading = format!("{MODIFIED_PREFIX}{SLED_INDENT}sled {sled_id}: {generation_heading}"); diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs index 9c0714ffab..2503ff81f3 100644 --- a/nexus/types/src/deployment/planning_input.rs +++ b/nexus/types/src/deployment/planning_input.rs @@ -17,6 +17,8 @@ use omicron_common::address::Ipv6Subnet; use omicron_common::address::SLED_PREFIX; use omicron_common::api::external::Generation; use omicron_common::api::external::MacAddr; +use omicron_common::api::internal::shared::SourceNatConfig; +use omicron_common::api::internal::shared::SourceNatConfigError; use omicron_common::disk::DiskIdentity; use omicron_uuid_kinds::ExternalIpUuid; use omicron_uuid_kinds::OmicronZoneUuid; @@ -97,15 +99,33 @@ impl SledResources { } } +/// External IP variants possible for Omicron-managed zones. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum OmicronZoneExternalIpKind { + Floating(IpAddr), + Snat(SourceNatConfig), + // We should probably have `Ephemeral(IpAddr)` too (for Nexus), but + // currently we record Nexus as Floating. +} + +impl OmicronZoneExternalIpKind { + pub fn ip(&self) -> IpAddr { + match self { + OmicronZoneExternalIpKind::Floating(ip) => *ip, + OmicronZoneExternalIpKind::Snat(snat) => snat.ip, + } + } +} + /// External IP allocated to an Omicron-managed zone. /// /// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields /// necessary for blueprint planning, and requires that the zone have a single /// IP. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] pub struct OmicronZoneExternalIp { pub id: ExternalIpUuid, - pub ip: IpAddr, + pub kind: OmicronZoneExternalIpKind, } /// Network interface allocated to an Omicron-managed zone. 
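The new `OmicronZoneExternalIpKind` above, combined with the builder change in the next hunk, splits responsibilities: `add_omicron_zone_external_ip_network` keeps validating that the `IpNetwork` covers exactly one address, while classification into `Floating` vs. `Snat` is deferred to a caller-supplied fallible closure (the `to_kind` closure built from the database row's `IpKind` in the preparation crate earlier in this diff). Here is a condensed sketch of that shape, using simplified stand-in types rather than the real builder:

use std::net::{IpAddr, Ipv4Addr};

// Simplified stand-ins; the real types carry zone/IP IDs and a full
// SourceNatConfig rather than a bare port pair.
#[derive(Debug, Clone, Copy)]
enum ExternalIpKind {
    Floating(IpAddr),
    Snat { ip: IpAddr, first_port: u16, last_port: u16 },
}

#[derive(Debug)]
enum BuildError {
    NotSingleIp { size: u128 },
    EphemeralUnsupported,
}

// Builder half: validate "exactly one address", then let the caller's
// closure decide (or reject) the kind.
fn add_external_ip_network<F>(
    size: u128, // stand-in for IpNetwork::size()
    ip: IpAddr, // stand-in for ip.ip()
    to_kind: F,
) -> Result<ExternalIpKind, BuildError>
where
    F: FnOnce(IpAddr) -> Result<ExternalIpKind, BuildError>,
{
    if size != 1 {
        return Err(BuildError::NotSingleIp { size });
    }
    to_kind(ip)
}

fn main() {
    let ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 1));

    // A floating-IP row classifies trivially.
    let kind =
        add_external_ip_network(1, ip, |ip| Ok(ExternalIpKind::Floating(ip)))
            .unwrap();
    println!("{kind:?}");

    // An ephemeral row is rejected by the closure, not by the builder.
    let err = add_external_ip_network(1, ip, |_| {
        Err(BuildError::EphemeralUnsupported)
    })
    .unwrap_err();
    println!("{err:?}");

    // A multi-address network never reaches the closure.
    assert!(matches!(
        add_external_ip_network(256, ip, |ip| Ok(ExternalIpKind::Floating(ip))),
        Err(BuildError::NotSingleIp { .. })
    ));
}

Deferring the decision to a closure keeps database-specific knowledge, such as ephemeral IPs being unsupported for zones, out of the planning-input builder itself.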
@@ -116,7 +136,7 @@ pub struct OmicronZoneExternalIp { pub struct OmicronZoneNic { pub id: Uuid, pub mac: MacAddr, - pub ip: IpNetwork, + pub ip: IpAddr, pub slot: u8, pub primary: bool, } @@ -404,13 +424,21 @@ impl PlanningInput { pub enum PlanningInputBuildError { #[error("duplicate sled ID: {0}")] DuplicateSledId(SledUuid), - #[error("Omicron zone {zone_id} has a range of IPs ({ip:?}), only a single IP is supported")] + #[error("Omicron zone {zone_id} has a range of IPs ({ip}); only a single IP is supported")] NotSingleIp { zone_id: OmicronZoneUuid, ip: IpNetwork }, #[error("Omicron zone {zone_id} already has an external IP ({ip:?})")] DuplicateOmicronZoneExternalIp { zone_id: OmicronZoneUuid, ip: OmicronZoneExternalIp, }, + #[error("Omicron zone {0} has an ephemeral IP (unsupported)")] + EphemeralIpUnsupported(OmicronZoneUuid), + #[error("Omicron zone {zone_id} has a bad SNAT config")] + BadSnatConfig { + zone_id: OmicronZoneUuid, + #[source] + err: SourceNatConfigError, + }, #[error("Omicron zone {zone_id} already has a NIC ({nic:?})")] DuplicateOmicronZoneNic { zone_id: OmicronZoneUuid, nic: OmicronZoneNic }, } @@ -474,12 +502,19 @@ impl PlanningInputBuilder { /// Like `add_omicron_zone_external_ip`, but can accept an [`IpNetwork`], /// validating that the IP is a single address. - pub fn add_omicron_zone_external_ip_network( + pub fn add_omicron_zone_external_ip_network<F>( &mut self, zone_id: OmicronZoneUuid, ip_id: ExternalIpUuid, ip: IpNetwork, - ) -> Result<(), PlanningInputBuildError> { + to_kind: F, ) -> Result<(), PlanningInputBuildError> where F: FnOnce( IpAddr, ) -> Result<OmicronZoneExternalIpKind, PlanningInputBuildError>, { let size = match ip.size() { NetworkSize::V4(n) => u128::from(n), NetworkSize::V6(n) => n, @@ -487,10 +522,11 @@ impl PlanningInputBuilder { if size != 1 { return Err(PlanningInputBuildError::NotSingleIp { zone_id, ip }); } + let kind = to_kind(ip.ip())?; self.add_omicron_zone_external_ip( zone_id, - OmicronZoneExternalIp { id: ip_id, ip: ip.ip() }, + OmicronZoneExternalIp { id: ip_id, kind }, ) } @@ -507,7 +543,7 @@ impl PlanningInputBuilder { Entry::Occupied(prev) => { Err(PlanningInputBuildError::DuplicateOmicronZoneExternalIp { zone_id, - ip: prev.get().clone(), + ip: *prev.get(), }) } } diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 0383c9cbd2..593a841bfc 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -455,40 +455,6 @@ } } }, - "/deployment/blueprints/generate-from-collection": { - "post": { - "summary": "Generates a new blueprint matching the specified inventory collection", - "operationId": "blueprint_generate_from_collection", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CollectionId" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Blueprint" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, "/deployment/blueprints/import": { "post": { "summary": "Imports a client-provided blueprint", @@ -3199,17 +3165,6 @@ "key" ] }, - "CollectionId": { - "type": "object", - "properties": { - "collection_id": { - "$ref": "#/components/schemas/TypedUuidForCollectionKind" - } - }, - "required": [ - "collection_id" - ] - }, "Cumulativedouble": { "description": "A cumulative or counter data type.", "type": "object", @@ -7178,10 +7133,6 @@
"SwitchPutResponse": { "type": "object" }, - "TypedUuidForCollectionKind": { - "type": "string", - "format": "uuid" - }, "TypedUuidForDownstairsRegionKind": { "type": "string", "format": "uuid" diff --git a/package-manifest.toml b/package-manifest.toml index d1c874281e..2248e352d5 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -492,10 +492,10 @@ only_for_targets.image = "standard" # 3. Use source.type = "manual" instead of "prebuilt" source.type = "prebuilt" source.repo = "crucible" -source.commit = "5677c7be81b60d9ba9c30991d10376f279a1d3b7" +source.commit = "1ef72f3c935e7dc936bf43310c04668fb60d7a20" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible.sha256.txt -source.sha256 = "5341c5572f80b8d1763f6563412dc03d9604d8c7af4022fc5da55338ee60d35c" +source.sha256 = "f4b9189d82729f851bab25ee7991134db2732f82657a15e88889500ed8a6e6c2" output.type = "zone" output.intermediate_only = true @@ -504,10 +504,10 @@ service_name = "crucible_pantry_prebuilt" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "crucible" -source.commit = "5677c7be81b60d9ba9c30991d10376f279a1d3b7" +source.commit = "1ef72f3c935e7dc936bf43310c04668fb60d7a20" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-pantry.sha256.txt -source.sha256 = "bf281bae1331279109dac23328ff86756331d7776e69396b02c77a4d08a225c7" +source.sha256 = "e7bf9cf165c3191c899c1f019df4edb6a34c0fe83d61cce861ae0aefc649882d" output.type = "zone" output.intermediate_only = true @@ -519,10 +519,10 @@ service_name = "propolis-server" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "propolis" -source.commit = "8ff3ab62246fa1f8b8a5bfab0a7b8e1000926361" +source.commit = "dd788a311a382b09ce1d3e35f7777b378e09fdf7" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/propolis/image//propolis-server.sha256.txt -source.sha256 = "35c5956b14d3b0a843351ce8ea7e8cb52e631a96a89041810fe0f91cc4072638" +source.sha256 = "f9ebee502fdaa115563ac84e855805c0bf5582437820445dd1734423216dfc5b" output.type = "zone" [package.mg-ddm-gz] diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs index d016715591..94ad8522c7 100644 --- a/sled-agent/src/instance.rs +++ b/sled-agent/src/instance.rs @@ -1753,11 +1753,12 @@ mod tests { hostname: Hostname::from_str("bert").unwrap(), }, nics: vec![], - source_nat: SourceNatConfig { - ip: IpAddr::V6(Ipv6Addr::UNSPECIFIED), - first_port: 0, - last_port: 0, - }, + source_nat: SourceNatConfig::new( + IpAddr::V6(Ipv6Addr::UNSPECIFIED), + 0, + 16383, + ) + .unwrap(), ephemeral_ip: None, floating_ips: vec![], firewall_rules: vec![], diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index d868448bed..6e3ce4a6ac 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -24,6 +24,7 @@ use omicron_common::address::{ use omicron_common::api::external::{Generation, MacAddr, Vni}; use omicron_common::api::internal::shared::{ NetworkInterface, NetworkInterfaceKind, SourceNatConfig, + SourceNatConfigError, }; use omicron_common::backoff::{ retry_notify_ext, retry_policy_internal_service_aggressive, BackoffError, @@ -1118,7 +1119,14 @@ impl ServicePortBuilder { self.next_snat_ip = None; } - let snat_cfg = SourceNatConfig { ip: snat_ip, first_port, last_port }; + let 
snat_cfg = + match SourceNatConfig::new(snat_ip, first_port, last_port) { + Ok(cfg) => cfg, + // We know our port pair is aligned, making this unreachable. + Err(err @ SourceNatConfigError::UnalignedPortPair { .. }) => { + unreachable!("{err}"); + } + }; let (ip, subnet) = match snat_ip { IpAddr::V4(_) => ( diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index 3584e8f139..1ddb3f9b0a 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -1274,7 +1274,10 @@ impl ServiceManager { // XXX: need to revisit iff. any services get more than one // address. let (target_ip, first_port, last_port) = match snat { - Some(s) => (s.ip, s.first_port, s.last_port), + Some(s) => { + let (first_port, last_port) = s.port_range_raw(); + (s.ip, first_port, last_port) + } None => (floating_ips[0], 0, u16::MAX), }; diff --git a/sled-agent/src/sim/storage.rs b/sled-agent/src/sim/storage.rs index b21edf0915..6a688f6101 100644 --- a/sled-agent/src/sim/storage.rs +++ b/sled-agent/src/sim/storage.rs @@ -97,6 +97,8 @@ impl CrucibleDataInner { cert_pem: None, key_pem: None, root_pem: None, + source: None, + read_only: false, }; let old = self.regions.insert(id, region.clone()); diff --git a/tools/permslip_production b/tools/permslip_production index 331209b1f0..2cf844d9d3 100644 --- a/tools/permslip_production +++ b/tools/permslip_production @@ -1 +1 @@ -394b0bb7c759eead2e41cec98c2376e5e558d6b401418b56ca0db50d55d434ad manifest-oxide-rot-1-v1.0.9.toml +75bf4467effc6077958c926c19fe83c05a09b02795d4b0b6ad9191ed93a6d5b9 manifest-oxide-rot-1-v1.0.10.toml diff --git a/tools/permslip_staging b/tools/permslip_staging index 7b4e5f161a..4e3a32c785 100644 --- a/tools/permslip_staging +++ b/tools/permslip_staging @@ -1,4 +1,4 @@ b1b0d63a179652fcc80fabbb49307c0fe28cf52744f58f7b8a768f14d6721a3f manifest-gimlet-v1.0.15.toml -686f5fff41ed3b33ba0be38d2becdeb67847705fd590f05f6d8f7c600db87fb7 manifest-oxide-rot-1-v1.0.9.toml -7d26b9f719a7f2c22e091d7d80de66933c11bdb9ae174ae59552b376400d63db manifest-psc-v1.0.14.toml -cd8c1bb64990573b9d29dcc2312d9c8cb4b08bc59873196ac50ce2b506037594 manifest-sidecar-v1.0.14.toml +e34b2f363ed0e1399e175bfae9e5e50217255c7984154697180d8a2d4611f65d manifest-oxide-rot-1-v1.0.10.toml +8c7a57a733df2cbff4963bf32073066871aae26a7f9eca878490e8f125bd2688 manifest-psc-v1.0.15.toml +267c8953c26f91614a59015719162f6f8f55d31d795a458387191dd1d874f9f0 manifest-sidecar-v1.0.15.toml diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 654d92869d..c6e17df884 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -38,7 +38,7 @@ crossbeam-utils = { version = "0.8.19" } crossterm = { version = "0.27.0", features = ["event-stream", "serde"] } crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } der = { version = "0.7.8", default-features = false, features = ["derive", "flagset", "oid", "pem", "std"] } -diesel = { version = "2.1.5", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } +diesel = { version = "2.1.6", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } digest = { version = "0.10.7", features = ["mac", "oid", "std"] } either = { version = "1.11.0" } elliptic-curve = { version = "0.13.8", features = ["ecdh", "hazmat", "pem", "std"] } @@ -145,7 +145,7 @@ crossbeam-utils = { version = "0.8.19" } crossterm = { 
version = "0.27.0", features = ["event-stream", "serde"] } crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } der = { version = "0.7.8", default-features = false, features = ["derive", "flagset", "oid", "pem", "std"] } -diesel = { version = "2.1.5", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } +diesel = { version = "2.1.6", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } digest = { version = "0.10.7", features = ["mac", "oid", "std"] } either = { version = "1.11.0" } elliptic-curve = { version = "0.13.8", features = ["ecdh", "hazmat", "pem", "std"] }